Example #1
    def __init__(self, mode, cnf=None):
        # type: (str, Conf) -> None
        """
        :param mode: values in {'train', 'val'}
        :param cnf: configuration object
        :param sigma: parameter that controls the "spread" of the 3D gaussians:param cnf: configuration object
        """
        self.cnf = cnf
        self.mode = mode
        assert mode in {'train', 'val',
                        'test'}, '`mode` must be \'train\' or \'val\''

        self.sf_pairs = []
        k = 10 if self.mode == 'train' else 100
        for d in Path(self.cnf.mot_synth_path / 'frames' / self.mode).dirs():
            sequence_name = str(d.basename())
            for frame_number in range(1, 900, k):
                self.sf_pairs.append((sequence_name, frame_number))

        # force an odd kernel size of roughly 5 * sigma
        self.g = (self.cnf.sigma * 5 +
                  1) if (self.cnf.sigma * 5) % 2 == 0 else self.cnf.sigma * 5
        self.gaussian_patch = utils.gkern(w=self.g,
                                          h=self.g,
                                          d=self.g,
                                          center=(self.g // 2, self.g // 2,
                                                  self.g // 2),
                                          s=self.cnf.sigma,
                                          device='cpu')
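`utils.gkern` itself is not shown in this example. A minimal sketch of a 3D version compatible with the call above, assuming it returns a (d, h, w) torch tensor holding an isotropic Gaussian centred at `center` with standard deviation `s` (the project's actual helper may differ):

import torch

def gkern(w, h, d, center, s, device='cpu'):
    # Coordinate grids for the three axes ('ij' indexing).
    z, y, x = torch.meshgrid(
        torch.arange(d, dtype=torch.float32, device=device),
        torch.arange(h, dtype=torch.float32, device=device),
        torch.arange(w, dtype=torch.float32, device=device))
    cz, cy, cx = center
    # Isotropic 3D Gaussian with peak value 1 at the centre voxel.
    return torch.exp(-((x - cx) ** 2 + (y - cy) ** 2 + (z - cz) ** 2) / (2 * s ** 2))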
Example #2
    def add_vote(self, origin, destination, rank, ksize=31, ksig=3):
        """Splash a Gaussian vote, weighted by 1 / rank, into the accumulator."""

        # Generate the kernel to vote
        k = utils.gkern(ksize, ksig)
        k /= k.max()
        k *= 1 / rank
        offs = k.shape[0] // 2

        # Centering the origin
        dy = -1 * origin[0]
        dx = -1 * origin[1]

        accu_point = (destination[0] + dy + self.accumulator_center[0],
                      destination[1] + dx + self.accumulator_center[1])

        # Adds the splash to the accumulator
        if accu_point != self.accumulator_center:
            tmp = self.accumulator[accu_point[0] - offs:accu_point[0] + offs +
                                   1, accu_point[1] - offs:accu_point[1] +
                                   offs + 1]
            self.accumulator[accu_point[0] - offs:accu_point[0] + offs + 1,
                             accu_point[1] - offs:accu_point[1] + offs +
                             1] += k[:tmp.shape[0], :tmp.shape[1]]

            # V set
            # currently only the central vote is saved, trading some precision for memory
            if self.init:
                self.votes = np.array([[
                    accu_point[0], accu_point[1], origin[0], origin[1],
                    destination[0], destination[1]
                ]])
                self.init = False
            else:
                self.votes = np.concatenate(
                    (self.votes,
                     np.array([[
                         accu_point[0], accu_point[1], origin[0], origin[1],
                         destination[0], destination[1]
                     ]])),
                    axis=0)
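In this example `utils.gkern(ksize, ksig)` is the usual 2D Gaussian-kernel helper. A minimal sketch of such a helper, built from scipy's normal PDF and normalized to sum to 1 (an assumption about the project's implementation, not its actual code):

import numpy as np
import scipy.stats as st

def gkern(kernlen=31, nsig=3):
    # Sample a 1D Gaussian on [-nsig, nsig] and take the outer product.
    x = np.linspace(-nsig, nsig, kernlen)
    kern1d = st.norm.pdf(x)
    kern2d = np.outer(kern1d, kern1d)
    return kern2d / kern2d.sum()

Because add_vote rescales the result with k /= k.max() and k *= 1 / rank, only the kernel's shape matters here, not its normalization.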
Example #3
def run_tensorflow(image,
                   noiseImage,
                   contentImage,
                   output_directory,
                   depth,
                   weightsLayers,
                   weightsLayersContent,
                   weightsPyramid,
                   weightsPyramidContent,
                   iter,
                   betaPar,
                   vgg_class=vgg19.Vgg19):

    print('Begin execution of run_tensorflow')
    print(np.shape(image))

    # Variable for storing the style image
    style = tf.get_variable(name="style_image",
                            dtype=tf.float64,
                            initializer=image,
                            trainable=False)
    style = tf.cast(style, tf.float32)
    noise = tf.get_variable(name="noise_image",
                            dtype=tf.float32,
                            initializer=tf.constant(noiseImage),
                            trainable=True)
    content = tf.get_variable(name="content_image",
                              dtype=tf.float64,
                              initializer=tf.constant(contentImage),
                              trainable=False)
    content = tf.cast(content, tf.float32)
    #noise = tf.cast(noise, tf.float32)

    styleList = [style]
    noiseList = [noise]
    contentList = [content]
    fpassListContent = []
    fpassListstyle = []  #list of vgg objects
    fpassListNoise = []
    outListstyle = []  #list of output layer of vgg objects
    outListNoise = []
    outListContent = []

    ## TODO ##
    # move the pyramid code to a function
    # it receives the styleList and the name_scope name, and returns the updated list
    with tf.name_scope('build_pyramid_style'):
        gaussKerr = tf.get_variable(initializer=np.reshape(
            utils.gkern(5), (5, 5, 1, 1)),
                                    trainable=False,
                                    dtype='float64',
                                    name='gauss_kernel')
        gaussKerr = tf.cast(gaussKerr, tf.float32)
        downsamp_filt = tf.get_variable(initializer=np.reshape(
            np.array([[1., 0.], [0., 0.]]), (2, 2, 1, 1)),
                                        trainable=False,
                                        dtype='float64',
                                        name='downsample_filter')
        downsamp_filt = tf.cast(downsamp_filt, tf.float32)

        for i in range(depth):
            with tf.name_scope('cycle%d' % (i)):
                [tR, tG, tB] = tf.unstack(styleList[i], num=3, axis=3)
                tR = tf.expand_dims(tR, 3)
                tG = tf.expand_dims(tG, 3)
                tB = tf.expand_dims(tB, 3)

                #convolve each input image with the gaussian filter
                tR_gauss = tf.nn.conv2d(tR,
                                        gaussKerr,
                                        strides=[1, 1, 1, 1],
                                        padding='SAME')
                tG_gauss = tf.nn.conv2d(tG,
                                        gaussKerr,
                                        strides=[1, 1, 1, 1],
                                        padding='SAME')
                tB_gauss = tf.nn.conv2d(tB,
                                        gaussKerr,
                                        strides=[1, 1, 1, 1],
                                        padding='SAME')

                tR_downs = tf.nn.conv2d(tR_gauss,
                                        downsamp_filt,
                                        strides=[1, 2, 2, 1],
                                        padding='SAME')
                tG_downs = tf.nn.conv2d(tG_gauss,
                                        downsamp_filt,
                                        strides=[1, 2, 2, 1],
                                        padding='SAME')
                tB_downs = tf.nn.conv2d(tB_gauss,
                                        downsamp_filt,
                                        strides=[1, 2, 2, 1],
                                        padding='SAME')

                tmp = tf.concat([tR_downs, tG_downs, tB_downs], axis=3)

                styleList.append(tmp)

    ## TODO ##
    ## Find out what to do with the reuse
    with tf.name_scope('build_pyramid_noise'):
        #gaussKerr = tf.get_variable(initializer = np.reshape(utils.gkern(5), (5,5,1,1)), trainable=False, dtype='float64', name='gauss_kernel')
        #gaussKerr = tf.cast(gaussKerr, tf.float32, reuse=True)
        #downsamp_filt = tf.get_variable(initializer = np.reshape(np.array([[1.,0.],[0.,0.]]), (2,2,1,1)), trainable=False, dtype='float64', name='downsample_filter')
        #downsamp_filt = tf.cast(downsamp_filt, tf.float32, reuse=True)

        for i in range(depth):
            with tf.name_scope('cycle%d' % (i)):
                [tR, tG, tB] = tf.unstack(noiseList[i], num=3, axis=3)
                tR = tf.expand_dims(tR, 3)
                tG = tf.expand_dims(tG, 3)
                tB = tf.expand_dims(tB, 3)

                #convolve each input image with the gaussian filter
                tR_gauss = tf.nn.conv2d(tR,
                                        gaussKerr,
                                        strides=[1, 1, 1, 1],
                                        padding='SAME')
                tG_gauss = tf.nn.conv2d(tG,
                                        gaussKerr,
                                        strides=[1, 1, 1, 1],
                                        padding='SAME')
                tB_gauss = tf.nn.conv2d(tB,
                                        gaussKerr,
                                        strides=[1, 1, 1, 1],
                                        padding='SAME')

                tR_downs = tf.nn.conv2d(tR_gauss,
                                        downsamp_filt,
                                        strides=[1, 2, 2, 1],
                                        padding='SAME')
                tG_downs = tf.nn.conv2d(tG_gauss,
                                        downsamp_filt,
                                        strides=[1, 2, 2, 1],
                                        padding='SAME')
                tB_downs = tf.nn.conv2d(tB_gauss,
                                        downsamp_filt,
                                        strides=[1, 2, 2, 1],
                                        padding='SAME')

                tmp = tf.concat([tR_downs, tG_downs, tB_downs], axis=3)

                noiseList.append(tmp)

    with tf.name_scope('build_pyramid_content'):
        #gaussKerr = tf.get_variable(initializer = np.reshape(utils.gkern(5), (5,5,1,1)), trainable=False, dtype='float64', name='gauss_kernel')
        #gaussKerr = tf.cast(gaussKerr, tf.float32, reuse=True)
        #downsamp_filt = tf.get_variable(initializer = np.reshape(np.array([[1.,0.],[0.,0.]]), (2,2,1,1)), trainable=False, dtype='float64', name='downsample_filter')
        #downsamp_filt = tf.cast(downsamp_filt, tf.float32, reuse=True)

        for i in range(depth):
            with tf.name_scope('cycle%d' % (i)):
                [tR, tG, tB] = tf.unstack(contentList[i], num=3, axis=3)
                tR = tf.expand_dims(tR, 3)
                tG = tf.expand_dims(tG, 3)
                tB = tf.expand_dims(tB, 3)

                #convolve each input image with the gaussian filter
                tR_gauss = tf.nn.conv2d(tR,
                                        gaussKerr,
                                        strides=[1, 1, 1, 1],
                                        padding='SAME')
                tG_gauss = tf.nn.conv2d(tG,
                                        gaussKerr,
                                        strides=[1, 1, 1, 1],
                                        padding='SAME')
                tB_gauss = tf.nn.conv2d(tB,
                                        gaussKerr,
                                        strides=[1, 1, 1, 1],
                                        padding='SAME')

                tR_downs = tf.nn.conv2d(tR_gauss,
                                        downsamp_filt,
                                        strides=[1, 2, 2, 1],
                                        padding='SAME')
                tG_downs = tf.nn.conv2d(tG_gauss,
                                        downsamp_filt,
                                        strides=[1, 2, 2, 1],
                                        padding='SAME')
                tB_downs = tf.nn.conv2d(tB_gauss,
                                        downsamp_filt,
                                        strides=[1, 2, 2, 1],
                                        padding='SAME')

                tmp = tf.concat([tR_downs, tG_downs, tB_downs], axis=3)

                contentList.append(tmp)

    # fpassList is a list of vgg instances
    # here we run the build method for each instance and
    # store the output (last layer) on outList
    with tf.name_scope('forward_pass_style'):
        for j in range(len(styleList)):
            with tf.name_scope('cycle%d' % (j)):
                fpassListstyle.append(vgg_class())
                out = fpassListstyle[j].build(styleList[j])
                outListstyle.append(out)

    with tf.name_scope('forward_pass_noise'):
        for j in range(len(styleList)):
            with tf.name_scope('cycle%d' % (j)):
                fpassListNoise.append(vgg_class())
                out = fpassListNoise[j].build(noiseList[j])
                outListNoise.append(out)

    with tf.name_scope('forward_pass_content'):
        for j in range(len(contentList)):
            with tf.name_scope('cycle%d' % (j)):
                fpassListContent.append(vgg_class())
                out = fpassListContent[j].build(contentList[j])
                outListContent.append(out)

    ###################################################
    ## Loss function
    with tf.name_scope('lossStyle'):
        # Check that there are at least as many layer weights as layers
        assert len(weightsLayers) >= fpassListstyle[0].getLayersCount()
        assert len(weightsPyramid) >= len(fpassListstyle)

        loss_style = 0.0
        #for i in range(0,5): #layers
        for j in range(len(fpassListstyle)):  #pyramid levels
            with tf.name_scope('cyclePyramid%d' % (j)):
                loss_pyra = 0.0
                for i in range(0, fpassListstyle[0].getLayersCount()):  #layers
                    with tf.name_scope('cycleLayer%d' % (i)):
                        origin = fpassListstyle[j].conv_list[i]
                        new = fpassListNoise[j].conv_list[i]
                        shape = origin.get_shape().as_list()
                        N = shape[3]  #number of channels (filters)
                        M = shape[1] * shape[2]  #width x height
                        F = tf.reshape(origin, (-1, N),
                                       name='CreateF_style')  #N x M
                        Gram_o = (
                            tf.matmul(tf.transpose(F, name='transpose_style'),
                                      F,
                                      name='Gram_style') / (N * M))
                        F_t = tf.reshape(new, (-1, N), name='CreateF_noise')
                        Gram_n = tf.matmul(tf.transpose(
                            F_t, name='transpose_noise'),
                                           F_t,
                                           name='Gram_noise') / (N * M)
                        loss = tf.nn.l2_loss(
                            (Gram_o - Gram_n), name='lossGramsubstraction') / 4
                        loss = tf.scalar_mul(weightsLayers[i], loss)
                        loss_pyra = tf.add(loss_pyra, loss)
                loss_pyra = tf.scalar_mul(weightsPyramid[j], loss_pyra)
                loss_style = tf.add(loss_style, loss_pyra)
        tf.summary.scalar("loss_style", loss_style)

    with tf.name_scope('lossContent'):
        # Check that there are at least as many layer weights as layers
        assert len(
            weightsLayersContent) >= fpassListContent[0].getLayersCount()
        assert len(weightsPyramidContent) >= len(fpassListContent)

        loss_content = 0.0
        #for i in range(0,5): #layers
        for j in range(len(fpassListContent)):  #pyramid levels
            with tf.name_scope('cyclePyramid%d' % (j)):
                loss_pyra = 0.0
                for i in range(0,
                               fpassListContent[0].getLayersCount()):  #layers
                    with tf.name_scope('cycleLayer%d' % (i)):
                        con = fpassListContent[j].conv_list[i]
                        new = fpassListNoise[j].conv_list[i]
                        shape = con.get_shape().as_list()
                        N = shape[3]  #number of channels (filters)
                        M = shape[1] * shape[2]  #width x height
                        P = tf.reshape(con, (-1, N),
                                       name='CreateF_content')  #N x M
                        #Gram_o = (tf.matmul(tf.transpose(F, name='transpose_style'), F, name='Gram_style') / (N * M))
                        F = tf.reshape(new, (-1, N), name='CreateF_noise')
                        #Gram_n = tf.matmul(tf.transpose(F_t, name='transpose_noise'), F_t, name='Gram_noise') / (N * M)
                        loss = tf.nn.l2_loss(
                            (F - P), name='lossContentSubtraction') / 2
                        loss = tf.scalar_mul(weightsLayersContent[i], loss)
                        loss_pyra = tf.add(loss_pyra, loss)
                loss_pyra = tf.scalar_mul(weightsPyramidContent[j], loss_pyra)
                loss_content = tf.add(loss_content, loss_pyra)
        tf.summary.scalar("loss_content", loss_content)

    #betaPar = 0.5
    alpha = tf.constant(1, dtype=tf.float32, name="alpha")
    beta = tf.constant(betaPar, dtype=tf.float32, name="beta")

    loss_sum = tf.scalar_mul(alpha, loss_content) + tf.scalar_mul(
        beta, loss_style)

    train_step = tf.train.AdamOptimizer(0.01).minimize(loss_sum,
                                                       var_list=[noise])
    #train_step = tf.train.AdagradOptimizer(0.01).minimize(loss_sum, var_list=[noise])

    restrict = tf.maximum(0., tf.minimum(1., noise), name="Restrict_noise")
    r_noise = noise.assign(restrict)

    tmpFile = os.path.join(output_directory, "tensor/")
    if not os.path.exists(tmpFile):
        os.makedirs(tmpFile)

    #https://www.tensorflow.org/api_docs/python/tf/contrib/opt/ScipyOptimizerInterface
    optimizer = tf.contrib.opt.ScipyOptimizerInterface(
        loss_sum,
        var_to_bounds={noise: (0, 1)},
        method='L-BFGS-B',
        options={'maxiter': iter})

    #trainOP = optimizer.minimze

    summary_writer = tf.summary.FileWriter(tmpFile, tf.get_default_graph())

    merged_summary_op = tf.summary.merge_all()

    Iterations = iter
    counter = 0
    temp_loss = 0
    allLoss = []

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        optimizer.minimize(sess)

        #tmp = fpassListContent[0].eval()
        #tf.summary.image('content', tmp, 3)

        answer = noise.eval()
        answer = answer.reshape(np.shape(answer)[1], np.shape(answer)[2], 3)
        skimage.io.imsave(
            os.path.join(output_directory, "final_texture_noHistMatch.png"),
            answer)
        answer = (utils.histogram_matching(answer, image) *
                  255.).astype('uint8')
        skimage.io.imsave(os.path.join(output_directory, "final_texture.png"),
                          answer)

        #Save the pyramid
        for w in range(1, len(noiseList)):
            outputPyramid = noiseList[w].eval()
            tmp = outputPyramid.reshape(
                np.shape(outputPyramid)[1],
                np.shape(outputPyramid)[2], 3)
            tmp = (utils.histogram_matching(tmp, image) * 255.).astype('uint8')
            skimage.io.imsave(
                os.path.join(output_directory,
                             "final_texture_pyra%s.png" % (str(w))), tmp)
Example #4
def run_tensorflow00(image, output_directory, vgg_class=vgg19.Vgg19):

    print('Begin execution of run_tensorflow')
    print(np.shape(image))

    with tf.name_scope('forward_pass'):
        tmp_vgg = vgg_class()
        x = tf.placeholder(dtype=tf.float32,
                           shape=np.shape(image),
                           name='placeholder_x')

        out = tmp_vgg.build(x)

    with tf.name_scope('forward_pass_var'):
        tmp_vgg2 = vgg_class()
        #x = tf.placeholder(dtype = tf.float32, shape = np.shape(image), name = 'placeholder_x')

        x2 = tf.get_variable("x_var",
                             dtype=tf.float64,
                             initializer=image,
                             trainable=False)
        x2 = tf.cast(x2, tf.float32)
        out2 = tmp_vgg2.build(x2)

    with tf.name_scope('build_pyramid'):
        gaussKerr = tf.get_variable(initializer=np.reshape(
            utils.gkern(5), (5, 5, 1, 1)),
                                    trainable=False,
                                    dtype='float64',
                                    name='gauss_kernel')
        gaussKerr = tf.cast(gaussKerr, tf.float32)
        downsamp_filt = tf.get_variable(initializer=np.reshape(
            np.array([[1., 0.], [0., 0.]]), (2, 2, 1, 1)),
                                        trainable=False,
                                        dtype='float64',
                                        name='downsample_filter')
        downsamp_filt = tf.cast(downsamp_filt, tf.float32)

        [tR, tG, tB] = tf.unstack(x2, num=3, axis=3)
        tR = tf.expand_dims(tR, 3)
        tG = tf.expand_dims(tG, 3)
        tB = tf.expand_dims(tB, 3)

        #convolve each input image with the gaussian filter

        tR_gauss = tf.nn.conv2d(tR,
                                gaussKerr,
                                strides=[1, 1, 1, 1],
                                padding='SAME')
        tG_gauss = tf.nn.conv2d(tG,
                                gaussKerr,
                                strides=[1, 1, 1, 1],
                                padding='SAME')
        tB_gauss = tf.nn.conv2d(tB,
                                gaussKerr,
                                strides=[1, 1, 1, 1],
                                padding='SAME')

        tR_downs = tf.nn.conv2d(tR_gauss,
                                downsamp_filt,
                                strides=[1, 2, 2, 1],
                                padding='SAME')
        tG_downs = tf.nn.conv2d(tG_gauss,
                                downsamp_filt,
                                strides=[1, 2, 2, 1],
                                padding='SAME')
        tB_downs = tf.nn.conv2d(tB_gauss,
                                downsamp_filt,
                                strides=[1, 2, 2, 1],
                                padding='SAME')

        tmp = tf.concat([tR_downs, tG_downs, tB_downs], axis=3)

    tmpFile = os.path.join(output_directory, "tensor/")
    if not os.path.exists(tmpFile):
        os.makedirs(tmpFile)

    summary_writer = tf.summary.FileWriter(tmpFile, tf.get_default_graph())
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        fpass = sess.run(out, feed_dict={x: image})
        fpass2 = sess.run(out2)
        print(np.shape(fpass2))

        pyramid = sess.run(tmp)
        print(np.shape(pyramid))
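The 2x2 downsampling kernel [[1., 0.], [0., 0.]] used with stride 2 above is plain subsampling: the convolution keeps the top-left pixel of every 2x2 block. A tiny NumPy illustration of the equivalent operation (illustrative only):

import numpy as np

x = np.random.rand(1, 8, 8, 1).astype(np.float32)   # toy NHWC image
# Equivalent to the stride-2 conv with the [[1, 0], [0, 0]] filter:
subsampled = x[:, ::2, ::2, :]
print(subsampled.shape)   # (1, 4, 4, 1)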
Example #5
def run_tensorflow(image,
                   noiseImage,
                   output_directory,
                   depth,
                   weightsLayers,
                   weightsPyramid,
                   iter,
                   vgg_class=vgg19.Vgg19):

    print('Begin execution of run_tensorflow')
    print(np.shape(image))

    # Variable for storing the target image
    target = tf.get_variable(name="target_image",
                             dtype=tf.float64,
                             initializer=image,
                             trainable=False)
    target = tf.cast(target, tf.float32)
    noise = tf.get_variable(name="noise_image",
                            dtype=tf.float32,
                            initializer=tf.constant(noiseImage),
                            trainable=True)
    #noise = tf.cast(noise, tf.float32)

    targetList = [target]
    noiseList = [noise]
    fpassListTarget = []  #list of vgg objects
    fpassListNoise = []
    outListTarget = []  #list of output layer of vgg objects
    outListNoise = []

    ## TODO ##
    # move the pyramid code to a function
    # it receives the targetList and the name_scope name, and returns the updated list
    with tf.name_scope('build_pyramid_target'):
        gaussKerr = tf.get_variable(initializer=np.reshape(
            utils.gkern(5), (5, 5, 1, 1)),
                                    trainable=False,
                                    dtype='float64',
                                    name='gauss_kernel')
        gaussKerr = tf.cast(gaussKerr, tf.float32)
        downsamp_filt = tf.get_variable(initializer=np.reshape(
            np.array([[1., 0.], [0., 0.]]), (2, 2, 1, 1)),
                                        trainable=False,
                                        dtype='float64',
                                        name='downsample_filter')
        downsamp_filt = tf.cast(downsamp_filt, tf.float32)

        for i in range(depth):
            with tf.name_scope('cycle%d' % (i)):
                [tR, tG, tB] = tf.unstack(targetList[i], num=3, axis=3)
                tR = tf.expand_dims(tR, 3)
                tG = tf.expand_dims(tG, 3)
                tB = tf.expand_dims(tB, 3)

                #convolve each input image with the gaussian filter
                tR_gauss = tf.nn.conv2d(tR,
                                        gaussKerr,
                                        strides=[1, 1, 1, 1],
                                        padding='SAME')
                tG_gauss = tf.nn.conv2d(tG,
                                        gaussKerr,
                                        strides=[1, 1, 1, 1],
                                        padding='SAME')
                tB_gauss = tf.nn.conv2d(tB,
                                        gaussKerr,
                                        strides=[1, 1, 1, 1],
                                        padding='SAME')

                tR_downs = tf.nn.conv2d(tR_gauss,
                                        downsamp_filt,
                                        strides=[1, 2, 2, 1],
                                        padding='SAME')
                tG_downs = tf.nn.conv2d(tG_gauss,
                                        downsamp_filt,
                                        strides=[1, 2, 2, 1],
                                        padding='SAME')
                tB_downs = tf.nn.conv2d(tB_gauss,
                                        downsamp_filt,
                                        strides=[1, 2, 2, 1],
                                        padding='SAME')

                tmp = tf.concat([tR_downs, tG_downs, tB_downs], axis=3)

                targetList.append(tmp)

    ## TODO ##
    ## Find out what to do with the reuse
    with tf.name_scope('build_pyramid_noise'):
        #gaussKerr = tf.get_variable(initializer = np.reshape(utils.gkern(5), (5,5,1,1)), trainable=False, dtype='float64', name='gauss_kernel')
        #gaussKerr = tf.cast(gaussKerr, tf.float32, reuse=True)
        #downsamp_filt = tf.get_variable(initializer = np.reshape(np.array([[1.,0.],[0.,0.]]), (2,2,1,1)), trainable=False, dtype='float64', name='downsample_filter')
        #downsamp_filt = tf.cast(downsamp_filt, tf.float32, reuse=True)

        for i in range(depth):
            with tf.name_scope('cycle%d' % (i)):
                [tR, tG, tB] = tf.unstack(noiseList[i], num=3, axis=3)
                tR = tf.expand_dims(tR, 3)
                tG = tf.expand_dims(tG, 3)
                tB = tf.expand_dims(tB, 3)

                #convolve each input image with the gaussian filter
                tR_gauss = tf.nn.conv2d(tR,
                                        gaussKerr,
                                        strides=[1, 1, 1, 1],
                                        padding='SAME')
                tG_gauss = tf.nn.conv2d(tG,
                                        gaussKerr,
                                        strides=[1, 1, 1, 1],
                                        padding='SAME')
                tB_gauss = tf.nn.conv2d(tB,
                                        gaussKerr,
                                        strides=[1, 1, 1, 1],
                                        padding='SAME')

                tR_downs = tf.nn.conv2d(tR_gauss,
                                        downsamp_filt,
                                        strides=[1, 2, 2, 1],
                                        padding='SAME')
                tG_downs = tf.nn.conv2d(tG_gauss,
                                        downsamp_filt,
                                        strides=[1, 2, 2, 1],
                                        padding='SAME')
                tB_downs = tf.nn.conv2d(tB_gauss,
                                        downsamp_filt,
                                        strides=[1, 2, 2, 1],
                                        padding='SAME')

                tmp = tf.concat([tR_downs, tG_downs, tB_downs], axis=3)

                noiseList.append(tmp)

    # fpassList is a list of vgg instances
    # here we run the build method for each instance and
    # store the output (last layer) on outList
    with tf.name_scope('forward_pass_target'):
        for j in range(len(targetList)):
            with tf.name_scope('cycle%d' % (j)):
                fpassListTarget.append(vgg_class())
                out = fpassListTarget[j].build(targetList[j])
                outListTarget.append(out)

    with tf.name_scope('forward_pass_noise'):
        for j in range(len(targetList)):
            with tf.name_scope('cycle%d' % (j)):
                fpassListNoise.append(vgg_class())
                out = fpassListNoise[j].build(noiseList[j])
                outListNoise.append(out)

    ###################################################
    ## Loss function
    with tf.name_scope('lossFunction'):
        # Check that there are at least as many layer weights as layers
        assert len(weightsLayers) >= fpassListTarget[0].getLayersCount()
        assert len(weightsPyramid) >= len(fpassListTarget)

        loss_sum = 0.0
        #for i in range(0,5): #layers
        for j in range(len(fpassListTarget)):  #pyramid levels
            with tf.name_scope('cyclePyramid%d' % (j)):
                loss_pyra = 0.0
                for i in range(0,
                               fpassListTarget[0].getLayersCount()):  #layers
                    with tf.name_scope('cycleLayer%d' % (i)):
                        origin = fpassListTarget[j].conv_list[i]
                        new = fpassListNoise[j].conv_list[i]
                        shape = origin.get_shape().as_list()
                        N = shape[3]  #number of channels (filters)
                        M = shape[1] * shape[2]  #width x height
                        F = tf.reshape(origin, (-1, N),
                                       name='CreateF_target')  #N x M
                        Gram_o = (
                            tf.matmul(tf.transpose(F, name='transpose_target'),
                                      F,
                                      name='Gram_target') / (N * M))
                        F_t = tf.reshape(new, (-1, N), name='CreateF_noise')
                        Gram_n = tf.matmul(tf.transpose(
                            F_t, name='transpose_noise'),
                                           F_t,
                                           name='Gram_noise') / (N * M)
                        loss = tf.nn.l2_loss(
                            (Gram_o - Gram_n), name='lossGramsubstraction') / 2
                        loss = tf.scalar_mul(weightsLayers[i], loss)
                        loss_pyra = tf.add(loss_pyra, loss)
                loss_pyra = tf.scalar_mul(weightsPyramid[j], loss_pyra)
                loss_sum = tf.add(loss_sum, loss_pyra)
        tf.summary.scalar("loss_sum", loss_sum)

    yolo = tf.Variable(np.zeros((20, 20)), name='yolo')
    yolo2 = tf.get_variable("big_matrix",
                            shape=(784, 10),
                            initializer=tf.zeros_initializer())

    print(yolo)
    print(yolo2)
    print(noise)
    dummy = tf.get_variable(name="dummy",
                            dtype=tf.float64,
                            initializer=np.zeros((5, 5)),
                            trainable=True)
    #train_step = tf.train.AdamOptimizer(0.01).minimize(loss_sum, var_list=[noise])
    train_step = tf.train.AdagradOptimizer(0.01).minimize(loss_sum,
                                                          var_list=[noise])

    restrict = tf.maximum(0., tf.minimum(1., noise), name="Restrict_noise")
    r_noise = noise.assign(restrict)

    tmpFile = os.path.join(output_directory, "tensor/")
    if not os.path.exists(tmpFile):
        os.makedirs(tmpFile)

    summary_writer = tf.summary.FileWriter(tmpFile, tf.get_default_graph())

    merged_summary_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        #fpass2 = sess.run(out2)
        #print(np.shape(fpass2))

        #pyramid = sess.run(targetList)
        #print(np.shape(targetList))

        #sess.run([outListTarget, outListNoise])
        Iterations = iter
        counter = 0
        temp_loss = 0
        allLoss = []

        for i in range(0, Iterations):
            a = sess.run([train_step])
            print(type(a))
            sess.run([r_noise])
            print(np.shape(r_noise))

            if i == 0:
                temp_loss = loss_sum.eval()
            if i % 10 == 0:
                loss = loss_sum.eval()
                if loss > temp_loss:
                    counter += 1
                sys.stdout.write('\r')
                sys.stdout.write("[%-50s] %d/%d ,loss=%e" %
                                 ('=' * int(i * 50 / iter), i, iter, loss))
                sys.stdout.flush()
                temp_loss = loss
            if i % 10 == 0 and i != 0 and i <= 200:
                answer = noise.eval()
                answer = answer.reshape(
                    np.shape(answer)[1],
                    np.shape(answer)[2], 3)
                #answer = (answer*255).astype('uint8')
                answer = (utils.histogram_matching(answer, image) *
                          255.).astype('uint8')
                # print('Mean = ', np.mean(answer))
                filename = os.path.join(output_directory,
                                        "%safter.jpg" % (str(i)))
                skimage.io.imsave(filename, answer)

                #Save the pyramid
                for w in range(1, len(noiseList)):
                    outputPyramid = noiseList[w].eval()
                    tmp = outputPyramid.reshape(
                        np.shape(outputPyramid)[1],
                        np.shape(outputPyramid)[2], 3)
                    tmp = (utils.histogram_matching(tmp, image) *
                           255.).astype('uint8')
                    filename = os.path.join(
                        output_directory,
                        "%safter%spyra.jpg" % (str(i), str(w)))
                    skimage.io.imsave(filename, tmp)
            if i % 200 == 0 and i != 0 and i > 200:
                answer = noise.eval()
                answer = answer.reshape(
                    np.shape(answer)[1],
                    np.shape(answer)[2], 3)
                #answer = (answer*255).astype('uint8')
                answer = (utils.histogram_matching(answer, image) *
                          255.).astype('uint8')
                # print('Mean = ', np.mean(answer))
                filename = os.path.join(output_directory,
                                        "%safter.jpg" % (str(i)))
                skimage.io.imsave(filename, answer)

                #Save the pyramid
                for w in range(1, len(noiseList)):
                    outputPyramid = noiseList[w].eval()
                    tmp = outputPyramid.reshape(
                        np.shape(outputPyramid)[1],
                        np.shape(outputPyramid)[2], 3)
                    tmp = (utils.histogram_matching(tmp, image) *
                           255.).astype('uint8')
                    filename = os.path.join(
                        output_directory,
                        "%safter%spyra.jpg" % (str(i), str(w)))
                    skimage.io.imsave(filename, tmp)

            #allLoss.append(loss_sum.eval())
            allLoss.append(temp_loss)

            if counter > 3000:
                print('\n', 'Early Stop!')
                break

            summary_str = sess.run(merged_summary_op)
            summary_writer.add_summary(summary_str, 1)

        answer = noise.eval()
        answer = answer.reshape(np.shape(answer)[1], np.shape(answer)[2], 3)
        skimage.io.imsave(
            os.path.join(output_directory, "final_texture_noHistMatch.png"),
            answer)
        answer = (utils.histogram_matching(answer, image) *
                  255.).astype('uint8')
        skimage.io.imsave(os.path.join(output_directory, "final_texture.png"),
                          answer)

        #Save the pyramid
        for w in range(1, len(noiseList)):
            outputPyramid = noiseList[w].eval()
            tmp = outputPyramid.reshape(
                np.shape(outputPyramid)[1],
                np.shape(outputPyramid)[2], 3)
            tmp = (utils.histogram_matching(tmp, image) * 255.).astype('uint8')
            skimage.io.imsave(
                os.path.join(output_directory,
                             "final_texture_pyra%s.png" % (str(w))), tmp)

        #Some plotting
        plotting(allLoss, iter, output_directory)
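The Gram-matrix computation in the style loss above is repeated for every layer and every pyramid level. A small helper that captures the same formula, F^T F / (N * M) on the reshaped activations (a sketch, not part of the original code):

def gram_matrix(features, name='Gram'):
    # features: conv activations of shape (1, H, W, N).
    shape = features.get_shape().as_list()
    N = shape[3]              # number of channels (filters)
    M = shape[1] * shape[2]   # width x height
    F = tf.reshape(features, (-1, N))
    return tf.matmul(tf.transpose(F), F, name=name) / (N * M)

With it, the per-layer style term becomes tf.nn.l2_loss(gram_matrix(origin) - gram_matrix(new)) / 2.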
Example #6
def run_synthesis_pyramid(tex,
                          images,
                          proc_img,
                          iterations,
                          output_directory,
                          weightsLayers,
                          weightsPyramid,
                          vgg_class=vgg19.Vgg19):
    config = tf.ConfigProto()

    gaussKerr = tf.cast(
        tf.Variable(np.reshape(utils.gkern(5), (5, 5, 1, 1)),
                    trainable=False,
                    dtype='float64'), tf.float32)

    #os.environ["CUDA_VISIBLE_DEVICES"]="0"
    #exit(0)
    with tf.Session(config=config) as sess:
        vggs = [vgg_class() for i in range(len(images))]
        vggs2 = [vgg_class() for i in range(len(images))]

        assign_ops = []  # collect the per-level pyramid update ops
        for j in range(len(tex) - 1):
            # Pyramid in TF
            [tR, tG, tB] = tf.unstack(tex[j], num=3, axis=3)
            tR = tf.expand_dims(tR, 3)
            tG = tf.expand_dims(tG, 3)
            tB = tf.expand_dims(tB, 3)

            #convolve each input image with the gaussian filter

            tR_gauss = tf.nn.conv2d(tR,
                                    gaussKerr,
                                    strides=[1, 1, 1, 1],
                                    padding='SAME')
            tG_gauss = tf.nn.conv2d(tG,
                                    gaussKerr,
                                    strides=[1, 1, 1, 1],
                                    padding='SAME')
            tB_gauss = tf.nn.conv2d(tB,
                                    gaussKerr,
                                    strides=[1, 1, 1, 1],
                                    padding='SAME')

            #tmpR = tf.py_func(downSample, tR_gauss, tf.float32)
            #tmpG = tf.py_func(downSample, tG_gauss, tf.float32)
            #tmpB = tf.py_func(downSample, tB_gauss, tf.float32)

            tmp = tf.concat([tR_gauss, tG_gauss, tB_gauss], axis=3)

            print("<<<<<<<<<<<<<<<HYPNOTOAD>>>>>>>>>>>>>>>>>>>>>>>>")
            print(tmp)
            print(tmp.get_shape())
            print(tmp.get_shape().as_list()[1:])
            print("<<<<<<<<<<<<<<<HYPNOTOAD>>>>>>>>>>>>>>>>>>>>>>>>")

            newTmp = tf.py_func(downSample, [tmp], tf.float32)

            #print("<<<<<<<<<<<<<<<HYPNOTOAD>>>>>>>>>>>>>>>>>>>>>>>>")
            #print(newTmp)
            #print(newTmp.get_shape())
            #print(newTmp.get_shape().as_list()[1:])
            #print("<<<<<<<<<<<<<<<HYPNOTOAD>>>>>>>>>>>>>>>>>>>>>>>>")
            assign_ops.append(tex[j + 1].assign(newTmp))

        with tf.name_scope("origin"):
            for i in range(len(images)):
                vggs[i].build(images[i])

        with tf.name_scope("new"):
            for i in range(len(images)):
                vggs2[i].build(tex[i])

        # Check that there are at least as many layer weights as layers
        assert len(weightsLayers) >= vggs[0].getLayersCount()
        assert len(weightsPyramid) >= len(images)

        loss_sum = 0.0
        #for i in range(0,5): #layers
        for j in range(len(images)):  #pyramid levels
            loss_pyra = 0.0
            for i in range(0, vggs[0].getLayersCount()):  #layers
                origin = vggs[j].conv_list[i]
                new = vggs2[j].conv_list[i]
                shape = origin.get_shape().as_list()
                N = shape[3]  #number of channels (filters)
                M = shape[1] * shape[2]  #width x height
                F = tf.reshape(origin, (-1, N))  #N x M
                Gram_o = (tf.matmul(tf.transpose(F), F) / (N * M))
                F_t = tf.reshape(new, (-1, N))
                Gram_n = tf.matmul(tf.transpose(F_t), F_t) / (N * M)
                loss = tf.nn.l2_loss((Gram_o - Gram_n)) / 2
                loss = tf.scalar_mul(weightsLayers[i], loss)
                loss_pyra = tf.add(loss_pyra, loss)
            loss_pyra = tf.scalar_mul(weightsPyramid[j], loss_pyra)
            loss_sum = tf.add(loss_sum, loss_pyra)
        tf.summary.scalar("loss_sum", loss_sum)

        train_step = tf.train.AdamOptimizer(0.01).minimize(loss_sum,
                                                           var_list=[tex])

        restrict = tf.maximum(0., tf.minimum(1., tex[0]))
        r_tex = tex[0].assign(restrict)

        merged_summary_op = tf.summary.merge_all()

        tmpFile = os.path.join(output_directory, "tensor/")
        if not os.path.exists(tmpFile):
            os.makedirs(tmpFile)
        print(tmpFile)
        summary_writer = tf.summary.FileWriter(tmpFile, sess.graph)

        #sess.run(tf.initialize_all_variables())
        sess.run(tf.global_variables_initializer())

        Iteration = iterations
        counter = 0
        temp_loss = 0
        allLoss = []
        for i in range(0, Iteration):
            #print('ITERATION'+str(i))
            sess.run(train_step)
            sess.run(r_tex)
            for op in assign_ops:  # refresh each pyramid level from the one above, in order
                sess.run(op)

            if i == 0:
                temp_loss = loss_sum.eval()
            if i % 10 == 0:
                loss = loss_sum.eval()
                if loss > temp_loss:
                    counter += 1
                sys.stdout.write('\r')
                sys.stdout.write(
                    "[%-50s] %d/%d ,loss=%e" %
                    ('=' * int(i * 50 / Iteration), i, Iteration, loss))
                sys.stdout.flush()
                temp_loss = loss
            if i % 100 == 0 and i != 0:
                answer = tex[0].eval()
                answer = answer.reshape(
                    np.shape(answer)[1],
                    np.shape(answer)[2], 3)
                #answer = (answer*255).astype('uint8')
                answer = (utils.histogram_matching(answer, proc_img) *
                          255.).astype('uint8')
                # print('Mean = ', np.mean(answer))
                filename = os.path.join(output_directory,
                                        "%safter.jpg" % (str(i)))
                skimage.io.imsave(filename, answer)

                #Save the pyramid
                for w in range(1, len(tex)):
                    outputPyramid = tex[w].eval()
                    tmp = outputPyramid.reshape(
                        np.shape(outputPyramid)[1],
                        np.shape(outputPyramid)[2], 3)
                    tmp = (utils.histogram_matching(tmp, proc_img) *
                           255.).astype('uint8')
                    filename = os.path.join(
                        output_directory,
                        "%safter%spyra.jpg" % (str(i), str(w)))
                    skimage.io.imsave(filename, tmp)
            #allLoss.append(loss_sum.eval())
            allLoss.append(temp_loss)

            if counter > 3000:
                print('\n', 'Early Stop!')
                break
            '''
            answer = tex[0].eval()
            #print(answer)
            pyramid = create_pyramids((answer.reshape(np.shape(answer)[1], np.shape(answer)[2], 3)*255).astype('uint8'), levels)


            im2 = [i.reshape((1, np.shape(i)[0], np.shape(i)[1], 3)) for i in pyramid]
            im2 = [tf.cast(tf.convert_to_tensor(i, dtype="float64"), tf.float32) for i in im2]
            #t_pyramid = tuple(tf.convert_to_tensor(np.reshape(i, (1, np.shape[0], np.shape[1], 3))) for i in pyramid)
            #t_pyramid = tuple(tf.convert_to_tensor(i) for i in im2)

            #print(t_pyramid[0].get_shape())
            #print("**********************")

            for j in range(1,len(im2)):
                sess.run(tex[j].assign(im2[j]))

            #print(pyramid)
            '''

        summary_str = sess.run(merged_summary_op)
        summary_writer.add_summary(summary_str, 1)

        answer = tex[0].eval()
        answer = answer.reshape(np.shape(answer)[1], np.shape(answer)[2], 3)
        answer = (utils.histogram_matching(answer, proc_img) *
                  255.).astype('uint8')
        skimage.io.imsave(os.path.join(output_directory, "final_texture.png"),
                          answer)

        #Save the pyramid
        for w in range(1, len(tex)):
            outputPyramid = tex[w].eval()
            tmp = outputPyramid.reshape(
                np.shape(outputPyramid)[1],
                np.shape(outputPyramid)[2], 3)
            tmp = (utils.histogram_matching(tmp, proc_img) *
                   255.).astype('uint8')
            skimage.io.imsave(
                os.path.join(output_directory,
                             "final_texture_pyra%s.png" % (str(w))), tmp)

        #Some plotting
        plotting(allLoss, iterations, output_directory)
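`downSample`, wrapped with tf.py_func above, is not defined in this snippet. A plausible NumPy implementation consistent with how it is used here (an assumption): keep every other row and column of the already-blurred NHWC image.

import numpy as np

def downSample(img):
    # img: NHWC float array that has already been blurred by the Gaussian kernel.
    return img[:, ::2, ::2, :].astype(np.float32)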
Example #7
def main():
    args = parse_arguments()
    result_folder = args.result_path     # directory where the results are stored
    input_folder = os.path.join(args.input_path,'images/')    # folder with the input images
    adv_img_folder = os.path.join(result_folder, 'images')    # folder for the adversarial examples
    if not os.path.exists(adv_img_folder):
        os.makedirs(adv_img_folder)
    if not os.path.exists(result_folder):
        os.makedirs(result_folder)       # create the directory if it does not exist
    norm = imgnormalize()              # normalization helper
    Totensor = transforms.Compose([transforms.ToTensor()])                        # conversion to tensor
    device = torch.device("cuda:0")                 # GPU ID
    source_model_names = args.source_model          # surrogate models
    num_source_models = len(source_model_names)     # number of surrogate models
    source_models = []                              # load the corresponding network for each surrogate model name
    for model_name in source_model_names:
        print("Loading: {}".format(model_name))
        source_model = models.__dict__[model_name](pretrained=True).eval()
        for param in source_model.parameters():
            param.requires_grad = False             # no gradients needed
        source_model.to(device)                     # move to the compute device
        source_models.append(source_model)          # ensemble
    seed_num=1                                      # random seed
    random.seed(seed_num)                           # seed Python's random module
    np.random.seed(seed_num)
    torch.manual_seed(seed_num)                     # torch random seed
    torch.backends.cudnn.deterministic = True
    # TI (translation-invariant attack) parameters
    channels = 3                                      # 3 channels
    kernel_size = 5                                   # kernel size
    kernel = gkern(kernel_size, 1).astype(np.float32)      # second argument is the kernel sigma
    gaussian_kernel = np.stack([kernel, kernel, kernel])   # 3 x 5 x 5
    gaussian_kernel = np.expand_dims(gaussian_kernel, 1)   # 3 x 1 x 5 x 5 (depthwise conv weight layout)
    gaussian_kernel = torch.from_numpy(gaussian_kernel).cuda()  # tensor and cuda
    gaussian_filter = nn.Conv2d(in_channels=channels, out_channels=channels,
                                    kernel_size=kernel_size, groups=channels, bias=False, padding=7)
    gaussian_filter.weight.data = gaussian_kernel          # Gaussian filter: assign the Gaussian kernel as its weights
    # split the data; multi-GPU attack
    image_id_list, label_ori_list = get_truth_info(os.path.join(args.input_path,'dev.csv'))
    num_batches = int(np.ceil(len(image_id_list)/args.batch_size))                                  # the total number of attack targets must be divisible by the batch size
    gaussian_smoothing = get_gaussian_kernel(kernel_size=5, sigma=1, channels=3, use_cuda=True)        # Gaussian kernel (filters out some high-frequency information), size 5, sigma 1
    print('start attacking....')
    for k in tqdm(range(0,num_batches)):
        time.sleep(0.1)
        X_ori = torch.zeros(args.batch_size, 3, args.img_size, args.img_size).to(device)     # allocate the input batch
        delta = torch.zeros_like(X_ori,requires_grad=True).to(device)                        # allocate the perturbation
        for i in range(args.batch_size):
            X_ori[i] = Totensor(Image.open(input_folder + image_id_list[k * args.batch_size + i]))  # load the input images
        X_ori = gaussian_smoothing(X_ori)                                                           # apply Gaussian smoothing to the inputs
        # get the ground-truth labels
        labels_gt = torch.tensor(label_ori_list[k*args.batch_size:(k*args.batch_size+args.batch_size)]).to(device)
        grad_momentum = 0    # gradient momentum (MI)
        for t in range(args.max_iterations):
            g_temp = []
            for tt in range(len(liner_interval)):
                if args.di:
                    X_adv = X_ori + delta
                    X_adv = DI(X_adv)         # DI (diverse-input) transform
                    X_adv = nn.functional.interpolate(X_adv, (224, 224), mode='bilinear', align_corners=False)        # resize to 224
                else:
                    c = liner_interval[tt]
                    X_adv = X_ori + c * delta  # without DI, scale the perturbation by the interpolation coefficient instead
                    X_adv = nn.functional.interpolate(X_adv, (224, 224), mode='bilinear', align_corners=False)  # resize to 224
                logits = 0
                for source_model_n, source_model in zip(source_model_names, source_models):
                    logits += source_model(norm(X_adv))               # ensemble the logits
                logits /= num_source_models
                loss = -nn.CrossEntropyLoss()(logits,labels_gt)       # cross-entropy loss
                loss.backward()                                       # backpropagate
                # MI + TI steps
                grad_c = delta.grad.clone()                           # use MI and TI together
                grad_c = F.conv2d(grad_c, gaussian_kernel, bias=None, stride=1, padding=(2,2), groups=3)
                #grad_a = grad_c / torch.mean(torch.abs(grad_c), (1, 2, 3), keepdim=True)+0.5*grad_momentum   # 1
                grad_a = grad_c
                grad_momentum = grad_a
                g_temp.append(grad_a)
            g0 = 0.0
            for j in range(len(g_temp)):
                g0 += g_temp[j]      # average to cancel noise (several random DI passes suppress noise and keep the useful signal)
            g0 = g0 / len(g_temp)
            delta.grad.zero_()                                      # reset the gradient
            # L-infinity attack step
            delta.data=delta.data-args.lr * torch.sign(g0)
            delta.data=delta.data.clamp(-args.linf_epsilon/255.,args.linf_epsilon/255.)
            delta.data=((X_ori+delta.data).clamp(0,1))-X_ori       # clip the perturbation so the image stays in [0, 1]

        for i in range(args.batch_size):
            adv_final = (X_ori+delta)[i].cpu().detach().numpy()
            adv_final = (adv_final*255).astype(np.uint8)
            file_path = os.path.join(adv_img_folder, image_id_list[k * args.batch_size + i])
            adv_x_255 = np.transpose(adv_final, (1, 2, 0))
            im = Image.fromarray(adv_x_255)
            im.save(file_path,quality=99)
    torch.cuda.empty_cache()
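`get_gaussian_kernel`, used above to smooth the inputs, is not defined in this snippet. A sketch with the same signature, building a fixed depthwise Gaussian-blur layer in the same way the TI kernel is built (an assumption about the helper, not the project's actual code):

import numpy as np
import scipy.stats as st
import torch
import torch.nn as nn

def get_gaussian_kernel(kernel_size=5, sigma=1, channels=3, use_cuda=True):
    # Normalized 2D Gaussian, replicated once per channel for a depthwise conv.
    x = np.linspace(-3 * sigma, 3 * sigma, kernel_size)
    kern1d = st.norm.pdf(x, scale=sigma)
    kern2d = np.outer(kern1d, kern1d)
    kern2d = (kern2d / kern2d.sum()).astype(np.float32)
    weight = torch.from_numpy(np.tile(kern2d, (channels, 1, 1, 1)))  # (C, 1, k, k)
    conv = nn.Conv2d(channels, channels, kernel_size, groups=channels,
                     bias=False, padding=kernel_size // 2)
    conv.weight.data = weight
    conv.weight.requires_grad = False
    return conv.cuda() if use_cuda else conv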
Example #8
def main():

    m = Mask()
    c = Classifier()

    device = torch.device('cuda')

    # IR_50
    model_ir50 = IR_50([112, 112])
    model_ir50.load_state_dict(
        torch.load('./ckpt/backbone_ir50_ms1m_epoch120.pth',
                   map_location='cuda'))
    model_ir50.eval().to(device).zero_grad()

    # IR_152
    model_ir152 = IR_152([112, 112])
    model_ir152.load_state_dict(
        torch.load('./ckpt/Backbone_IR_152_Epoch_112_Batch.pth',
                   map_location='cuda'))
    model_ir152.eval().to(device).zero_grad()

    # IR_SE_50
    model_irse50 = Backbone(50, mode='ir_se')
    model_irse50.load_state_dict(
        torch.load('./ckpt/model_ir_se50.pth', map_location='cuda'))
    model_irse50.eval().to(device).zero_grad()

    eps = (args.max_epsilon / 255.0)
    alpha = eps / args.iterations

    momentum = args.momentum

    kernel = gkern(args.kernlen, args.sig).astype(np.float32)
    stack_kernel = np.stack([kernel, kernel, kernel])
    stack_kernel = np.expand_dims(stack_kernel, 1)
    stack_kernel = torch.Tensor(stack_kernel).to(device)

    counter = 0
    total_distance = 0
    num = 1
    for raw_images, filenames, _ in load_images_with_names(
            args.input_dir, args.batch_size):

        if num * args.batch_size > 712:
            batch_size = 712 - (num - 1) * args.batch_size
        else:
            batch_size = args.batch_size
        num += 1

        in_tensor = process(raw_images)
        raw_variable = in_tensor.detach().to(device)

        # raw embedding
        raw_ir50 = model_ir50(raw_variable)
        raw_ir152 = model_ir152(raw_variable)
        raw_irse50 = model_irse50(raw_variable)

        true_labels = c.classifier(raw_ir50.data.cpu().detach().numpy())

        bias_ir50, bias_ir152, bias_irse50 = found_bias_v2(
            raw_ir50.data.cpu().detach().numpy(),
            raw_ir152.data.cpu().detach().numpy(),
            raw_irse50.data.cpu().detach().numpy(), batch_size)

        perturbation = torch.Tensor(batch_size, 3, 112,
                                    112).uniform_(-0.01, 0.01).to(device)
        in_variable = raw_variable + perturbation
        in_variable.data.clamp_(-1.0, 1.0)
        in_variable.requires_grad = True

        last_grad = 0.0
        momentum_sum = 0.0

        for step in range(args.iterations):

            new_ir50 = model_ir50(in_variable)
            new_ir152 = model_ir152(in_variable)
            new_irse50 = model_irse50(in_variable)

            loss1 = -torch.mean(
                torch.cosine_similarity(x1=raw_ir50, x2=new_ir50, dim=1) * 1.7
                + torch.cosine_similarity(x1=raw_ir152, x2=new_ir152, dim=1) *
                0.35 +
                torch.cosine_similarity(x1=raw_irse50, x2=new_irse50, dim=1) *
                0.65) / 2.7

            loss2 = torch.mean(
                torch.cosine_similarity(
                    x1=torch.from_numpy(bias_ir50).detach().to(device),
                    x2=new_ir50,
                    dim=1) * 1.7 + torch.cosine_similarity(x1=torch.from_numpy(
                        bias_ir152).detach().to(device),
                                                           x2=new_ir152,
                                                           dim=1) * 0.35 +
                torch.cosine_similarity(x1=torch.from_numpy(
                    bias_irse50).detach().to(device),
                                        x2=new_irse50,
                                        dim=1) * 0.65) / 2.7
            loss = loss1 + loss2

            print('loss :', loss)

            loss.backward(retain_graph=True)

            data_grad = in_variable.grad.data

            data_grad = F.conv2d(data_grad,
                                 stack_kernel,
                                 padding=(args.kernlen - 1) // 2,
                                 groups=3)

            for i in range(data_grad.shape[0]):
                data_grad[i] = data_grad[i] / torch.mean(
                    data_grad[i].norm(2, 0) / 1.713)

            if step == 0:
                noise = data_grad
            else:
                noise = last_grad * momentum + data_grad * 0.9

            last_grad = noise.detach()
            norm = noise.norm(dim=1).unsqueeze(1)
            index = norm.mean()
            momentum_sum = momentum_sum * momentum + 1.0
            d_img = noise * norm * alpha / (momentum_sum * index)
            d_img = d_img / d_img.norm(dim=1).mean() * alpha

            perturb_mask = m.get_perturb_mask(
                new_ir50.data.detach().cpu().numpy(),
                new_ir152.data.detach().cpu().numpy(),
                new_irse50.data.detach().cpu().numpy(), true_labels,
                args.cos_margin)

            in_variable.data = in_variable.data + \
                               d_img * torch.from_numpy(perturb_mask.reshape([batch_size, 1, 1, 1])).to(device).float()

            raw_variable.data = torch.clamp(in_variable.data, -1.0, 1.0)
            in_variable.grad.data.zero_()

        advs = raw_variable.data.cpu().detach().numpy()
        advs = advs.swapaxes(1, 2).swapaxes(2, 3)

        total_distance_ = save_images(raw_images, advs, filenames,
                                      args.output_dir)
        total_distance += total_distance_
        counter += batch_size
        print('attack images num : [%d / 712]' % counter)
    print('mean_dist:', total_distance / 712.0)