def stp_transformation(prev_image, stp_input, num_masks, reuse=None):
    """Apply spatial transformer predictor (STP) to previous image.

    Args:
      prev_image: previous image to be transformed.
      stp_input: hidden layer to be used for computing STN parameters.
      num_masks: number of masks and hence the number of STP transformations.
      reuse: whether to reuse the fully connected layer variables.
    Returns:
      List of images transformed by the predicted STP parameters.
    """
    # Only import spatial transformer if needed.
    from transformer.spatial_transformer import transformer

    identity_params = tf.convert_to_tensor(
        np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
    transformed = []
    for i in range(num_masks):
        params = slim.layers.fully_connected(stp_input,
                                             6,
                                             scope='stp_params' + str(i),
                                             activation_fn=None,
                                             reuse=reuse) + identity_params
        outsize = (prev_image.get_shape()[1], prev_image.get_shape()[2])
        transformed.append(transformer(prev_image, params, outsize))

    return transformed
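A minimal usage sketch (hypothetical shapes; assumes TF 1.x with `np` and `slim` imported at module level, as in the file this function comes from):

import tensorflow as tf

prev_image = tf.placeholder(tf.float32, [8, 64, 64, 3])  # hypothetical batch of previous frames
stp_input = tf.placeholder(tf.float32, [8, 128])          # hypothetical flattened hidden features
warped = stp_transformation(prev_image, stp_input, num_masks=4)
# `warped` is a list of 4 tensors, one per predicted affine transform, each of shape [8, 64, 64, 3]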
Example #2
def sp_camera(image,z,options,reuse=False,name="sp_camera"):
	with tf.variable_scope(name):
		if reuse:
			tf.get_variable_scope().reuse_variables()
		else:
			assert tf.get_variable_scope().reuse == False

		#h0 = lrelu(conv2d(image, options.df_dim, name='ci_h0_conv_sp'))
		#h1 = lrelu(instance_norm(conv2d(h0, options.df_dim*1,7,7, name='ci_h1_conv_sp'), 'ci_bn1_sp'))
		#h2 = lrelu(instance_norm(conv2d(h1, options.df_dim*2,7,7, name='ci_h2_conv_sp'), 'ci_bn2_sp'))
		#h3 = lrelu(instance_norm(conv2d(h2, options.df_dim*4,6,6, name='ci_h3_conv_sp'), 'ci_bn3_sp'))
		#h4 = lrelu(instance_norm(conv2d(h3, options.df_dim*4,6,6, name='ci_h4_conv_sp'), 'ci_bn4_sp'))
		#h5 = lrelu(instance_norm(conv2d(h4, options.df_dim*8, name='ci_h5_conv_sp'), 'ci_bn5_sp'))

		# Encoder: a stack of stride-2 convolutions progressively down-samples the image.
		h0 = conv2d(image, options.df_dim, 3, 2, name='ci_h0_conv_sp')
		h1 = instance_norm(conv2d(lrelu(h0), options.df_dim // 2, 3, 2, name='ci_h1_conv_sp'), 'ci_bn1_sp')
		h2 = instance_norm(conv2d(lrelu(h1), options.df_dim // 2, 3, 2, name='ci_h2_conv_sp'), 'ci_bn2_sp')
		h3 = instance_norm(conv2d(lrelu(h2), options.df_dim, 3, 2, name='ci_h3_conv_sp'), 'ci_bn3_sp')
		h4 = instance_norm(conv2d(lrelu(h3), options.df_dim, 2, 2, name='ci_h4_conv_sp'), 'ci_bn4_sp')
		h5 = instance_norm(conv2d(lrelu(h4), options.df_dim*2, 2, 2, name='ci_h5_conv_sp'), 'ci_bn5_sp')
		h6 = instance_norm(conv2d(lrelu(h5), options.df_dim*2, 2, 2, name='ci_h6_conv_sp'), 'ci_bn6_sp')

		h6_1 = tf.reshape(h6, (tf.shape(h6)[0], options.df_dim*2))
		h6_2 = linear(h6_1,30,'lin0_sp')
		h7 = tf.concat([h6_2,z],axis=-1)

		# Localization head: regress 3 parameters (scale, tx, ty), biases initialized to zero.
		W1_fc_loc1 = weight_variable([int(h7.get_shape()[1]), 3])
		tmp_s = 0
		tmp_t = 0
		initial1 = np.array([tmp_s, tmp_t, tmp_t], dtype=np.float32)
		b1_fc_loc1 = tf.Variable(initial_value=initial1, name='b1_fc_loc2_sp')
		feat = tf.nn.tanh(tf.matmul(h7, W1_fc_loc1) + b1_fc_loc1)
		#feat = tf.clip_by_value(feat,0,0.3)

		h1_fc_loc2 = tf.multiply(feat, tf.constant(1.0))

		out_size = (options.crop_size, options.crop_size)

		tf_s = []
		tf_tx = []
		tf_ty = []
		mtx = []
		h_trans = []

		# For each of the 8 output frames, build an affine [s, 0, tx, 0, s, ty] whose scale and
		# translation grow linearly with the frame index i, then warp the RGB input image.
		for i in range(0, 8):
			tf_s.append( tf.expand_dims(tf.add(tf.multiply(h1_fc_loc2[:,0],tf.constant(float(i))), tf.constant(0.76),name='sp_s'+str(i)),-1) )
			tf_tx.append( tf.expand_dims(tf.add(tf.multiply(h1_fc_loc2[:,1],tf.constant(float(i) )) , tf.constant(0.0),name='sp_tx'+str(i)),-1) )
			tf_ty.append( tf.expand_dims(tf.add(tf.multiply(h1_fc_loc2[:,2],tf.constant(float(i) )), tf.constant(0.0),name='sp_ty'+str(i)),-1) )
			mtx.append( tf.concat([tf_s[i],tf.zeros([tf.shape(h6)[0],1]),tf_tx[i],tf.zeros([tf.shape(h6)[0],1]),tf_s[i],tf_ty[i]],axis=1) )
			h_trans.append( transformer(image[:,:,:,0:3], mtx[i], out_size) )
			h_trans[i] = tf.reshape( h_trans[i],[tf.shape(h6)[0], 1,options.crop_size,options.crop_size,3] )

		return mtx,h_trans
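For reference, a small NumPy sketch (illustrative values, not from the source) of how the six numbers concatenated into `mtx` above map onto the 2x3 affine that `transformer` expects:

import numpy as np

s, tx, ty = 0.76, 0.1, -0.05                    # hypothetical scale and translation offsets
params = np.array([s, 0.0, tx, 0.0, s, ty], np.float32)
affine = params.reshape(2, 3)                   # [[s, 0, tx], [0, s, ty]]: uniform scale plus translation, no rotation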
Example #3
    def _build_model(self):
        # self.real_data = tf.placeholder(tf.float32,
        #                                 [None, self.image_size, self.image_size,
        #                                  self.input_c_dim + self.output_c_dim],
        #                                 name='real_A_and_B_images')
        self.real_data_video = tf.placeholder(
            tf.float32,
            [
                None,
                self.frames_nb,
                self.frame_h,
                self.frame_w,  #
                self.input_c_dim
            ],
            name='real_videos')
        self.real_data_image_c = tf.placeholder(
            tf.float32,
            [None, self.image_size, self.image_size, self.input_i_dim],
            name='real_c_images')

        self.real_data_image = tf.placeholder(
            tf.float32,
            [self.options.batch_size, self.image_size, self.image_size, 3],
            name='real_images')
        self.fake_data_image = tf.placeholder(
            tf.float32,
            [self.options.batch_size, self.image_size, self.image_size, 3],
            name='fake_images')
        # Add small Gaussian noise to both image batches (note: this rebinds the placeholder
        # attributes to the noisy tensors).
        noise = tf.random_normal(shape=self.real_data_image.get_shape(),
                                 mean=0.0,
                                 stddev=0.005,
                                 dtype=tf.float32)
        self.real_data_image = self.real_data_image + noise
        self.fake_data_image = self.fake_data_image + noise

        self.z = tf.placeholder(tf.float32, [None, self.z_dim])
        self.diff = tf.placeholder(
            tf.float32,
            [None, self.image_size, self.image_size, self.input_i_dim])

        crop_s = 128
        out_size = (crop_s, crop_s)
        mtx0 = tf.tile(tf.constant(np.array([[0.76, 0, 0, 0, 0.76, 0]], dtype=np.float32)),
                       [self.options.batch_size, 1])
        mtx1 = tf.tile(tf.constant(np.array([[1, 0, 0, 0, 1, 0]], dtype=np.float32)),
                       [self.options.batch_size, 1])
        self.real_image_crop = tf.reshape(
            transformer(self.real_data_image, mtx1,
                        (self.options.image_size, self.options.image_size)),
            [self.options.batch_size, self.options.image_size, self.options.image_size, 3])
        self.real_image_merge = tf.concat(
            [self.real_image_crop, self.real_data_image], axis=-1)

        self.img_patch, self.trans_list, self.g4, self.mask1, self.mask2, self.mask3, self.gb, self.fake_A_static, self.m1_gb, self.m2_gf, self.m3_im = self.generatorA(
            self,
            self.real_image_crop,
            self.z,
            None,
            self.options,
            False,
            name="generatorB2A")

        self.mtx, self.fake_camera_movement = self.generator_Camera(self, self.real_data_image, self.z,\
         self.options, False, name="generator_camera")

        flag_32 = False
        self.combined_v = []
        for i in range(self.frames_nb):
            self.combined_v.append(
                transformer(self.fake_A_static[:, i], mtx1, out_size))
            self.combined_v[i] = tf.expand_dims(self.combined_v[i], 1)

        # Note: the per-frame warps built above are discarded; the generator's static output
        # is used directly as the combined video.
        self.combined_v = self.fake_A_static

        self.combined_v_tf = tf.reshape(
            tf.concat(self.combined_v, axis=1),
            [self.batch_size, self.frames_nb, crop_s, crop_s, 3])
        self.fake_A = self.combined_v_tf
        #self.fake_A_static
        #self.fake_camera_movement_tf
        #self.combined_v_tf
        # ------------------------------------------------------------

        #self.real_video_camera = []
        #for i in range(self.frames_nb):
        #	self.real_video_camera.append( transformer(self.real_data_video[:,i], mtx1, out_size) )
        #	self.real_video_camera[i] = tf.expand_dims(self.real_video_camera[i],1)
        #self.real_video_camera_tf = self.real_video_camera[0]
        #for i in range(1,self.options.frames_nb):
        #	self.real_video_camera_tf = tf.concat([self.real_video_camera_tf,self.real_video_camera[i]],axis=1)
        #self.real_video_camera_tf = tf.reshape(self.real_video_camera_tf,(-1,self.options.frames_nb)+out_size+(3,))

        self.real_video_tf = self.real_data_video  # self.real_video_camera_tf

        #self.real_video_camera_tf
        #self.real_video_tf
        self.real_A = self.real_data_video
        #self.fake_A_sample = tf.placeholder(tf.float32,
        #									[None, self.frames_nb, crop_s, crop_s, #self.frames_nb,
        #									 self.input_c_dim], name='fake_A_sample')

        self.disc_fake = []
        self.DA_fake = []

        self.disc_real, self.DA_real = self.discriminatorA(
            self.real_video_tf,
            self.real_image_crop,
            self.options,
            reuse=False,
            name="discriminatorA")
        self.disc_wi_rv, self.DA_wi_rv = self.discriminatorA(
            self.real_video_tf,
            self.fake_data_image,
            self.options,
            reuse=True,
            name="discriminatorA")
        self.disc_fake, self.DA_fake = self.discriminatorA(
            self.combined_v_tf,
            self.real_data_image,
            self.options,
            reuse=True,
            name="discriminatorA")
        fake_logit = self.DA_fake
        true_logit = self.DA_real

        #fake_c_logit = self.DA_fake_camera
        #true_c_logit = self.DA_real_camera
        #self.da_loss_real = self.criterionGAN(self.disc_real, tf.ones_like(self.disc_real))
        #self.da_loss_fake = self.criterionGAN(self.disc_fake, tf.zeros_like(self.disc_fake))
        #self.da_loss_wi_rv = self.criterionGAN(self.disc_wi_rv, tf.zeros_like(self.disc_wi_rv))
        #self.d_loss = self.da_loss_real + (self.da_loss_fake + self.da_loss_wi_rv) / 2

        self.d_loss_true = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.disc_real,
                                                    labels=tf.ones_like(
                                                        self.disc_real)))
        self.d_loss_wi_rv = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.disc_wi_rv,
                                                    labels=tf.zeros_like(
                                                        self.disc_wi_rv)))
        self.d_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.disc_fake,
                                                    labels=tf.zeros_like(
                                                        self.disc_fake)))
        #self.d_loss_fake = 0
        #if flag_32 == True:
        #	for i in range(8):
        #		tmp = self.criterionGAN(self.disc_fake[i], tf.zeros_like(self.disc_fake[i]))
        #		self.d_loss_fake += tmp
        #	self.d_loss = self.d_loss_true + self.d_loss_fake / 8.0
        self.d_loss = self.d_loss_true + self.d_loss_fake + self.d_loss_wi_rv
        #self.d_loss = tf.reduce_mean(self.DA_wi_rv) + tf.reduce_mean(fake_logit) - tf.reduce_mean(true_logit)
        #+ abs_criterion(self.combined_v_tf,self.real_video_tf)
        self.g_loss_l1 = 0.6 * tf.reduce_mean(
            tf.abs(self.real_video_tf - self.combined_v_tf))
        self.g_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=self.disc_fake, labels=tf.ones_like(
                    self.disc_fake))) + self.g_loss_l1

        ##

        ## (1,2,/16,0) -- no motion
        ## (0.01,2,/16,0) -- no motion
        ## (5.0,2,/16,0)-- no motion
        ## (5.0,2,/16,2)-- some motion
        ## (2.0,2,/16,2)-- no motion
        ## (2.0, 1.5,/16, 1.0) -- no motion
        ## (0.5, 1.5, /8, 0.1) -- current
        ## (1, 0.005, 1.0, /16.0, 0.01)

        #self.d_c_loss = tf.reduce_mean(true_c_logit - fake_c_logit)
        #self.g_c_loss = tf.reduce_mean(fake_c_logit)

        ###TensorBoard visualization###
        self.z_sum = tf.summary.histogram("z", self.z)
        self.true_sum = tf.summary.histogram("d", true_logit)
        self.fake_sum = tf.summary.histogram("d_", fake_logit)
        self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
        self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
        self.imaginary_sum = video_summary("imaginary", self.fake_A,
                                           self.frames_nb)
        #self.d_gp_loss = tf.summary.scalar("d_gp_loss",gradient_penalty)

        self.g_sum = tf.summary.merge(
            [self.z_sum, self.fake_sum, self.imaginary_sum, self.g_loss_sum])
        self.d_sum = tf.summary.merge(
            [self.z_sum, self.true_sum, self.d_loss_sum])

        t_vars = tf.trainable_variables()
        #self.db_vars = [var for var in t_vars if 'discriminatorB' in var.name]
        self.da_vars = [var for var in t_vars if 'discriminatorA' in var.name]
        self.g_vars_b2a_camera = [
            var for var in t_vars if 'generator_camera' in var.name
        ]
        self.d_vars_camera = [
            var for var in t_vars if 'discriminator_Camera' in var.name
        ]

        #self.g_vars_a2b = [var for var in t_vars if 'generatorA2B' in var.name]
        self.g_vars_b2a = [var for var in t_vars if 'generatorB2A' in var.name]
        self.d_clamp_op = [
            tf.assign(
                var, tf.clip_by_value(var, self.clamp_lower, self.clamp_upper))
            for var in self.da_vars
        ]
        self.d_c_clamp_op = [
            tf.assign(
                var, tf.clip_by_value(var, self.clamp_lower, self.clamp_upper))
            for var in self.d_vars_camera
        ]

        for var in t_vars:
            print(var.name)
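The discriminator/generator losses above follow the standard sigmoid-cross-entropy GAN recipe; a stand-alone sketch with hypothetical logit placeholders (simplified to a single fake term, unlike the three-term d_loss above):

import tensorflow as tf

real_logit = tf.placeholder(tf.float32, [None, 1])   # discriminator output on real video / real image pairs
fake_logit = tf.placeholder(tf.float32, [None, 1])   # discriminator output on generated video
d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=real_logit, labels=tf.ones_like(real_logit)))
d_loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=fake_logit, labels=tf.zeros_like(fake_logit)))
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=fake_logit, labels=tf.ones_like(fake_logit)))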
Example #4
    def transform_maps(global_maps, particle_states, local_map_size):
        """
        Implements global to local map transformation
        :param global_maps: tf op (batch, None, None, ch) global map input
        :param particle_states: tf op (batch, K, 3) particle states that define local views for the transformation
        :param local_map_size: tuple, (height, width), size of the output local maps
        :return: tf op (batch, K, local_map_size[0], local_map_size[1], ch). Local maps, each showing a
          different transformation of the global map corresponding to the particle states
        """
        batch_size, num_particles = particle_states.get_shape().as_list()[:2]
        total_samples = batch_size * num_particles
        flat_states = tf.reshape(particle_states, [total_samples, 3])

        # define some helper variables
        input_shape = tf.shape(global_maps)
        global_height = tf.cast(input_shape[1], tf.float32)
        global_width = tf.cast(input_shape[2], tf.float32)
        height_inverse = 1.0 / global_height
        width_inverse = 1.0 / global_width
        # at tf1.6 matmul still does not support broadcasting, so we need full vectors
        zero = tf.constant(0, dtype=tf.float32, shape=(total_samples, ))
        one = tf.constant(1, dtype=tf.float32, shape=(total_samples, ))

        # the global map will be down-scaled by some factor
        window_scaler = 8.0

        # normalize orientations and precompute cos and sin functions
        theta = -flat_states[:, 2] - 0.5 * np.pi
        costheta = tf.cos(theta)
        sintheta = tf.sin(theta)

        # construct an affine transformation matrix step-by-step.
        # 1, translate the global map s.t. the center is at the particle state
        translate_x = (flat_states[:, 0] * width_inverse * 2.0) - 1.0
        translate_y = (flat_states[:, 1] * height_inverse * 2.0) - 1.0

        transm1 = tf.stack(
            (one, zero, translate_x, zero, one, translate_y, zero, zero, one),
            axis=1)
        transm1 = tf.reshape(transm1, (total_samples, 3, 3))

        # 2, rotate map s.t. the orientation matches that of the particles
        rotm = tf.stack((costheta, sintheta, zero, -sintheta, costheta, zero,
                         zero, zero, one),
                        axis=1)
        rotm = tf.reshape(rotm, (total_samples, 3, 3))

        # 3, scale down the map
        scale_x = tf.fill(
            (total_samples, ),
            float(local_map_size[1] * window_scaler) * width_inverse)
        scale_y = tf.fill(
            (total_samples, ),
            float(local_map_size[0] * window_scaler) * height_inverse)

        scalem = tf.stack(
            (scale_x, zero, zero, zero, scale_y, zero, zero, zero, one),
            axis=1)
        scalem = tf.reshape(scalem, (total_samples, 3, 3))

        # 4, translate the local map s.t. the particle defines the bottom mid-point instead of the center
        translate_y2 = tf.constant(-1.0,
                                   dtype=tf.float32,
                                   shape=(total_samples, ))

        transm2 = tf.stack(
            (one, zero, zero, zero, one, translate_y2, zero, zero, one),
            axis=1)
        transm2 = tf.reshape(transm2, (total_samples, 3, 3))

        # chain the transformation matrices into a single one: translate + rotate + scale + translate
        transform_m = tf.matmul(tf.matmul(tf.matmul(transm1, rotm), scalem),
                                transm2)

        # reshape to the format expected by the spatial transform network
        transform_m = tf.reshape(transform_m[:, :2],
                                 (batch_size, num_particles, 6))

        # do the image transformation using the spatial transform network
        # iterate over particle to avoid tiling large global maps
        output_list = []
        for i in range(num_particles):
            output_list.append(
                transformer(global_maps, transform_m[:, i], local_map_size))

        local_maps = tf.stack(output_list, axis=1)

        # set shape information that is lost in the spatial transform network
        local_maps = tf.reshape(
            local_maps, (batch_size, num_particles, local_map_size[0],
                         local_map_size[1], global_maps.shape.as_list()[-1]))

        return local_maps
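A minimal usage sketch of `transform_maps` (hypothetical shapes; assumes the same `transformer` import as above and that the method is callable as written, e.g. as a static helper on the enclosing class):

import tensorflow as tf

global_maps = tf.placeholder(tf.float32, [4, 100, 100, 1])   # hypothetical batch of global maps
particle_states = tf.placeholder(tf.float32, [4, 32, 3])     # (x, y, theta) for K=32 particles
local_maps = transform_maps(global_maps, particle_states, (28, 28))
# local_maps has shape (4, 32, 28, 28, 1): one oriented local view per particle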
Example #5
    def _build_model(self):
        # self.real_data = tf.placeholder(tf.float32,
        #                                 [None, self.image_size, self.image_size,
        #                                  self.input_c_dim + self.output_c_dim],
        #                                 name='real_A_and_B_images')
        self.real_data_video = tf.placeholder(
            tf.float32,
            [
                None,
                self.frames_nb,
                self.frame_h,
                self.frame_w,  #
                self.input_c_dim
            ],
            name='real_videos')
        self.real_data_image_c = tf.placeholder(
            tf.float32,
            [None, self.image_size, self.image_size, self.input_i_dim],
            name='real_c_images')

        self.real_data_image = tf.placeholder(
            tf.float32, [None, self.image_size, self.image_size, 3],
            name='real_images')
        self.fake_data_image = tf.placeholder(
            tf.float32, [None, self.image_size, self.image_size, 3],
            name='fake_images')

        self.z = tf.placeholder(tf.float32, [None, 100])
        self.diff = tf.placeholder(
            tf.float32,
            [None, self.image_size, self.image_size, self.input_i_dim])

        self.g4, self.mask, self.gb, self.fake_A_static = self.generatorA(
            self,
            self.real_data_image,
            self.z,
            None,
            self.options,
            False,
            name="generatorB2A")

        self.mtx, self.fake_camera_movement = self.generator_Camera(self, self.real_data_image, self.z,\
         self.options, False, name="generator_camera")

        crop_s = 96
        out_size = (crop_s, crop_s)
        self.combined_v = []
        for i in range(self.options.frames_nb):
            self.combined_v.append(
                transformer(self.fake_A_static[:, i], self.mtx[i], out_size))
            self.combined_v[i] = tf.expand_dims(self.combined_v[i], 1)

        self.combined_v_tf = self.combined_v[0]
        self.fake_camera_movement_tf = self.fake_camera_movement[0]
        for i in range(1, self.options.frames_nb):
            self.combined_v_tf = tf.concat(
                [self.combined_v_tf, self.combined_v[i]], axis=1)
            self.fake_camera_movement_tf = tf.concat(
                [self.fake_camera_movement_tf, self.fake_camera_movement[i]],
                axis=1)
        self.combined_v_tf = tf.reshape(self.combined_v_tf,
                                        (-1, self.options.frames_nb) +
                                        out_size + (3, ))
        self.fake_A = self.combined_v_tf

        #self.fake_A_static
        #self.fake_camera_movement_tf
        #self.combined_v_tf
        # ------------------------------------------------------------
        mtx0 = tf.tile(tf.constant(np.array([[0, 0.76, 0, 0.76, 0, 0]], dtype=np.float32)),
                       [self.options.batch_size, 1])
        self.real_video_camera = []
        for i in range(self.frames_nb):
            self.real_video_camera.append(
                transformer(self.real_data_video[:, i], mtx0, out_size))
            self.real_video_camera[i] = tf.expand_dims(
                self.real_video_camera[i], 1)
        self.real_video_camera_tf = self.real_video_camera[0]
        for i in range(1, self.options.frames_nb):
            self.real_video_camera_tf = tf.concat(
                [self.real_video_camera_tf, self.real_video_camera[i]], axis=1)
        self.real_video_camera_tf = tf.reshape(self.real_video_camera_tf,
                                               (-1, self.options.frames_nb) +
                                               out_size + (3, ))

        self.real_video_tf = self.real_video_camera_tf

        #self.real_video_camera_tf
        #self.real_video_tf

        self.real_A = self.real_data_video
        self.fake_A_sample = tf.placeholder(
            tf.float32,
            [
                None,
                self.frames_nb,
                crop_s,
                crop_s,  #self.frames_nb, 
                self.input_c_dim
            ],
            name='fake_A_sample')

        #self.disc_c, self.D_camera = self.discriminatorA(self.fake_camera_movement_tf, self.real_data_image, self.options, reuse=False, name="discriminator_Camera")
        #self.disc_c_fake, self.DA_fake_camera = self.discriminatorA(self.fake_camera_movement_tf, self.real_data_image, self.options, reuse=True, name="discriminator_Camera")
        #self.disc_c_true, self.DA_real_camera = self.discriminatorA(self.real_video_camera_tf, self.real_data_image, self.options, reuse=True, name="discriminator_Camera")

        #self.da_c_loss_real = self.criterionGAN(self.DA_real_camera, tf.ones_like(self.DA_real_camera))
        #self.da_c_loss_fake = self.criterionGAN(self.DA_fake_camera, tf.zeros_like(self.DA_fake_camera))
        #self.da_c_loss = (self.da_c_loss_real + self.da_c_loss_fake) / 2.0
        #self.da_camera_sum = tf.summary.scalar("da_camera_loss", self.da_c_loss)
        #self.dc_sum = tf.summary.merge(
        #	[self.da_camera_sum]
        #)

        self.disc_fake, self.DA_fake = self.discriminatorA(
            self.combined_v_tf,
            self.real_data_image,
            self.options,
            reuse=False,
            name="discriminatorA")
        self.disc_wi_rv, self.DA_wi_rv = self.discriminatorA(
            self.real_video_tf,
            self.fake_data_image,
            self.options,
            reuse=True,
            name="discriminatorA")
        self.disc_real, self.DA_real = self.discriminatorA(
            self.real_video_tf,
            self.real_data_image,
            self.options,
            reuse=True,
            name="discriminatorA")

        fake_logit = self.DA_fake
        true_logit = self.DA_real

        #fake_c_logit = self.DA_fake_camera
        #true_c_logit = self.DA_real_camera
        #self.da_loss_real = self.criterionGAN(self.disc_real, tf.ones_like(self.disc_real))
        #self.da_loss_fake = self.criterionGAN(self.disc_fake, tf.zeros_like(self.disc_fake))
        #self.da_loss_wi_rv = self.criterionGAN(self.disc_wi_rv, tf.zeros_like(self.disc_wi_rv))
        #self.d_loss = self.da_loss_real + (self.da_loss_fake + self.da_loss_wi_rv) / 2

        self.d_loss_true = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=true_logit, labels=tf.ones_like(true_logit)))
        self.d_loss_wi_rv = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.DA_wi_rv,
                                                    labels=tf.zeros_like(
                                                        self.DA_wi_rv)))
        self.d_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=fake_logit, labels=tf.zeros_like(fake_logit)))
        #self.d_loss = self.d_loss_true + self.d_loss_fake
        # WGAN-style critic loss; the zero-weighted DA_wi_rv term contributes nothing to the value.
        self.d_loss = 0 * tf.reduce_mean(self.DA_wi_rv) + tf.reduce_mean(
            fake_logit) - tf.reduce_mean(true_logit)
        #+ abs_criterion(self.combined_v_tf,self.real_video_tf)
        #self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits( logits=fake_logit , labels=tf.ones_like(fake_logit) )) +\
        #100 * tf.reduce_mean(tf.abs(self.real_video_tf - self.combined_v_tf))
        # WGAN generator loss: maximize the critic score on generated videos.
        self.g_loss = -tf.reduce_mean(
            fake_logit
        )  #+ tf.reduce_mean(tf.abs(self.real_video_tf - self.combined_v_tf))
        self.g_loss_l1 = abs_criterion(self.combined_v_tf, self.real_video_tf)
        #self.g_loss = self.criterionGAN(self.disc_fake, tf.ones_like(self.disc_fake))# \
        #+ 10 * self.g_loss_l1

        #alpha = tf.random_uniform(
        #	shape=[self.options.batch_size,1],
        #	minval=0.,
        #	maxval=1.
        #	)
        #differences = self.combined_v_tf - self.real_video_tf
        #self.interpolates = self.real_video_tf + differences
        #_, tmp = self.discriminatorA(self.interpolates, self.real_data_image, self.options, reuse=True, name="discriminatorA")
        #gradients = tf.gradients(tmp, [self.interpolates])[0]
        #slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
        #gradient_penalty = tf.reduce_mean((slopes-1.)**2)
        #self.d_loss += 10*gradient_penalty

        #self.d_c_loss = tf.reduce_mean(true_c_logit - fake_c_logit)
        #self.g_c_loss = tf.reduce_mean(fake_c_logit)

        ###TensorBoard visualization###
        self.z_sum = tf.summary.histogram("z", self.z)
        self.true_sum = tf.summary.histogram("d", true_logit)
        self.fake_sum = tf.summary.histogram("d_", fake_logit)
        self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
        self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
        self.imaginary_sum = video_summary("imaginary", self.fake_A,
                                           self.frames_nb)
        #self.d_gp_loss = tf.summary.scalar("d_gp_loss",gradient_penalty)

        ###TensorBoard visualization###
        #self.z_c_sum = tf.summary.histogram("z_c", self.z)
        #self.true_c_sum = tf.summary.histogram("d_c", true_c_logit)
        #self.fake_c_sum = tf.summary.histogram("d_c", fake_c_logit)
        #self.g_c_loss_sum = tf.summary.scalar("g_c_loss", self.g_c_loss)
        #self.d_c_loss_sum = tf.summary.scalar("d_c_loss", self.d_c_loss)
        #self.imaginary_c_sum = video_summary("imaginary_c", self.fake_camera_movement_tf,self.frames_nb)

        self.g_sum = tf.summary.merge(
            [self.z_sum, self.fake_sum, self.imaginary_sum, self.g_loss_sum])
        self.d_sum = tf.summary.merge(
            [self.z_sum, self.true_sum, self.d_loss_sum])

        #self.g_c_sum = tf.summary.merge([self.z_c_sum, self.fake_c_sum, self.imaginary_c_sum, self.g_c_loss_sum])
        #self.d_c_sum = tf.summary.merge([self.z_c_sum, self.true_c_sum, self.d_c_loss_sum])

        #self.da_loss_real = self.criterionGAN(self.DA_real, tf.ones_like(self.DA_real))
        #self.da_loss_fake = self.criterionGAN(self.DA_fake_sample, tf.zeros_like(self.DA_fake_sample))
        #self.da_loss = (self.da_loss_real + self.da_loss_fake) / 2.0
        #self.da_loss_sum = tf.summary.scalar("da_loss", self.da_loss)
        #self.da_loss_real_sum = tf.summary.scalar("da_loss_real", self.da_loss_real)
        #self.da_loss_fake_sum = tf.summary.scalar("da_loss_fake", self.da_loss_fake)
        #self.da_sum = tf.summary.merge(
        #   [self.da_loss_sum, self.da_loss_real_sum, self.da_loss_fake_sum]
        #)

        #self.g_camera_movement_loss = abs_criterion(self.fake_camera_movement_tf,self.real_video_camera_tf)
        #self.g_anime_l1_loss = abs_criterion(self.combined_v_tf, self.real_video_camera_tf)
        #self.g_abs_b2a = abs_criterion(self.fake_A, self.real_video_tf)
        #self.g_dis_camera_movemet_loss = self.criterionGAN(self.D_camera, tf.ones_like(self.D_camera))
        #self.g_loss_b2a_camera = self.g_dis_camera_movemet_loss
        #self.g_dis_b2a_loss = self.criterionGAN(self.DA_fake, tf.ones_like(self.DA_fake))
        #self.g_loss_b2a = self.g_dis_b2a_loss

        #		self.db_loss_real = self.criterionGAN(self.DB_real, tf.ones_like(self.DB_real))
        #		self.db_loss_fake = self.criterionGAN(self.DB_fake_sample, tf.zeros_like(self.DB_fake_sample))
        #		self.db_loss = (self.db_loss_real + self.db_loss_fake) / 2

        #		self.g_a2b_sum = tf.summary.scalar("g_loss_a2b", self.g_loss_a2b)
        #self.g_abs_b2a_camera_sum = tf.summary.scalar("g_camera_movement_loss",self.g_camera_movement_loss)
        #self.g_b2a_loss_sum = tf.summary.scalar("g_loss_b2a", self.g_loss_b2a)
        #self.g_fm_b2a_sum = None#tf.summary.scalar("g_fm_loss_b2a", self.g_fm_b2a)
        #self.g_abs_b2a_sum = tf.summary.scalar("g_abs_b2a_sum", self.g_anime_l1_loss)
        #self.g_dis_b2a_sum = tf.summary.scalar("g_dis_b2a_sum", self.g_dis_b2a_loss)
        #self.g_ms_loss = None #tf.summary.scalar("g_ms_loss",self.g_ms_loss)

        #self.g_b2a_sum = tf.summary.merge(
        #	[self.g_b2a_loss_sum,self.g_dis_b2a_sum,self.g_abs_b2a_sum,self.g_abs_b2a_camera_sum]
        #)

        t_vars = tf.trainable_variables()
        #self.db_vars = [var for var in t_vars if 'discriminatorB' in var.name]
        self.da_vars = [var for var in t_vars if 'discriminatorA' in var.name]
        self.g_vars_b2a_camera = [
            var for var in t_vars if 'generator_camera' in var.name
        ]
        self.d_vars_camera = [
            var for var in t_vars if 'discriminator_Camera' in var.name
        ]

        #self.g_vars_a2b = [var for var in t_vars if 'generatorA2B' in var.name]
        self.g_vars_b2a = [var for var in t_vars if 'generatorB2A' in var.name]
        self.d_clamp_op = [
            tf.assign(
                var, tf.clip_by_value(var, self.clamp_lower, self.clamp_upper))
            for var in self.da_vars
        ]
        self.d_c_clamp_op = [
            tf.assign(
                var, tf.clip_by_value(var, self.clamp_lower, self.clamp_upper))
            for var in self.d_vars_camera
        ]

        for var in t_vars:
            print(var.name)
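`d_clamp_op` / `d_c_clamp_op` above implement WGAN-style weight clipping; a tiny self-contained sketch of the same op on a toy variable (hypothetical bounds, not the model's clamp_lower/clamp_upper):

import tensorflow as tf

clamp_lower, clamp_upper = -0.01, 0.01
w = tf.Variable([[0.5, -0.3], [0.002, -0.004]], dtype=tf.float32)
clamp_op = tf.assign(w, tf.clip_by_value(w, clamp_lower, clamp_upper))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(clamp_op)   # every entry of w now lies within [-0.01, 0.01]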