def build_egomotion_test_graph(self):
    """Builds the egomotion inference graph fed from a placeholder.

    Creates a `raw_input` placeholder holding `seq_length` RGB frames
    stacked along the channel axis, optionally applies ImageNet
    normalization, optionally pre-computes per-frame depth-encoder
    embeddings (joint-encoder mode), and runs the egomotion network.
    The placeholder and the output tensor are exposed as
    `self.input_image_stack` and `self.est_egomotion`.
    """
    image_stack = tf.placeholder(
        tf.float32,
        [1, self.img_height, self.img_width, self.seq_length * 3],
        name='raw_input')
    bottleneck_stack = None
    if self.imagenet_norm:
        # Tile the per-channel stats so they line up with the
        # channel-stacked sequence of frames.
        mean_tiled = tf.tile(
            tf.constant(reader.IMAGENET_MEAN), multiples=[self.seq_length])
        sd_tiled = tf.tile(
            tf.constant(reader.IMAGENET_SD), multiples=[self.seq_length])
        image_stack = (image_stack - mean_tiled) / sd_tiled
    if self.joint_encoder:
        # Pre-compute per-frame embeddings, reusing the depth encoder's
        # variables rather than creating new ones.
        with tf.variable_scope('depth_prediction', reuse=True):
            embeddings = []
            encoder_fn = nets.encoder(self.architecture)
            for frame_idx in range(self.seq_length):
                # Slice out frame `frame_idx` (3 RGB channels).
                frame = image_stack[:, :, :, frame_idx * 3:(frame_idx + 1) * 3]
                tf.get_variable_scope().reuse_variables()
                bottleneck, _ = encoder_fn(
                    target_image=frame,
                    weight_reg=self.weight_reg,
                    is_training=True)
                embeddings.append(bottleneck)
            bottleneck_stack = tf.concat(embeddings, axis=3)
    with tf.variable_scope('egomotion_prediction'):
        est_egomotion = nets.egomotion_net(
            image_stack=image_stack,
            disp_bottleneck_stack=bottleneck_stack,
            joint_encoder=self.joint_encoder,
            seq_length=self.seq_length,
            weight_reg=self.weight_reg,
            same_trans_rot_scaling=self.same_trans_rot_scaling)
    self.input_image_stack = image_stack
    self.est_egomotion = est_egomotion
def build_egomotion_test_graph(self):
    """Builds egomotion model reading from placeholders."""
    # NOTE(review): this method is defined more than once in this file
    # (another variant passes `same_trans_rot_scaling` to
    # nets.egomotion_net); in Python the later definition shadows the
    # earlier one -- confirm which variant is intended to survive.
    input_image_stack = tf.placeholder(
        tf.float32,
        [1, self.img_height, self.img_width, self.seq_length * 3],
        name='raw_input')
    input_bottleneck_stack = None
    if self.imagenet_norm:
        # Tile the per-channel ImageNet stats across all seq_length
        # frames so they broadcast against the channel-stacked input.
        im_mean = tf.tile(
            tf.constant(reader.IMAGENET_MEAN), multiples=[self.seq_length])
        im_sd = tf.tile(
            tf.constant(reader.IMAGENET_SD), multiples=[self.seq_length])
        input_image_stack = (input_image_stack - im_mean) / im_sd
    if self.joint_encoder:
        # Pre-compute embeddings here.
        with tf.variable_scope('depth_prediction', reuse=True):
            input_bottleneck_stack = []
            encoder_selected = nets.encoder(self.architecture)
            for i in range(self.seq_length):
                # Slice frame i (3 RGB channels) out of the stack.
                input_image = input_image_stack[:, :, :, i * 3:(i + 1) * 3]
                # Reuse the depth encoder's existing variables instead of
                # creating fresh ones for each frame.
                tf.get_variable_scope().reuse_variables()
                embedding, _ = encoder_selected(
                    target_image=input_image,
                    weight_reg=self.weight_reg,
                    is_training=True)
                input_bottleneck_stack.append(embedding)
            input_bottleneck_stack = tf.concat(input_bottleneck_stack, axis=3)
    with tf.variable_scope('egomotion_prediction'):
        est_egomotion = nets.egomotion_net(
            image_stack=input_image_stack,
            disp_bottleneck_stack=input_bottleneck_stack,
            joint_encoder=self.joint_encoder,
            seq_length=self.seq_length,
            weight_reg=self.weight_reg)
    # Expose the feed placeholder and the output tensor for inference.
    self.input_image_stack = input_image_stack
    self.est_egomotion = est_egomotion
def build_egomotion_test_graph(self):
    """Builds egomotion model reading from placeholders.

    Creates a `raw_input` placeholder of `seq_length` RGB frames stacked
    along channels, optionally ImageNet-normalizes it, optionally
    pre-computes per-frame depth-encoder embeddings (joint-encoder mode),
    and builds the egomotion network. Exposes the placeholder as
    `self.input_image_stack` and the output as `self.est_egomotion`.

    Note: `est_egomotion` is a symbolic tensor at this point; it only has
    a value once evaluated inside a session with `raw_input` fed, which
    is why the previous debug prints / `sess.run` experiments here were
    removed.
    """
    input_image_stack = tf.placeholder(
        tf.float32,
        [1, self.img_height, self.img_width, self.seq_length * 3],
        name='raw_input')
    input_bottleneck_stack = None
    if self.imagenet_norm:
        # Tile the per-channel ImageNet stats across all seq_length
        # frames so they broadcast against the channel-stacked input.
        im_mean = tf.tile(
            tf.constant(reader.IMAGENET_MEAN), multiples=[self.seq_length])
        im_sd = tf.tile(
            tf.constant(reader.IMAGENET_SD), multiples=[self.seq_length])
        input_image_stack = (input_image_stack - im_mean) / im_sd
    if self.joint_encoder:
        # Pre-compute embeddings here, reusing the depth encoder's
        # variables rather than creating new ones.
        with tf.variable_scope('depth_prediction', reuse=True):
            input_bottleneck_stack = []
            encoder_selected = nets.encoder(self.architecture)
            for i in range(self.seq_length):
                # Slice frame i (3 RGB channels) out of the stack.
                input_image = input_image_stack[:, :, :, i * 3:(i + 1) * 3]
                tf.get_variable_scope().reuse_variables()
                embedding, _ = encoder_selected(
                    target_image=input_image,
                    weight_reg=self.weight_reg,
                    is_training=True)
                input_bottleneck_stack.append(embedding)
            input_bottleneck_stack = tf.concat(input_bottleneck_stack, axis=3)
    with tf.variable_scope('egomotion_prediction'):
        est_egomotion = nets.egomotion_net(
            image_stack=input_image_stack,
            disp_bottleneck_stack=input_bottleneck_stack,
            joint_encoder=self.joint_encoder,
            seq_length=self.seq_length,
            weight_reg=self.weight_reg)
    # Expose the feed placeholder and the output tensor for inference.
    self.input_image_stack = input_image_stack
    self.est_egomotion = est_egomotion