Example #1
def split_template_source(template_data,
                          batch_euler_poses,
                          NUM_POINT,
                          centroid_subtraction_switch,
                          ADD_NOISE,
                          S_RAND_POINTS,
                          SPARSE=1):
    if np.random.random_sample() < S_RAND_POINTS:
        #sparse:
        if SPARSE == 1:
            template_data = template_data[:, :(2 * NUM_POINT), ...]
        elif SPARSE == 2:
            template_data = template_data[:, :(4 * NUM_POINT), ...]

        template_data = helper.select_random_points(template_data,
                                                    (2 * NUM_POINT))
        source_data = template_data[:, NUM_POINT:(2 * NUM_POINT), ...]
        template_data = template_data[:, :NUM_POINT, ...]
    else:
        source_data = template_data[:, :(NUM_POINT), ...]
        template_data = template_data[:, :NUM_POINT, ...]

    source_data = helper.apply_transformation(
        source_data, batch_euler_poses
    )  # Apply the poses on the templates to get source data.

    if centroid_subtraction_switch:
        source_data = source_data - np.mean(source_data, axis=1, keepdims=True)
    # template_data = template_data - np.mean(template_data, axis=1, keepdims=True)

    if np.random.random_sample() < ADD_NOISE:
        source_data = helper.add_noise(source_data)

    # Only choose a limited number of points from the source and template data.
    source_data = source_data[:, 0:NUM_POINT, :]
    template_data = template_data[:, 0:NUM_POINT, :]

    # # To visualize the source and point clouds:
    # if display_ptClouds:
    # 	helper.display_clouds_data(source_data[0])
    # 	helper.display_clouds_data(template_data[0])

    return template_data, source_data
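
A minimal usage sketch for split_template_source, assuming helper exposes the same select_random_points, apply_transformation and add_noise routines used above; the shapes and probability values below are illustrative, not taken from the original repository.

import numpy as np

# Illustrative shapes and hyperparameters (assumptions, not repository values).
BATCH_SIZE, NUM_POINT = 8, 1024
template_data = np.random.rand(BATCH_SIZE, 4 * NUM_POINT, 3)   # dummy point clouds
batch_euler_poses = np.random.rand(BATCH_SIZE, 6)              # [tx, ty, tz, rx, ry, rz]

template_data, source_data = split_template_source(
    template_data, batch_euler_poses, NUM_POINT,
    centroid_subtraction_switch=True,
    ADD_NOISE=0.5,        # probability of perturbing the source cloud
    S_RAND_POINTS=0.3,    # probability of sampling different points for the source
    SPARSE=1)
# Both outputs have shape (BATCH_SIZE, NUM_POINT, 3).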
Example #2
def eval_one_epoch(sess, ops, eval_writer, templates, poses):
	# Arguments:
	# sess: 		Tensorflow session to handle tensors.
	# ops:			Dictionary for tensors of Network
	# templates:	Training Point Cloud data.
	# poses: 		Training pose data.

	is_training = False
	display_ptClouds = False
	display_poses = False
	display_poses_in_itr = False
	display_ptClouds_in_itr = False

	#templates = helper.shuffle_templates(templates)
	#poses = helper.shuffle_poses(poses)

	loss_sum = 0								# Total Loss in each batch.
	#poses = poses[0:4000,:]
	num_batches = int(poses.shape[0]/BATCH_SIZE) 				# Number of batches in an epoch.
	#num_batches=2
	
	for fn in range(num_batches):
		#shuffled_poses = helper.shuffle_poses(poses)

		start_idx = fn*BATCH_SIZE 			# Start index of poses.
		end_idx = (fn+1)*BATCH_SIZE 		# End index of poses.
		
		#template_data = np.copy(templates[start_idx:end_idx])
		template_data = np.copy(templates[0,:,:]).reshape(1,-1,3)
		template_data = np.tile(template_data, (BATCH_SIZE, 1, 1))

		batch_euler_poses = poses[start_idx:end_idx]			# Extract poses for the current batch.
		source_data = helper.apply_transformation(template_data, batch_euler_poses)		# Apply the poses on the templates to get source data.

		if centroid_subtraction_switch:
			source_data = source_data - np.mean(source_data, axis=1, keepdims=True)
			template_data = template_data - np.mean(template_data, axis=1, keepdims=True)

		if FLAGS.use_partial_data:
			complete_source_data = np.copy(source_data)
			source_data = helper.find_partial_data(complete_source_data)
			template_data = helper.find_partial_data(template_data)

		# Choose random points from the point clouds (disabled here: the sampling probability is 0.0).
		if np.random.random_sample()<0.0:
			source_data = helper.select_random_points(source_data, NUM_POINT)		# Source data may then contain different points than the template.
		else:
			source_data = source_data[:,0:NUM_POINT,:]
		if np.random.random_sample()<ADD_NOISE:
			source_data = helper.add_noise(source_data)	

		# Only choose a limited number of points from the source and template data.
		source_data = source_data[:,0:NUM_POINT,:]
		template_data = template_data[:,0:NUM_POINT,:]

		template_voxel = helper.voxelization(template_data, size=32)

		# To visualize the source and point clouds:
		if display_ptClouds:
			helper.display_clouds_data(source_data[0])
			helper.display_clouds_data(template_data[0])

		TRANSFORMATIONS = np.identity(4)				# Initialize identity transformation matrix.
		TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS,BATCH_SIZE,1).reshape(BATCH_SIZE,4,4)		# Initialize one identity matrix per element of the batch.

		# Iterations for pose refinement.
		for loop_idx in range(MAX_LOOPS-1):
			source_voxel = helper.voxelization(source_data, size=32)
			# 4a
			# Feed the placeholders of Network with template data and source data.
			feed_dict = {ops['source_pointclouds_pl']: source_voxel,
						 ops['template_pointclouds_pl']: template_voxel,
						 ops['is_training_pl']: is_training}
			predicted_transformation = sess.run([ops['predicted_transformation']], feed_dict=feed_dict)		# Ask the network to predict the pose.

			# 4b,4c
			# Apply the transformation on the template data and multiply it to transformation matrix obtained in previous iteration.
			if FLAGS.use_partial_data:
				TRANSFORMATIONS, complete_source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, complete_source_data)
				source_data = helper.find_partial_data(complete_source_data)
			else:
				TRANSFORMATIONS, source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, source_data)

			# Display Results after each iteration.
			if display_poses_in_itr:
				print(predicted_transformation[0][0,0:3])
				print(predicted_transformation[0][0,3:7]*(180/np.pi))
			if display_ptClouds_in_itr:
				helper.display_clouds_data(source_data[0])

		source_voxel = helper.voxelization(source_data, size=32)
		# Feed the placeholders of Network with source data and template data obtained from N-Iterations.
		feed_dict = {ops['source_pointclouds_pl']: source_voxel,
					 ops['template_pointclouds_pl']: template_voxel,
					 ops['transformation_pl']: TRANSFORMATIONS,
					 ops['gt_transformation_pl']: helper.pose2mat_inv(batch_euler_poses),
					 ops['is_training_pl']: is_training}

		# Ask the network to predict transformation, calculate loss using distance between actual points.
		summary, step, loss_val, predicted_transformation = sess.run([ops['merged'], ops['step'], ops['loss'], ops['predicted_transformation']], feed_dict=feed_dict)

		eval_writer.add_summary(summary, step)			# Add all the summary to the tensorboard.

		# Apply the final transformation on the template data and multiply it with the transformation matrix obtained from N-Iterations.
		if FLAGS.use_partial_data:
			TRANSFORMATIONS, complete_source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, complete_source_data)
			source_data = helper.find_partial_data(complete_source_data)
		else:
			TRANSFORMATIONS, source_data = helper.transformation_quat2mat(predicted_transformation, TRANSFORMATIONS, source_data)

		final_pose = helper.find_final_pose_inv(TRANSFORMATIONS)		# Find the final pose (translation, orientation (euler angles in degrees)) from transformation matrix.

		# Display the ground truth pose and predicted pose for first Point Cloud in batch 
		if display_poses:
			print('Ground Truth Position: {}'.format(batch_euler_poses[0,0:3].tolist()))
			print('Predicted Position: {}'.format(final_pose[0,0:3].tolist()))
			print('Ground Truth Orientation: {}'.format((batch_euler_poses[0,3:6]*(180/np.pi)).tolist()))
			print('Predicted Orientation: {}'.format((final_pose[0,3:6]*(180/np.pi)).tolist()))

		# Display Loss Value.
		print("Batch: {}, Loss: {}\r".format(fn, loss_val),end='')

		# Add loss for each batch.
		loss_sum += loss_val
	print('\n')
	log_string('Eval Mean loss: %f' % (loss_sum/num_batches))		# Store and display mean loss of epoch.
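
The refinement loop above depends on helper.transformation_quat2mat to turn each predicted pose into a homogeneous matrix and accumulate it across iterations. That helper is not shown here, so the following is only an illustrative sketch of the idea, assuming the network emits a 7-D pose per batch element laid out as [tx, ty, tz, qw, qx, qy, qz]; the layout, the left-multiplication order, and the omission of the point-cloud update are all assumptions.

import numpy as np

def quat_pose_to_mat(pose):
    # pose: [tx, ty, tz, qw, qx, qy, qz] (layout is an assumption).
    t = pose[0:3]
    w, x, y, z = pose[3:7] / np.linalg.norm(pose[3:7])
    R = np.array([
        [1 - 2*(y*y + z*z), 2*(x*y - z*w),     2*(x*z + y*w)],
        [2*(x*y + z*w),     1 - 2*(x*x + z*z), 2*(y*z - x*w)],
        [2*(x*z - y*w),     2*(y*z + x*w),     1 - 2*(x*x + y*y)]])
    T = np.eye(4)
    T[0:3, 0:3] = R
    T[0:3, 3] = t
    return T

def compose_batch(predicted_poses, transformations):
    # Left-multiply each accumulated 4x4 transform by the newly predicted one.
    new = np.stack([quat_pose_to_mat(p) for p in predicted_poses])  # (B, 4, 4)
    return np.matmul(new, transformations)                          # (B, 4, 4)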
Example #3
def eval_one_epoch(sess, ops_L, eval_writer, templates, poses):
    # Arguments:
    # sess: 		Tensorflow session to handle tensors.
    # ops_L:		Dictionary for tensors of Network_L
    # templates:	Training Point Cloud data.
    # poses: 		Training pose data.

    is_training = False
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False

    #templates = helper.shuffle_templates(templates)
    #poses = helper.shuffle_poses(poses)

    loss_sum = 0  # Total Loss in each batch.
    num_batches = int(poses.shape[0] /
                      BATCH_SIZE)  # Number of batches in an epoch.

    for fn in range(num_batches):
        start_idx = fn * BATCH_SIZE  # Start index of poses.
        end_idx = (fn + 1) * BATCH_SIZE  # End index of poses.

        template_data = np.copy(templates[2, :, :]).reshape(1, -1, 3)
        template_data = np.tile(template_data, (BATCH_SIZE, 1, 1))
        batch_euler_poses = poses[start_idx:
                                  end_idx]  # Extract poses for batch training.

        source_data = helper.apply_transformation(
            template_data, batch_euler_poses
        )  # Apply the poses on the templates to get source data.

        # Choose random points from the point clouds (disabled here: the sampling probability is 0).
        if np.random.random_sample() < 0:
            source_data = helper.select_random_points(
                source_data, NUM_POINT
            )  # Source data may then contain different points than the template.
        else:
            source_data = source_data[:, 0:NUM_POINT, :]

        if np.random.random_sample() < 0:
            source_data = helper.add_noise(
                source_data)  # Noise injection (disabled here: the probability is 0).

        # Only choose a limited number of points from the source and template data.
        template_data = template_data[:, 0:NUM_POINT, :]
        source_data = source_data[:, 0:NUM_POINT, :]

        # Subtract the Centroids from the Point Clouds.
        if centroid_subtraction_switch:
            source_data = source_data - np.mean(
                source_data, axis=1, keepdims=True)
            template_data = template_data - np.mean(
                template_data, axis=1, keepdims=True)

        # To visualize the source and point clouds:
        if display_ptClouds:
            helper.display_clouds_data(source_data[0])
            helper.display_clouds_data(template_data[0])

        # Feed the placeholders of Network_L with source data and template data obtained from N-Iterations.
        feed_dict = {
            ops_L['source_pointclouds_pl']: source_data,
            ops_L['template_pointclouds_pl']: template_data,
            ops_L['is_training_pl']: is_training
        }

        # Ask the network to predict transformation, calculate loss using distance between actual points.
        summary, step, loss_val, predicted_transformation = sess.run(
            [
                ops_L['merged'], ops_L['step'], ops_L['loss'],
                ops_L['predicted_transformation']
            ],
            feed_dict=feed_dict)
        eval_writer.add_summary(
            summary, step)  # Add all the summary to the tensorboard.

        # Display the ground truth pose and predicted pose for first Point Cloud in batch
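        # Note: final_pose is never computed in this example, so display_poses must stay False to avoid a NameError.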
        if display_poses:
            print('Ground Truth Position: {}'.format(
                batch_euler_poses[0, 0:3].tolist()))
            print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
            print('Ground Truth Orientation: {}'.format(
                (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
            print('Predicted Orientation: {}'.format(
                (final_pose[0, 3:6] * (180 / np.pi)).tolist()))

        # Display Loss Value.
        print("Batch: {} & Loss: {}\r".format(fn, loss_val), end='')

        # Add loss for each batch.
        loss_sum += loss_val
    print('\n')
    log_string(
        'Eval Mean loss: %f' %
        (loss_sum / num_batches))  # Store and display mean loss of epoch.
Example #4
def resnet50_noisy(input_node, netparams, err_mean, err_stddev, train_vars):
	weights_noisy, biases_noisy, err_w, err_b = helper.add_noise(netparams['weights'], netparams['biases'], err_mean, err_stddev, train_vars)
	mean, variance, scale, offset = netparams['mean'], netparams['variance'], netparams['scale'], netparams['offset']
	err_lyr = {}
	layers_err  = {}
	data_spec = helper.get_data_spec('resnet50')
	err_lyr['input'] = tf.get_variable(name='input_lyr_err', shape=(1, data_spec.crop_size, data_spec.crop_size, data_spec.channels), initializer=tf.random_normal_initializer(mean=err_mean[0], stddev=err_stddev[0]), trainable=train_vars[0])
	input_node_noisy = tf.add(input_node, err_lyr['input'])
	conv1 = conv(input_node_noisy, weights_noisy['conv1'], biases_noisy['conv1'], 2, 2, relu=False)
	err_lyr['conv1'] = tf.get_variable(name='conv1_lyr_err', shape=conv1.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['conv1'] = tf.add(conv1, err_lyr['conv1'])
	bn_conv1 = batch_normalization(layers_err['conv1'], scale['bn_conv1'], offset['bn_conv1'], mean['bn_conv1'], variance['bn_conv1'], relu=True)
	pool1 = max_pool(bn_conv1, 3, 3, 2, 2)
	res2a_branch1 = conv(pool1, weights_noisy['res2a_branch1'], biases_noisy['res2a_branch1'], 1, 1, biased=False, relu=False)
	err_lyr['res2a_branch1'] = tf.get_variable(name='res2a_branch1_lyr_err', shape=res2a_branch1.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res2a_branch1'] = tf.add(res2a_branch1, err_lyr['res2a_branch1'])
	bn2a_branch1 = batch_normalization(layers_err['res2a_branch1'], scale['bn2a_branch1'], offset['bn2a_branch1'], mean['bn2a_branch1'], variance['bn2a_branch1'])
	res2a_branch2a = conv(pool1, weights_noisy['res2a_branch2a'], biases_noisy['res2a_branch2a'], 1, 1, biased=False, relu=False)
	err_lyr['res2a_branch2a'] = tf.get_variable(name='res2a_branch2a_lyr_err', shape=res2a_branch2a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res2a_branch2a'] = tf.add(res2a_branch2a, err_lyr['res2a_branch2a'])
	bn2a_branch2a = batch_normalization(layers_err['res2a_branch2a'], scale['bn2a_branch2a'], offset['bn2a_branch2a'], mean['bn2a_branch2a'], variance['bn2a_branch2a'], relu=True)
	res2a_branch2b = conv(bn2a_branch2a, weights_noisy['res2a_branch2b'], biases_noisy['res2a_branch2b'], 1, 1, biased=False, relu=False)
	err_lyr['res2a_branch2b'] = tf.get_variable(name='res2a_branch2b_lyr_err', shape=res2a_branch2b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res2a_branch2b'] = tf.add(res2a_branch2b, err_lyr['res2a_branch2b'])
	bn2a_branch2b = batch_normalization(layers_err['res2a_branch2b'], scale['bn2a_branch2b'], offset['bn2a_branch2b'], mean['bn2a_branch2b'], variance['bn2a_branch2b'], relu=True)
	res2a_branch2c = conv(bn2a_branch2b, weights_noisy['res2a_branch2c'], biases_noisy['res2a_branch2c'], 1, 1, biased=False, relu=False)
	err_lyr['res2a_branch2c'] = tf.get_variable(name='res2a_branch2c_lyr_err', shape=res2a_branch2c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res2a_branch2c'] = tf.add(res2a_branch2c, err_lyr['res2a_branch2c'])
	bn2a_branch2c = batch_normalization(layers_err['res2a_branch2c'], scale['bn2a_branch2c'], offset['bn2a_branch2c'], mean['bn2a_branch2c'], variance['bn2a_branch2c'])
	res2a = add([bn2a_branch1, bn2a_branch2c])
	err_lyr['res2a'] = tf.get_variable(name='res2a_lyr_err', shape=res2a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res2a'] = tf.add(res2a, err_lyr['res2a'])
	res2a_relu = relu(layers_err['res2a'])
	res2b_branch2a = conv(res2a_relu, weights_noisy['res2b_branch2a'], biases_noisy['res2b_branch2a'], 1, 1, biased=False, relu=False)
	err_lyr['res2b_branch2a'] = tf.get_variable(name='res2b_branch2a_lyr_err', shape=res2b_branch2a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res2b_branch2a'] = tf.add(res2b_branch2a, err_lyr['res2b_branch2a'])
	bn2b_branch2a = batch_normalization(layers_err['res2b_branch2a'], scale['bn2b_branch2a'], offset['bn2b_branch2a'], mean['bn2b_branch2a'], variance['bn2b_branch2a'], relu=True)
	res2b_branch2b = conv(bn2b_branch2a, weights_noisy['res2b_branch2b'], biases_noisy['res2b_branch2b'], 1, 1, biased=False, relu=False)
	err_lyr['res2b_branch2b'] = tf.get_variable(name='res2b_branch2b_lyr_err', shape=res2b_branch2b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res2b_branch2b'] = tf.add(res2b_branch2b, err_lyr['res2b_branch2b'])
	bn2b_branch2b = batch_normalization(layers_err['res2b_branch2b'], scale['bn2b_branch2b'], offset['bn2b_branch2b'], mean['bn2b_branch2b'], variance['bn2b_branch2b'], relu=True)
	res2b_branch2c = conv(bn2b_branch2b, weights_noisy['res2b_branch2c'], biases_noisy['res2b_branch2c'], 1, 1, biased=False, relu=False)
	err_lyr['res2b_branch2c'] = tf.get_variable(name='res2b_branch2c_lyr_err', shape=res2b_branch2c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res2b_branch2c'] = tf.add(res2b_branch2c, err_lyr['res2b_branch2c'])
	bn2b_branch2c = batch_normalization(layers_err['res2b_branch2c'], scale['bn2b_branch2c'], offset['bn2b_branch2c'], mean['bn2b_branch2c'], variance['bn2b_branch2c'])
	res2b = add([res2a_relu, bn2b_branch2c])
	err_lyr['res2b'] = tf.get_variable(name='res2b_lyr_err', shape=res2b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res2b'] = tf.add(res2b, err_lyr['res2b'])
	res2b_relu = relu(layers_err['res2b'])
	res2c_branch2a = conv(res2b_relu, weights_noisy['res2c_branch2a'], biases_noisy['res2c_branch2a'], 1, 1, biased=False, relu=False)
	err_lyr['res2c_branch2a'] = tf.get_variable(name='res2c_branch2a_lyr_err', shape=res2c_branch2a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res2c_branch2a'] = tf.add(res2c_branch2a, err_lyr['res2c_branch2a'])
	bn2c_branch2a = batch_normalization(layers_err['res2c_branch2a'], scale['bn2c_branch2a'], offset['bn2c_branch2a'], mean['bn2c_branch2a'], variance['bn2c_branch2a'], relu=True)
	res2c_branch2b = conv(bn2c_branch2a, weights_noisy['res2c_branch2b'], biases_noisy['res2c_branch2b'], 1, 1, biased=False, relu=False)
	err_lyr['res2c_branch2b'] = tf.get_variable(name='res2c_branch2b_lyr_err', shape=res2c_branch2b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res2c_branch2b'] = tf.add(res2c_branch2b, err_lyr['res2c_branch2b'])
	bn2c_branch2b = batch_normalization(layers_err['res2c_branch2b'], scale['bn2c_branch2b'], offset['bn2c_branch2b'], mean['bn2c_branch2b'], variance['bn2c_branch2b'], relu=True)
	res2c_branch2c = conv(bn2c_branch2b, weights_noisy['res2c_branch2c'], biases_noisy['res2c_branch2c'], 1, 1, biased=False, relu=False)
	err_lyr['res2c_branch2c'] = tf.get_variable(name='res2c_branch2c_lyr_err', shape=res2c_branch2c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res2c_branch2c'] = tf.add(res2c_branch2c, err_lyr['res2c_branch2c'])
	bn2c_branch2c = batch_normalization(layers_err['res2c_branch2c'], scale['bn2c_branch2c'], offset['bn2c_branch2c'], mean['bn2c_branch2c'], variance['bn2c_branch2c'])
	res2c = add([res2b_relu, bn2c_branch2c])
	err_lyr['res2c'] = tf.get_variable(name='res2c_lyr_err', shape=res2c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res2c'] = tf.add(res2c, err_lyr['res2c'])
	res2c_relu = relu(layers_err['res2c'])
	res3a_branch1 = conv(res2c_relu, weights_noisy['res3a_branch1'], biases_noisy['res3a_branch1'], 2, 2, biased=False, relu=False)
	err_lyr['res3a_branch1'] = tf.get_variable(name='res3a_branch1_lyr_err', shape=res3a_branch1.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res3a_branch1'] = tf.add(res3a_branch1, err_lyr['res3a_branch1'])
	bn3a_branch1 = batch_normalization(layers_err['res3a_branch1'], scale['bn3a_branch1'], offset['bn3a_branch1'], mean['bn3a_branch1'], variance['bn3a_branch1'])
	res3a_branch2a = conv(res2c_relu, weights_noisy['res3a_branch2a'], biases_noisy['res3a_branch2a'], 2, 2, biased=False, relu=False)
	err_lyr['res3a_branch2a'] = tf.get_variable(name='res3a_branch2a_lyr_err', shape=res3a_branch2a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res3a_branch2a'] = tf.add(res3a_branch2a, err_lyr['res3a_branch2a'])
	bn3a_branch2a = batch_normalization(layers_err['res3a_branch2a'], scale['bn3a_branch2a'], offset['bn3a_branch2a'], mean['bn3a_branch2a'], variance['bn3a_branch2a'], relu=True)
	res3a_branch2b = conv(bn3a_branch2a, weights_noisy['res3a_branch2b'], biases_noisy['res3a_branch2b'], 1, 1, biased=False, relu=False)
	err_lyr['res3a_branch2b'] = tf.get_variable(name='res3a_branch2b_lyr_err', shape=res3a_branch2b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res3a_branch2b'] = tf.add(res3a_branch2b, err_lyr['res3a_branch2b'])
	bn3a_branch2b = batch_normalization(layers_err['res3a_branch2b'], scale['bn3a_branch2b'], offset['bn3a_branch2b'], mean['bn3a_branch2b'], variance['bn3a_branch2b'], relu=True)
	res3a_branch2c = conv(bn3a_branch2b, weights_noisy['res3a_branch2c'], biases_noisy['res3a_branch2c'], 1, 1, biased=False, relu=False)
	err_lyr['res3a_branch2c'] = tf.get_variable(name='res3a_branch2c_lyr_err', shape=res3a_branch2c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res3a_branch2c'] = tf.add(res3a_branch2c, err_lyr['res3a_branch2c'])
	bn3a_branch2c = batch_normalization(layers_err['res3a_branch2c'], scale['bn3a_branch2c'], offset['bn3a_branch2c'], mean['bn3a_branch2c'], variance['bn3a_branch2c'])
	res3a = add([bn3a_branch1, bn3a_branch2c])
	err_lyr['res3a'] = tf.get_variable(name='res3a_lyr_err', shape=res3a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res3a'] = tf.add(res3a, err_lyr['res3a'])
	res3a_relu = relu(layers_err['res3a'])
	res3b_branch2a = conv(res3a_relu, weights_noisy['res3b_branch2a'], biases_noisy['res3b_branch2a'], 1, 1, biased=False, relu=False)
	err_lyr['res3b_branch2a'] = tf.get_variable(name='res3b_branch2a_lyr_err', shape=res3b_branch2a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res3b_branch2a'] = tf.add(res3b_branch2a, err_lyr['res3b_branch2a'])
	bn3b_branch2a = batch_normalization(layers_err['res3b_branch2a'], scale['bn3b_branch2a'], offset['bn3b_branch2a'], mean['bn3b_branch2a'], variance['bn3b_branch2a'], relu=True)
	res3b_branch2b = conv(bn3b_branch2a, weights_noisy['res3b_branch2b'], biases_noisy['res3b_branch2b'], 1, 1, biased=False, relu=False)
	err_lyr['res3b_branch2b'] = tf.get_variable(name='res3b_branch2b_lyr_err', shape=res3b_branch2b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res3b_branch2b'] = tf.add(res3b_branch2b, err_lyr['res3b_branch2b'])
	bn3b_branch2b = batch_normalization(layers_err['res3b_branch2b'], scale['bn3b_branch2b'], offset['bn3b_branch2b'], mean['bn3b_branch2b'], variance['bn3b_branch2b'], relu=True)
	res3b_branch2c = conv(bn3b_branch2b, weights_noisy['res3b_branch2c'], biases_noisy['res3b_branch2c'], 1, 1, biased=False, relu=False)
	err_lyr['res3b_branch2c'] = tf.get_variable(name='res3b_branch2c_lyr_err', shape=res3b_branch2c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res3b_branch2c'] = tf.add(res3b_branch2c, err_lyr['res3b_branch2c'])
	bn3b_branch2c = batch_normalization(layers_err['res3b_branch2c'], scale['bn3b_branch2c'], offset['bn3b_branch2c'], mean['bn3b_branch2c'], variance['bn3b_branch2c'])
	res3b = add([res3a_relu, bn3b_branch2c])
	err_lyr['res3b'] = tf.get_variable(name='res3b_lyr_err', shape=res3b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res3b'] = tf.add(res3b, err_lyr['res3b'])
	res3b_relu = relu(layers_err['res3b'])
	res3c_branch2a = conv(res3b_relu, weights_noisy['res3c_branch2a'], biases_noisy['res3c_branch2a'], 1, 1, biased=False, relu=False)
	err_lyr['res3c_branch2a'] = tf.get_variable(name='res3c_branch2a_lyr_err', shape=res3c_branch2a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res3c_branch2a'] = tf.add(res3c_branch2a, err_lyr['res3c_branch2a'])
	bn3c_branch2a = batch_normalization(layers_err['res3c_branch2a'], scale['bn3c_branch2a'], offset['bn3c_branch2a'], mean['bn3c_branch2a'], variance['bn3c_branch2a'], relu=True)
	res3c_branch2b = conv(bn3c_branch2a, weights_noisy['res3c_branch2b'], biases_noisy['res3c_branch2b'], 1, 1, biased=False, relu=False)
	err_lyr['res3c_branch2b'] = tf.get_variable(name='res3c_branch2b_lyr_err', shape=res3c_branch2b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res3c_branch2b'] = tf.add(res3c_branch2b, err_lyr['res3c_branch2b'])
	bn3c_branch2b = batch_normalization(layers_err['res3c_branch2b'], scale['bn3c_branch2b'], offset['bn3c_branch2b'], mean['bn3c_branch2b'], variance['bn3c_branch2b'], relu=True)
	res3c_branch2c = conv(bn3c_branch2b, weights_noisy['res3c_branch2c'], biases_noisy['res3c_branch2c'], 1, 1, biased=False, relu=False)
	err_lyr['res3c_branch2c'] = tf.get_variable(name='res3c_branch2c_lyr_err', shape=res3c_branch2c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res3c_branch2c'] = tf.add(res3c_branch2c, err_lyr['res3c_branch2c'])
	bn3c_branch2c = batch_normalization(layers_err['res3c_branch2c'], scale['bn3c_branch2c'], offset['bn3c_branch2c'], mean['bn3c_branch2c'], variance['bn3c_branch2c'])
	res3c = add([res3b_relu, bn3c_branch2c])
	err_lyr['res3c'] = tf.get_variable(name='res3c_lyr_err', shape=res3c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res3c'] = tf.add(res3c, err_lyr['res3c'])
	res3c_relu = relu(layers_err['res3c'])
	res3d_branch2a = conv(res3c_relu, weights_noisy['res3d_branch2a'], biases_noisy['res3d_branch2a'], 1, 1, biased=False, relu=False)
	err_lyr['res3d_branch2a'] = tf.get_variable(name='res3d_branch2a_lyr_err', shape=res3d_branch2a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res3d_branch2a'] = tf.add(res3d_branch2a, err_lyr['res3d_branch2a'])
	bn3d_branch2a = batch_normalization(layers_err['res3d_branch2a'], scale['bn3d_branch2a'], offset['bn3d_branch2a'], mean['bn3d_branch2a'], variance['bn3d_branch2a'], relu=True)
	res3d_branch2b = conv(bn3d_branch2a, weights_noisy['res3d_branch2b'], biases_noisy['res3d_branch2b'], 1, 1, biased=False, relu=False)
	err_lyr['res3d_branch2b'] = tf.get_variable(name='res3d_branch2b_lyr_err', shape=res3d_branch2b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res3d_branch2b'] = tf.add(res3d_branch2b, err_lyr['res3d_branch2b'])
	bn3d_branch2b = batch_normalization(layers_err['res3d_branch2b'], scale['bn3d_branch2b'], offset['bn3d_branch2b'], mean['bn3d_branch2b'], variance['bn3d_branch2b'], relu=True)
	res3d_branch2c = conv(bn3d_branch2b, weights_noisy['res3d_branch2c'], biases_noisy['res3d_branch2c'], 1, 1, biased=False, relu=False)
	err_lyr['res3d_branch2c'] = tf.get_variable(name='res3d_branch2c_lyr_err', shape=res3d_branch2c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res3d_branch2c'] = tf.add(res3d_branch2c, err_lyr['res3d_branch2c'])
	bn3d_branch2c = batch_normalization(layers_err['res3d_branch2c'], scale['bn3d_branch2c'], offset['bn3d_branch2c'], mean['bn3d_branch2c'], variance['bn3d_branch2c'])
	res3d = add([res3c_relu, bn3d_branch2c])
	err_lyr['res3d'] = tf.get_variable(name='res3d_lyr_err', shape=res3d.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res3d'] = tf.add(res3d, err_lyr['res3d'])
	res3d_relu = relu(layers_err['res3d'])
	res4a_branch1 = conv(res3d_relu, weights_noisy['res4a_branch1'], biases_noisy['res4a_branch1'], 2, 2, biased=False, relu=False)
	err_lyr['res4a_branch1'] = tf.get_variable(name='res4a_branch1_lyr_err', shape=res4a_branch1.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4a_branch1'] = tf.add(res4a_branch1, err_lyr['res4a_branch1'])
	bn4a_branch1 = batch_normalization(layers_err['res4a_branch1'], scale['bn4a_branch1'], offset['bn4a_branch1'], mean['bn4a_branch1'], variance['bn4a_branch1'])
	res4a_branch2a = conv(res3d_relu, weights_noisy['res4a_branch2a'], biases_noisy['res4a_branch2a'], 2, 2, biased=False, relu=False)
	err_lyr['res4a_branch2a'] = tf.get_variable(name='res4a_branch2a_lyr_err', shape=res4a_branch2a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4a_branch2a'] = tf.add(res4a_branch2a, err_lyr['res4a_branch2a'])
	bn4a_branch2a = batch_normalization(layers_err['res4a_branch2a'], scale['bn4a_branch2a'], offset['bn4a_branch2a'], mean['bn4a_branch2a'], variance['bn4a_branch2a'], relu=True)
	res4a_branch2b = conv(bn4a_branch2a, weights_noisy['res4a_branch2b'], biases_noisy['res4a_branch2b'], 1, 1, biased=False, relu=False)
	err_lyr['res4a_branch2b'] = tf.get_variable(name='res4a_branch2b_lyr_err', shape=res4a_branch2b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4a_branch2b'] = tf.add(res4a_branch2b, err_lyr['res4a_branch2b'])
	bn4a_branch2b = batch_normalization(layers_err['res4a_branch2b'], scale['bn4a_branch2b'], offset['bn4a_branch2b'], mean['bn4a_branch2b'], variance['bn4a_branch2b'], relu=True)
	res4a_branch2c = conv(bn4a_branch2b, weights_noisy['res4a_branch2c'], biases_noisy['res4a_branch2c'], 1, 1, biased=False, relu=False)
	err_lyr['res4a_branch2c'] = tf.get_variable(name='res4a_branch2c_lyr_err', shape=res4a_branch2c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4a_branch2c'] = tf.add(res4a_branch2c, err_lyr['res4a_branch2c'])
	bn4a_branch2c = batch_normalization(layers_err['res4a_branch2c'], scale['bn4a_branch2c'], offset['bn4a_branch2c'], mean['bn4a_branch2c'], variance['bn4a_branch2c'])
	res4a = add([bn4a_branch1, bn4a_branch2c])
	err_lyr['res4a'] = tf.get_variable(name='res4a_lyr_err', shape=res4a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4a'] = tf.add(res4a, err_lyr['res4a'])
	res4a_relu = relu(layers_err['res4a'])
	res4b_branch2a = conv(res4a_relu, weights_noisy['res4b_branch2a'], biases_noisy['res4b_branch2a'], 1, 1, biased=False, relu=False)
	err_lyr['res4b_branch2a'] = tf.get_variable(name='res4b_branch2a_lyr_err', shape=res4b_branch2a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4b_branch2a'] = tf.add(res4b_branch2a, err_lyr['res4b_branch2a'])
	bn4b_branch2a = batch_normalization(layers_err['res4b_branch2a'], scale['bn4b_branch2a'], offset['bn4b_branch2a'], mean['bn4b_branch2a'], variance['bn4b_branch2a'], relu=True)
	res4b_branch2b = conv(bn4b_branch2a, weights_noisy['res4b_branch2b'], biases_noisy['res4b_branch2b'], 1, 1, biased=False, relu=False)
	err_lyr['res4b_branch2b'] = tf.get_variable(name='res4b_branch2b_lyr_err', shape=res4b_branch2b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4b_branch2b'] = tf.add(res4b_branch2b, err_lyr['res4b_branch2b'])
	bn4b_branch2b = batch_normalization(layers_err['res4b_branch2b'], scale['bn4b_branch2b'], offset['bn4b_branch2b'], mean['bn4b_branch2b'], variance['bn4b_branch2b'], relu=True)
	res4b_branch2c = conv(bn4b_branch2b, weights_noisy['res4b_branch2c'], biases_noisy['res4b_branch2c'], 1, 1, biased=False, relu=False)
	err_lyr['res4b_branch2c'] = tf.get_variable(name='res4b_branch2c_lyr_err', shape=res4b_branch2c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4b_branch2c'] = tf.add(res4b_branch2c, err_lyr['res4b_branch2c'])
	bn4b_branch2c = batch_normalization(layers_err['res4b_branch2c'], scale['bn4b_branch2c'], offset['bn4b_branch2c'], mean['bn4b_branch2c'], variance['bn4b_branch2c'])
	res4b = add([res4a_relu, bn4b_branch2c])
	err_lyr['res4b'] = tf.get_variable(name='res4b_lyr_err', shape=res4b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4b'] = tf.add(res4b, err_lyr['res4b'])
	res4b_relu = relu(layers_err['res4b'])
	res4c_branch2a = conv(res4b_relu, weights_noisy['res4c_branch2a'], biases_noisy['res4c_branch2a'], 1, 1, biased=False, relu=False)
	err_lyr['res4c_branch2a'] = tf.get_variable(name='res4c_branch2a_lyr_err', shape=res4c_branch2a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4c_branch2a'] = tf.add(res4c_branch2a, err_lyr['res4c_branch2a'])
	bn4c_branch2a = batch_normalization(layers_err['res4c_branch2a'], scale['bn4c_branch2a'], offset['bn4c_branch2a'], mean['bn4c_branch2a'], variance['bn4c_branch2a'], relu=True)
	res4c_branch2b = conv(bn4c_branch2a, weights_noisy['res4c_branch2b'], biases_noisy['res4c_branch2b'], 1, 1, biased=False, relu=False)
	err_lyr['res4c_branch2b'] = tf.get_variable(name='res4c_branch2b_lyr_err', shape=res4c_branch2b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4c_branch2b'] = tf.add(res4c_branch2b, err_lyr['res4c_branch2b'])
	bn4c_branch2b = batch_normalization(layers_err['res4c_branch2b'], scale['bn4c_branch2b'], offset['bn4c_branch2b'], mean['bn4c_branch2b'], variance['bn4c_branch2b'], relu=True)
	res4c_branch2c = conv(bn4c_branch2b, weights_noisy['res4c_branch2c'], biases_noisy['res4c_branch2c'], 1, 1, biased=False, relu=False)
	err_lyr['res4c_branch2c'] = tf.get_variable(name='res4c_branch2c_lyr_err', shape=res4c_branch2c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4c_branch2c'] = tf.add(res4c_branch2c, err_lyr['res4c_branch2c'])
	bn4c_branch2c = batch_normalization(layers_err['res4c_branch2c'], scale['bn4c_branch2c'], offset['bn4c_branch2c'], mean['bn4c_branch2c'], variance['bn4c_branch2c'])
	res4c = add([res4b_relu, bn4c_branch2c])
	err_lyr['res4c'] = tf.get_variable(name='res4c_lyr_err', shape=res4c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4c'] = tf.add(res4c, err_lyr['res4c'])
	res4c_relu = relu(layers_err['res4c'])
	res4d_branch2a = conv(res4c_relu, weights_noisy['res4d_branch2a'], biases_noisy['res4d_branch2a'], 1, 1, biased=False, relu=False)
	err_lyr['res4d_branch2a'] = tf.get_variable(name='res4d_branch2a_lyr_err', shape=res4d_branch2a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4d_branch2a'] = tf.add(res4d_branch2a, err_lyr['res4d_branch2a'])
	bn4d_branch2a = batch_normalization(layers_err['res4d_branch2a'], scale['bn4d_branch2a'], offset['bn4d_branch2a'], mean['bn4d_branch2a'], variance['bn4d_branch2a'], relu=True)
	res4d_branch2b = conv(bn4d_branch2a, weights_noisy['res4d_branch2b'], biases_noisy['res4d_branch2b'], 1, 1, biased=False, relu=False)
	err_lyr['res4d_branch2b'] = tf.get_variable(name='res4d_branch2b_lyr_err', shape=res4d_branch2b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4d_branch2b'] = tf.add(res4d_branch2b, err_lyr['res4d_branch2b'])
	bn4d_branch2b = batch_normalization(layers_err['res4d_branch2b'], scale['bn4d_branch2b'], offset['bn4d_branch2b'], mean['bn4d_branch2b'], variance['bn4d_branch2b'], relu=True)
	res4d_branch2c = conv(bn4d_branch2b, weights_noisy['res4d_branch2c'], biases_noisy['res4d_branch2c'], 1, 1, biased=False, relu=False)
	err_lyr['res4d_branch2c'] = tf.get_variable(name='res4d_branch2c_lyr_err', shape=res4d_branch2c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4d_branch2c'] = tf.add(res4d_branch2c, err_lyr['res4d_branch2c'])
	bn4d_branch2c = batch_normalization(layers_err['res4d_branch2c'], scale['bn4d_branch2c'], offset['bn4d_branch2c'], mean['bn4d_branch2c'], variance['bn4d_branch2c'])
	res4d = add([res4c_relu, bn4d_branch2c])
	err_lyr['res4d'] = tf.get_variable(name='res4d_lyr_err', shape=res4d.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4d'] = tf.add(res4d, err_lyr['res4d'])
	res4d_relu = relu(layers_err['res4d'])
	res4e_branch2a = conv(res4d_relu, weights_noisy['res4e_branch2a'], biases_noisy['res4e_branch2a'], 1, 1, biased=False, relu=False)
	err_lyr['res4e_branch2a'] = tf.get_variable(name='res4e_branch2a_lyr_err', shape=res4e_branch2a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4e_branch2a'] = tf.add(res4e_branch2a, err_lyr['res4e_branch2a'])
	bn4e_branch2a = batch_normalization(layers_err['res4e_branch2a'], scale['bn4e_branch2a'], offset['bn4e_branch2a'], mean['bn4e_branch2a'], variance['bn4e_branch2a'], relu=True)
	res4e_branch2b = conv(bn4e_branch2a, weights_noisy['res4e_branch2b'], biases_noisy['res4e_branch2b'], 1, 1, biased=False, relu=False)
	err_lyr['res4e_branch2b'] = tf.get_variable(name='res4e_branch2b_lyr_err', shape=res4e_branch2b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4e_branch2b'] = tf.add(res4e_branch2b, err_lyr['res4e_branch2b'])
	bn4e_branch2b = batch_normalization(layers_err['res4e_branch2b'], scale['bn4e_branch2b'], offset['bn4e_branch2b'], mean['bn4e_branch2b'], variance['bn4e_branch2b'], relu=True)
	res4e_branch2c = conv(bn4e_branch2b, weights_noisy['res4e_branch2c'], biases_noisy['res4e_branch2c'], 1, 1, biased=False, relu=False)
	err_lyr['res4e_branch2c'] = tf.get_variable(name='res4e_branch2c_lyr_err', shape=res4e_branch2c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4e_branch2c'] = tf.add(res4e_branch2c, err_lyr['res4e_branch2c'])
	bn4e_branch2c = batch_normalization(layers_err['res4e_branch2c'], scale['bn4e_branch2c'], offset['bn4e_branch2c'], mean['bn4e_branch2c'], variance['bn4e_branch2c'])
	res4e = add([res4d_relu, bn4e_branch2c])
	err_lyr['res4e'] = tf.get_variable(name='res4e_lyr_err', shape=res4e.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4e'] = tf.add(res4e, err_lyr['res4e'])
	res4e_relu = relu(layers_err['res4e'])
	res4f_branch2a = conv(res4e_relu, weights_noisy['res4f_branch2a'], biases_noisy['res4f_branch2a'], 1, 1, biased=False, relu=False)
	err_lyr['res4f_branch2a'] = tf.get_variable(name='res4f_branch2a_lyr_err', shape=res4f_branch2a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4f_branch2a'] = tf.add(res4f_branch2a, err_lyr['res4f_branch2a'])
	bn4f_branch2a = batch_normalization(layers_err['res4f_branch2a'], scale['bn4f_branch2a'], offset['bn4f_branch2a'], mean['bn4f_branch2a'], variance['bn4f_branch2a'], relu=True)
	res4f_branch2b = conv(bn4f_branch2a, weights_noisy['res4f_branch2b'], biases_noisy['res4f_branch2b'], 1, 1, biased=False, relu=False)
	err_lyr['res4f_branch2b'] = tf.get_variable(name='res4f_branch2b_lyr_err', shape=res4f_branch2b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4f_branch2b'] = tf.add(res4f_branch2b, err_lyr['res4f_branch2b'])
	bn4f_branch2b = batch_normalization(layers_err['res4f_branch2b'], scale['bn4f_branch2b'], offset['bn4f_branch2b'], mean['bn4f_branch2b'], variance['bn4f_branch2b'], relu=True)
	res4f_branch2c = conv(bn4f_branch2b, weights_noisy['res4f_branch2c'], biases_noisy['res4f_branch2c'], 1, 1, biased=False, relu=False)
	err_lyr['res4f_branch2c'] = tf.get_variable(name='res4f_branch2c_lyr_err', shape=res4f_branch2c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4f_branch2c'] = tf.add(res4f_branch2c, err_lyr['res4f_branch2c'])
	bn4f_branch2c = batch_normalization(layers_err['res4f_branch2c'], scale['bn4f_branch2c'], offset['bn4f_branch2c'], mean['bn4f_branch2c'], variance['bn4f_branch2c'])
	res4f = add([res4e_relu, bn4f_branch2c])
	err_lyr['res4f'] = tf.get_variable(name='res4f_lyr_err', shape=res4f.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res4f'] = tf.add(res4f, err_lyr['res4f'])
	res4f_relu = relu(layers_err['res4f'])
	res5a_branch1 = conv(res4f_relu, weights_noisy['res5a_branch1'], biases_noisy['res5a_branch1'], 2, 2, biased=False, relu=False)
	err_lyr['res5a_branch1'] = tf.get_variable(name='res5a_branch1_lyr_err', shape=res5a_branch1.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res5a_branch1'] = tf.add(res5a_branch1, err_lyr['res5a_branch1'])
	bn5a_branch1 = batch_normalization(layers_err['res5a_branch1'], scale['bn5a_branch1'], offset['bn5a_branch1'], mean['bn5a_branch1'], variance['bn5a_branch1'])
	res5a_branch2a = conv(res4f_relu, weights_noisy['res5a_branch2a'], biases_noisy['res5a_branch2a'], 2, 2, biased=False, relu=False)
	err_lyr['res5a_branch2a'] = tf.get_variable(name='res5a_branch2a_lyr_err', shape=res5a_branch2a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res5a_branch2a'] = tf.add(res5a_branch2a, err_lyr['res5a_branch2a'])
	bn5a_branch2a = batch_normalization(layers_err['res5a_branch2a'], scale['bn5a_branch2a'], offset['bn5a_branch2a'], mean['bn5a_branch2a'], variance['bn5a_branch2a'], relu=True)
	res5a_branch2b = conv(bn5a_branch2a, weights_noisy['res5a_branch2b'], biases_noisy['res5a_branch2b'], 1, 1, biased=False, relu=False)
	err_lyr['res5a_branch2b'] = tf.get_variable(name='res5a_branch2b_lyr_err', shape=res5a_branch2b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res5a_branch2b'] = tf.add(res5a_branch2b, err_lyr['res5a_branch2b'])
	bn5a_branch2b = batch_normalization(layers_err['res5a_branch2b'], scale['bn5a_branch2b'], offset['bn5a_branch2b'], mean['bn5a_branch2b'], variance['bn5a_branch2b'], relu=True)
	res5a_branch2c = conv(bn5a_branch2b, weights_noisy['res5a_branch2c'], biases_noisy['res5a_branch2c'], 1, 1, biased=False, relu=False)
	err_lyr['res5a_branch2c'] = tf.get_variable(name='res5a_branch2c_lyr_err', shape=res5a_branch2c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res5a_branch2c'] = tf.add(res5a_branch2c, err_lyr['res5a_branch2c'])
	bn5a_branch2c = batch_normalization(layers_err['res5a_branch2c'], scale['bn5a_branch2c'], offset['bn5a_branch2c'], mean['bn5a_branch2c'], variance['bn5a_branch2c'])
	res5a = add([bn5a_branch1, bn5a_branch2c])
	err_lyr['res5a'] = tf.get_variable(name='res5a_lyr_err', shape=res5a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res5a'] = tf.add(res5a, err_lyr['res5a'])
	res5a_relu = relu(layers_err['res5a'])
	res5b_branch2a = conv(res5a_relu, weights_noisy['res5b_branch2a'], biases_noisy['res5b_branch2a'], 1, 1, biased=False, relu=False)
	err_lyr['res5b_branch2a'] = tf.get_variable(name='res5b_branch2a_lyr_err', shape=res5b_branch2a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res5b_branch2a'] = tf.add(res5b_branch2a, err_lyr['res5b_branch2a'])
	bn5b_branch2a = batch_normalization(layers_err['res5b_branch2a'], scale['bn5b_branch2a'], offset['bn5b_branch2a'], mean['bn5b_branch2a'], variance['bn5b_branch2a'], relu=True)
	res5b_branch2b = conv(bn5b_branch2a, weights_noisy['res5b_branch2b'], biases_noisy['res5b_branch2b'], 1, 1, biased=False, relu=False)
	err_lyr['res5b_branch2b'] = tf.get_variable(name='res5b_branch2b_lyr_err', shape=res5b_branch2b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res5b_branch2b'] = tf.add(res5b_branch2b, err_lyr['res5b_branch2b'])
	bn5b_branch2b = batch_normalization(layers_err['res5b_branch2b'], scale['bn5b_branch2b'], offset['bn5b_branch2b'], mean['bn5b_branch2b'], variance['bn5b_branch2b'], relu=True)
	res5b_branch2c = conv(bn5b_branch2b, weights_noisy['res5b_branch2c'], biases_noisy['res5b_branch2c'], 1, 1, biased=False, relu=False)
	err_lyr['res5b_branch2c'] = tf.get_variable(name='res5b_branch2c_lyr_err', shape=res5b_branch2c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res5b_branch2c'] = tf.add(res5b_branch2c, err_lyr['res5b_branch2c'])
	bn5b_branch2c = batch_normalization(layers_err['res5b_branch2c'], scale['bn5b_branch2c'], offset['bn5b_branch2c'], mean['bn5b_branch2c'], variance['bn5b_branch2c'])
	res5b = add([res5a_relu, bn5b_branch2c])
	err_lyr['res5b'] = tf.get_variable(name='res5b_lyr_err', shape=res5b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res5b'] = tf.add(res5b, err_lyr['res5b'])
	res5b_relu = relu(layers_err['res5b'])
	res5c_branch2a = conv(res5b_relu, weights_noisy['res5c_branch2a'], biases_noisy['res5c_branch2a'], 1, 1, biased=False, relu=False)
	err_lyr['res5c_branch2a'] = tf.get_variable(name='res5c_branch2a_lyr_err', shape=res5c_branch2a.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res5c_branch2a'] = tf.add(res5c_branch2a, err_lyr['res5c_branch2a'])
	bn5c_branch2a = batch_normalization(layers_err['res5c_branch2a'], scale['bn5c_branch2a'], offset['bn5c_branch2a'], mean['bn5c_branch2a'], variance['bn5c_branch2a'], relu=True)
	res5c_branch2b = conv(bn5c_branch2a, weights_noisy['res5c_branch2b'], biases_noisy['res5c_branch2b'], 1, 1, biased=False, relu=False)
	err_lyr['res5c_branch2b'] = tf.get_variable(name='res5c_branch2b_lyr_err', shape=res5c_branch2b.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res5c_branch2b'] = tf.add(res5c_branch2b, err_lyr['res5c_branch2b'])
	bn5c_branch2b = batch_normalization(layers_err['res5c_branch2b'], scale['bn5c_branch2b'], offset['bn5c_branch2b'], mean['bn5c_branch2b'], variance['bn5c_branch2b'], relu=True)
	res5c_branch2c = conv(bn5c_branch2b, weights_noisy['res5c_branch2c'], biases_noisy['res5c_branch2c'], 1, 1, biased=False, relu=False)
	err_lyr['res5c_branch2c'] = tf.get_variable(name='res5c_branch2c_lyr_err', shape=res5c_branch2c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res5c_branch2c'] = tf.add(res5c_branch2c, err_lyr['res5c_branch2c'])
	bn5c_branch2c = batch_normalization(layers_err['res5c_branch2c'], scale['bn5c_branch2c'], offset['bn5c_branch2c'], mean['bn5c_branch2c'], variance['bn5c_branch2c'])
	res5c = add([res5b_relu, bn5c_branch2c])
	err_lyr['res5c'] = tf.get_variable(name='res5c_lyr_err', shape=res5c.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['res5c'] = tf.add(res5c, err_lyr['res5c'])
	res5c_relu = relu(layers_err['res5c'])
	pool5 = avg_pool(res5c_relu, 7, 7, 1, 1, padding='VALID')
	fc1000 = fc(pool5, weights_noisy['fc1000'], biases_noisy['fc1000'], relu=False)
	err_lyr['fc1000'] = tf.get_variable(name='fc1000_lyr_err', shape=fc1000.shape[1:], initializer=tf.random_normal_initializer(mean=err_mean[3], stddev=err_stddev[3]), trainable=train_vars[3])
	layers_err['fc1000'] = tf.add(fc1000, err_lyr['fc1000'])
	return layers_err['fc1000'], err_w, err_b, err_lyr
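
Every layer in the function above repeats the same three-step pattern: compute the layer, create a per-activation error variable drawn from a normal distribution, and add it to the activations. A compact sketch of that pattern follows, using the same TF1-style API as above; the helper name and its usage are hypothetical.

import tensorflow as tf  # TF1-style API, as used in the example above

def add_layer_noise(layer_out, name, err_mean, err_stddev, trainable):
    # Create an additive per-activation error term and apply it to one layer's output.
    err = tf.get_variable(
        name=name + '_lyr_err',
        shape=layer_out.shape[1:],  # one error value per activation
        initializer=tf.random_normal_initializer(mean=err_mean, stddev=err_stddev),
        trainable=trainable)
    return tf.add(layer_out, err), err

# Hypothetical usage inside the network definition:
# conv1 = conv(input_node_noisy, weights_noisy['conv1'], biases_noisy['conv1'], 2, 2, relu=False)
# layers_err['conv1'], err_lyr['conv1'] = add_layer_noise(conv1, 'conv1', err_mean[3], err_stddev[3], train_vars[3])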
	
Example #5
def eval_one_epoch(sess, ops, eval_writer, poses):
    # Arguments:
    # sess: 		Tensorflow session to handle tensors.
    # ops:			Dictionary for tensors of Network
    # templates:	Training Point Cloud data.
    # poses: 		Training pose data.

    is_training = False
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False

    loss_sum = 0  # Total Loss in each batch.
    templates, sources, poses = helper.read_partial_data(
        'train_data', 'partial_data_eval.h5')
    num_batches = int(templates.shape[0] /
                      BATCH_SIZE)  # Number of batches in an epoch.

    for fn in range(num_batches):
        start_idx = fn * BATCH_SIZE  # Start index of poses.
        end_idx = (fn + 1) * BATCH_SIZE  # End index of poses.

        template_data = np.copy(templates[start_idx:end_idx])
        batch_euler_poses = np.copy(poses[start_idx:end_idx])

        # Check for partial source.
        if np.random.sample() < ADD_PARTIAL:
            source_data = np.copy(sources[start_idx:end_idx])
            source_full_data = helper.apply_transformation(
                template_data, batch_euler_poses)
        else:
            source_data = np.copy(templates[start_idx:end_idx, 0:512])
            source_data = helper.apply_transformation(source_data,
                                                      batch_euler_poses)
            source_full_data = helper.apply_transformation(
                template_data, batch_euler_poses)

        # Check for noise in source.
        if np.random.sample() < ADD_NOISE:
            source_data = helper.add_noise(source_data)

        if centroid_subtraction_switch:
            source_data = source_data - np.mean(
                source_data, axis=1, keepdims=True)
            template_data = template_data - np.mean(
                template_data, axis=1, keepdims=True)
            source_full_data = source_full_data - np.mean(
                source_full_data, axis=1, keepdims=True)

        # Only choose a limited number of points from the source and template data.
        template_data = template_data[:, 0:NUM_POINT, :]
        source_full_data = source_full_data[:, 0:NUM_POINT, :]

        # To visualize the source and point clouds:
        if display_ptClouds:
            helper.display_clouds_data(source_data[0])
            helper.display_clouds_data(template_data[0])

        TRANSFORMATIONS = np.identity(
            4)  # Initialize identity transformation matrix.
        TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS, BATCH_SIZE, 1).reshape(
            BATCH_SIZE, 4,
            4)  # Initialize one identity matrix per element of the batch.

        # Iterations for pose refinement.
        for loop_idx in range(MAX_LOOPS - 1):
            # 4a
            # Feed the placeholders of Network with template data and source data.
            feed_dict = {
                ops['source_pointclouds_pl']: source_data,
                ops['template_pointclouds_pl']: template_data,
                ops['is_training_pl']: is_training
            }
            predicted_transformation = sess.run(
                [ops['predicted_transformation']],
                feed_dict=feed_dict)  # Ask the network to predict the pose.

            TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
                predicted_transformation, TRANSFORMATIONS, source_data)

            # Display Results after each iteration.
            if display_poses_in_itr:
                print(predicted_transformation[0][0, 0:3])
                print(predicted_transformation[0][0, 3:7] * (180 / np.pi))
            if display_ptClouds_in_itr:
                helper.display_clouds_data(source_data[0])

        # Feed the placeholders of Network with source data and template data obtained from N-Iterations.
        feed_dict = {
            ops['source_pointclouds_pl']: source_data,
            ops['template_pointclouds_pl']: template_data,
            ops['full_source_pointclouds_pl']: source_full_data,
            ops['is_training_pl']: is_training
        }

        # Ask the network to predict transformation, calculate loss using distance between actual points.
        summary, step, loss_val, predicted_transformation = sess.run(
            [
                ops['merged'], ops['step'], ops['loss'],
                ops['predicted_transformation']
            ],
            feed_dict=feed_dict)

        eval_writer.add_summary(
            summary, step)  # Add all the summary to the tensorboard.

        TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
            predicted_transformation, TRANSFORMATIONS, source_data)

        final_pose = helper.find_final_pose_inv(
            TRANSFORMATIONS
        )  # Find the final pose (translation, orientation (euler angles in degrees)) from transformation matrix.

        # Display the ground truth pose and predicted pose for first Point Cloud in batch
        if display_poses:
            print('Ground Truth Position: {}'.format(
                batch_euler_poses[0, 0:3].tolist()))
            print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
            print('Ground Truth Orientation: {}'.format(
                (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
            print('Predicted Orientation: {}'.format(
                (final_pose[0, 3:6] * (180 / np.pi)).tolist()))

        # Display Loss Value.
        print("Batch: {}, Loss: {}\r".format(fn, loss_val), end='')

        # Add loss for each batch.
        loss_sum += loss_val
    print('\n')
    log_string(
        'Eval Mean loss: %f' %
        (loss_sum / num_batches))  # Store and display mean loss of epoch.
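
Both evaluation loops recover the final pose from the accumulated 4x4 matrices via helper.find_final_pose_inv, which is not shown in these examples. Below is a minimal sketch of one plausible implementation; the ZYX Euler convention, the inversion step, and returning angles in radians (so the display code can scale them by 180/π) are assumptions, not the repository's actual definition.

import numpy as np

def mat_to_pose_inv(T):
    # Invert one 4x4 transform and return [tx, ty, tz, rx, ry, rz] (angles in radians).
    R, t = T[0:3, 0:3], T[0:3, 3]
    R_inv = R.T                # inverse rotation
    t_inv = -R_inv @ t         # inverse translation
    ry = -np.arcsin(np.clip(R_inv[2, 0], -1.0, 1.0))
    rx = np.arctan2(R_inv[2, 1], R_inv[2, 2])
    rz = np.arctan2(R_inv[1, 0], R_inv[0, 0])
    return np.array([t_inv[0], t_inv[1], t_inv[2], rx, ry, rz])

def find_final_pose_inv_sketch(TRANSFORMATIONS):
    # Batched version: (B, 4, 4) -> (B, 6).
    return np.stack([mat_to_pose_inv(T) for T in TRANSFORMATIONS])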
Example #6
def nin_noisy(input_node, netparams, err_mean, err_stddev, train_vars):
    weights_noisy, biases_noisy, err_w, err_b = helper.add_noise(
        netparams['weights'], netparams['biases'], err_mean, err_stddev,
        train_vars)
    mean, variance, scale, offset = netparams['mean'], netparams[
        'variance'], netparams['scale'], netparams['offset']
    err_lyr = {}
    layers_err = {}
    data_spec = helper.get_data_spec('nin')
    err_lyr['input'] = tf.get_variable(
        name='input_lyr_err',
        shape=(1, data_spec.crop_size, data_spec.crop_size,
               data_spec.channels),
        initializer=tf.random_normal_initializer(mean=err_mean[0],
                                                 stddev=err_stddev[0]),
        trainable=train_vars[0])
    input_node_noisy = tf.add(input_node, err_lyr['input'])
    conv1 = conv(input_node_noisy,
                 weights_noisy['conv1'],
                 biases_noisy['conv1'],
                 4,
                 4,
                 padding='VALID')
    err_lyr['conv1'] = tf.get_variable(
        name='conv1_lyr_err',
        shape=conv1.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['conv1'] = tf.add(conv1, err_lyr['conv1'])
    cccp1 = conv(layers_err['conv1'], weights_noisy['cccp1'],
                 biases_noisy['cccp1'], 1, 1)
    err_lyr['cccp1'] = tf.get_variable(
        name='cccp1_lyr_err',
        shape=cccp1.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['cccp1'] = tf.add(cccp1, err_lyr['cccp1'])
    cccp2 = conv(layers_err['cccp1'], weights_noisy['cccp2'],
                 biases_noisy['cccp2'], 1, 1)
    err_lyr['cccp2'] = tf.get_variable(
        name='cccp2_lyr_err',
        shape=cccp2.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['cccp2'] = tf.add(cccp2, err_lyr['cccp2'])
    pool1 = max_pool(layers_err['cccp2'], 3, 3, 2, 2)
    conv2 = conv(pool1, weights_noisy['conv2'], biases_noisy['conv2'], 1, 1)
    err_lyr['conv2'] = tf.get_variable(
        name='conv2_lyr_err',
        shape=conv2.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['conv2'] = tf.add(conv2, err_lyr['conv2'])
    cccp3 = conv(layers_err['conv2'], weights_noisy['cccp3'],
                 biases_noisy['cccp3'], 1, 1)
    err_lyr['cccp3'] = tf.get_variable(
        name='cccp3_lyr_err',
        shape=cccp3.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['cccp3'] = tf.add(cccp3, err_lyr['cccp3'])
    cccp4 = conv(layers_err['cccp3'], weights_noisy['cccp4'],
                 biases_noisy['cccp4'], 1, 1)
    err_lyr['cccp4'] = tf.get_variable(
        name='cccp4_lyr_err',
        shape=cccp4.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['cccp4'] = tf.add(cccp4, err_lyr['cccp4'])
    pool2 = max_pool(layers_err['cccp4'], 3, 3, 2, 2, padding='VALID')
    conv3 = conv(pool2, weights_noisy['conv3'], biases_noisy['conv3'], 1, 1)
    err_lyr['conv3'] = tf.get_variable(
        name='conv3_lyr_err',
        shape=conv3.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['conv3'] = tf.add(conv3, err_lyr['conv3'])
    cccp5 = conv(layers_err['conv3'], weights_noisy['cccp5'],
                 biases_noisy['cccp5'], 1, 1)
    err_lyr['cccp5'] = tf.get_variable(
        name='cccp5_lyr_err',
        shape=cccp5.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['cccp5'] = tf.add(cccp5, err_lyr['cccp5'])
    cccp6 = conv(layers_err['cccp5'], weights_noisy['cccp6'],
                 biases_noisy['cccp6'], 1, 1)
    err_lyr['cccp6'] = tf.get_variable(
        name='cccp6_lyr_err',
        shape=cccp6.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['cccp6'] = tf.add(cccp6, err_lyr['cccp6'])
    pool3 = max_pool(layers_err['cccp6'], 3, 3, 2, 2, padding='VALID')
    conv4_1024 = conv(pool3, weights_noisy['conv4_1024'],
                      biases_noisy['conv4_1024'], 1, 1)
    err_lyr['conv4_1024'] = tf.get_variable(
        name='conv4_1024_lyr_err',
        shape=conv4_1024.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['conv4_1024'] = tf.add(conv4_1024, err_lyr['conv4_1024'])
    cccp7_1024 = conv(layers_err['conv4_1024'], weights_noisy['cccp7_1024'],
                      biases_noisy['cccp7_1024'], 1, 1)
    err_lyr['cccp7_1024'] = tf.get_variable(
        name='cccp7_1024_lyr_err',
        shape=cccp7_1024.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['cccp7_1024'] = tf.add(cccp7_1024, err_lyr['cccp7_1024'])
    cccp8_1024 = conv(layers_err['cccp7_1024'], weights_noisy['cccp8_1024'],
                      biases_noisy['cccp8_1024'], 1, 1)
    err_lyr['cccp8_1024'] = tf.get_variable(
        name='cccp8_1024_lyr_err',
        shape=cccp8_1024.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['cccp8_1024'] = tf.add(cccp8_1024, err_lyr['cccp8_1024'])
    pool4 = avg_pool(layers_err['cccp8_1024'], 6, 6, 1, 1, padding='VALID')
    return pool4, err_w, err_b, err_lyr
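nin_noisy starts by calling helper.add_noise on the loaded weights and biases; that helper is not shown in this listing. Below is a minimal sketch of what it might do, mirroring the per-layer error-variable pattern used throughout the function. The use of err_mean/err_stddev indices 1 and 2 for weight and bias noise is an assumption made only for illustration.

import tensorflow as tf

def add_noise_sketch(weights, biases, err_mean, err_stddev, train_vars):
    # Attach an additive (optionally trainable) Gaussian error term to every weight and bias tensor.
    weights_noisy, biases_noisy, err_w, err_b = {}, {}, {}, {}
    for name, w in weights.items():
        err_w[name] = tf.get_variable(
            name=name + '_w_err',
            shape=w.shape,
            initializer=tf.random_normal_initializer(mean=err_mean[1], stddev=err_stddev[1]),
            trainable=train_vars[1])
        weights_noisy[name] = tf.add(w, err_w[name])
    for name, b in biases.items():
        err_b[name] = tf.get_variable(
            name=name + '_b_err',
            shape=b.shape,
            initializer=tf.random_normal_initializer(mean=err_mean[2], stddev=err_stddev[2]),
            trainable=train_vars[2])
        biases_noisy[name] = tf.add(b, err_b[name])
    return weights_noisy, biases_noisy, err_w, err_b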
Example No. 7
def googlenet_noisy(input_node, netparams, err_mean, err_stddev, train_vars):
    weights_noisy, biases_noisy, err_w, err_b = helper.add_noise(
        netparams['weights'], netparams['biases'], err_mean, err_stddev,
        train_vars)
    mean, variance, scale, offset = netparams['mean'], netparams[
        'variance'], netparams['scale'], netparams['offset']
    err_lyr = {}
    layers_err = {}
    data_spec = helper.get_data_spec('googlenet')
    err_lyr['input'] = tf.get_variable(
        name='input_lyr_err',
        shape=(1, data_spec.crop_size, data_spec.crop_size,
               data_spec.channels),
        initializer=tf.random_normal_initializer(mean=err_mean[0],
                                                 stddev=err_stddev[0]),
        trainable=train_vars[0])
    input_node_noisy = tf.add(input_node, err_lyr['input'])
    conv1_7x7_s2 = conv(input_node_noisy, weights_noisy['conv1_7x7_s2'],
                        biases_noisy['conv1_7x7_s2'], 2, 2)
    err_lyr['conv1_7x7_s2'] = tf.get_variable(
        name='conv1_7x7_s2_lyr_err',
        shape=conv1_7x7_s2.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['conv1_7x7_s2'] = tf.add(conv1_7x7_s2, err_lyr['conv1_7x7_s2'])
    pool1_3x3_s2 = max_pool(layers_err['conv1_7x7_s2'], 3, 3, 2, 2)
    pool1_norm1 = lrn(pool1_3x3_s2, 2, 1.99999994948e-05, 0.75)
    conv2_3x3_reduce = conv(pool1_norm1, weights_noisy['conv2_3x3_reduce'],
                            biases_noisy['conv2_3x3_reduce'], 1, 1)
    err_lyr['conv2_3x3_reduce'] = tf.get_variable(
        name='conv2_3x3_reduce_lyr_err',
        shape=conv2_3x3_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['conv2_3x3_reduce'] = tf.add(conv2_3x3_reduce,
                                            err_lyr['conv2_3x3_reduce'])
    conv2_3x3 = conv(layers_err['conv2_3x3_reduce'],
                     weights_noisy['conv2_3x3'], biases_noisy['conv2_3x3'], 1,
                     1)
    err_lyr['conv2_3x3'] = tf.get_variable(
        name='conv2_3x3_lyr_err',
        shape=conv2_3x3.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['conv2_3x3'] = tf.add(conv2_3x3, err_lyr['conv2_3x3'])
    conv2_norm2 = lrn(layers_err['conv2_3x3'], 2, 1.99999994948e-05, 0.75)
    pool2_3x3_s2 = max_pool(conv2_norm2, 3, 3, 2, 2)
    inception_3a_1x1 = conv(pool2_3x3_s2, weights_noisy['inception_3a_1x1'],
                            biases_noisy['inception_3a_1x1'], 1, 1)
    err_lyr['inception_3a_1x1'] = tf.get_variable(
        name='inception_3a_1x1_lyr_err',
        shape=inception_3a_1x1.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_3a_1x1'] = tf.add(inception_3a_1x1,
                                            err_lyr['inception_3a_1x1'])
    inception_3a_3x3_reduce = conv(pool2_3x3_s2,
                                   weights_noisy['inception_3a_3x3_reduce'],
                                   biases_noisy['inception_3a_3x3_reduce'], 1,
                                   1)
    err_lyr['inception_3a_3x3_reduce'] = tf.get_variable(
        name='inception_3a_3x3_reduce_lyr_err',
        shape=inception_3a_3x3_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_3a_3x3_reduce'] = tf.add(
        inception_3a_3x3_reduce, err_lyr['inception_3a_3x3_reduce'])
    inception_3a_3x3 = conv(layers_err['inception_3a_3x3_reduce'],
                            weights_noisy['inception_3a_3x3'],
                            biases_noisy['inception_3a_3x3'], 1, 1)
    err_lyr['inception_3a_3x3'] = tf.get_variable(
        name='inception_3a_3x3_lyr_err',
        shape=inception_3a_3x3.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_3a_3x3'] = tf.add(inception_3a_3x3,
                                            err_lyr['inception_3a_3x3'])
    inception_3a_5x5_reduce = conv(pool2_3x3_s2,
                                   weights_noisy['inception_3a_5x5_reduce'],
                                   biases_noisy['inception_3a_5x5_reduce'], 1,
                                   1)
    err_lyr['inception_3a_5x5_reduce'] = tf.get_variable(
        name='inception_3a_5x5_reduce_lyr_err',
        shape=inception_3a_5x5_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_3a_5x5_reduce'] = tf.add(
        inception_3a_5x5_reduce, err_lyr['inception_3a_5x5_reduce'])
    inception_3a_5x5 = conv(layers_err['inception_3a_5x5_reduce'],
                            weights_noisy['inception_3a_5x5'],
                            biases_noisy['inception_3a_5x5'], 1, 1)
    err_lyr['inception_3a_5x5'] = tf.get_variable(
        name='inception_3a_5x5_lyr_err',
        shape=inception_3a_5x5.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_3a_5x5'] = tf.add(inception_3a_5x5,
                                            err_lyr['inception_3a_5x5'])
    inception_3a_pool = max_pool(pool2_3x3_s2, 3, 3, 1, 1)
    inception_3a_pool_proj = conv(inception_3a_pool,
                                  weights_noisy['inception_3a_pool_proj'],
                                  biases_noisy['inception_3a_pool_proj'], 1, 1)
    err_lyr['inception_3a_pool_proj'] = tf.get_variable(
        name='inception_3a_pool_proj_lyr_err',
        shape=inception_3a_pool_proj.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_3a_pool_proj'] = tf.add(
        inception_3a_pool_proj, err_lyr['inception_3a_pool_proj'])
    inception_3a_output = concat([
        layers_err['inception_3a_1x1'], layers_err['inception_3a_3x3'],
        layers_err['inception_3a_5x5'], layers_err['inception_3a_pool_proj']
    ], 3)
    inception_3b_1x1 = conv(inception_3a_output,
                            weights_noisy['inception_3b_1x1'],
                            biases_noisy['inception_3b_1x1'], 1, 1)
    err_lyr['inception_3b_1x1'] = tf.get_variable(
        name='inception_3b_1x1_lyr_err',
        shape=inception_3b_1x1.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_3b_1x1'] = tf.add(inception_3b_1x1,
                                            err_lyr['inception_3b_1x1'])
    inception_3b_3x3_reduce = conv(inception_3a_output,
                                   weights_noisy['inception_3b_3x3_reduce'],
                                   biases_noisy['inception_3b_3x3_reduce'], 1,
                                   1)
    err_lyr['inception_3b_3x3_reduce'] = tf.get_variable(
        name='inception_3b_3x3_reduce_lyr_err',
        shape=inception_3b_3x3_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_3b_3x3_reduce'] = tf.add(
        inception_3b_3x3_reduce, err_lyr['inception_3b_3x3_reduce'])
    inception_3b_3x3 = conv(layers_err['inception_3b_3x3_reduce'],
                            weights_noisy['inception_3b_3x3'],
                            biases_noisy['inception_3b_3x3'], 1, 1)
    err_lyr['inception_3b_3x3'] = tf.get_variable(
        name='inception_3b_3x3_lyr_err',
        shape=inception_3b_3x3.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_3b_3x3'] = tf.add(inception_3b_3x3,
                                            err_lyr['inception_3b_3x3'])
    inception_3b_5x5_reduce = conv(inception_3a_output,
                                   weights_noisy['inception_3b_5x5_reduce'],
                                   biases_noisy['inception_3b_5x5_reduce'], 1,
                                   1)
    err_lyr['inception_3b_5x5_reduce'] = tf.get_variable(
        name='inception_3b_5x5_reduce_lyr_err',
        shape=inception_3b_5x5_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_3b_5x5_reduce'] = tf.add(
        inception_3b_5x5_reduce, err_lyr['inception_3b_5x5_reduce'])
    inception_3b_5x5 = conv(layers_err['inception_3b_5x5_reduce'],
                            weights_noisy['inception_3b_5x5'],
                            biases_noisy['inception_3b_5x5'], 1, 1)
    err_lyr['inception_3b_5x5'] = tf.get_variable(
        name='inception_3b_5x5_lyr_err',
        shape=inception_3b_5x5.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_3b_5x5'] = tf.add(inception_3b_5x5,
                                            err_lyr['inception_3b_5x5'])
    inception_3b_pool = max_pool(inception_3a_output, 3, 3, 1, 1)
    inception_3b_pool_proj = conv(inception_3b_pool,
                                  weights_noisy['inception_3b_pool_proj'],
                                  biases_noisy['inception_3b_pool_proj'], 1, 1)
    err_lyr['inception_3b_pool_proj'] = tf.get_variable(
        name='inception_3b_pool_proj_lyr_err',
        shape=inception_3b_pool_proj.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_3b_pool_proj'] = tf.add(
        inception_3b_pool_proj, err_lyr['inception_3b_pool_proj'])
    inception_3b_output = concat([
        layers_err['inception_3b_1x1'], layers_err['inception_3b_3x3'],
        layers_err['inception_3b_5x5'], layers_err['inception_3b_pool_proj']
    ], 3)
    pool3_3x3_s2 = max_pool(inception_3b_output, 3, 3, 2, 2)
    inception_4a_1x1 = conv(pool3_3x3_s2, weights_noisy['inception_4a_1x1'],
                            biases_noisy['inception_4a_1x1'], 1, 1)
    err_lyr['inception_4a_1x1'] = tf.get_variable(
        name='inception_4a_1x1_lyr_err',
        shape=inception_4a_1x1.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4a_1x1'] = tf.add(inception_4a_1x1,
                                            err_lyr['inception_4a_1x1'])
    inception_4a_3x3_reduce = conv(pool3_3x3_s2,
                                   weights_noisy['inception_4a_3x3_reduce'],
                                   biases_noisy['inception_4a_3x3_reduce'], 1,
                                   1)
    err_lyr['inception_4a_3x3_reduce'] = tf.get_variable(
        name='inception_4a_3x3_reduce_lyr_err',
        shape=inception_4a_3x3_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4a_3x3_reduce'] = tf.add(
        inception_4a_3x3_reduce, err_lyr['inception_4a_3x3_reduce'])
    inception_4a_3x3 = conv(layers_err['inception_4a_3x3_reduce'],
                            weights_noisy['inception_4a_3x3'],
                            biases_noisy['inception_4a_3x3'], 1, 1)
    err_lyr['inception_4a_3x3'] = tf.get_variable(
        name='inception_4a_3x3_lyr_err',
        shape=inception_4a_3x3.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4a_3x3'] = tf.add(inception_4a_3x3,
                                            err_lyr['inception_4a_3x3'])
    inception_4a_5x5_reduce = conv(pool3_3x3_s2,
                                   weights_noisy['inception_4a_5x5_reduce'],
                                   biases_noisy['inception_4a_5x5_reduce'], 1,
                                   1)
    err_lyr['inception_4a_5x5_reduce'] = tf.get_variable(
        name='inception_4a_5x5_reduce_lyr_err',
        shape=inception_4a_5x5_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4a_5x5_reduce'] = tf.add(
        inception_4a_5x5_reduce, err_lyr['inception_4a_5x5_reduce'])
    inception_4a_5x5 = conv(layers_err['inception_4a_5x5_reduce'],
                            weights_noisy['inception_4a_5x5'],
                            biases_noisy['inception_4a_5x5'], 1, 1)
    err_lyr['inception_4a_5x5'] = tf.get_variable(
        name='inception_4a_5x5_lyr_err',
        shape=inception_4a_5x5.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4a_5x5'] = tf.add(inception_4a_5x5,
                                            err_lyr['inception_4a_5x5'])
    inception_4a_pool = max_pool(pool3_3x3_s2, 3, 3, 1, 1)
    inception_4a_pool_proj = conv(inception_4a_pool,
                                  weights_noisy['inception_4a_pool_proj'],
                                  biases_noisy['inception_4a_pool_proj'], 1, 1)
    err_lyr['inception_4a_pool_proj'] = tf.get_variable(
        name='inception_4a_pool_proj_lyr_err',
        shape=inception_4a_pool_proj.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4a_pool_proj'] = tf.add(
        inception_4a_pool_proj, err_lyr['inception_4a_pool_proj'])
    inception_4a_output = concat([
        layers_err['inception_4a_1x1'], layers_err['inception_4a_3x3'],
        layers_err['inception_4a_5x5'], layers_err['inception_4a_pool_proj']
    ], 3)
    inception_4b_1x1 = conv(inception_4a_output,
                            weights_noisy['inception_4b_1x1'],
                            biases_noisy['inception_4b_1x1'], 1, 1)
    err_lyr['inception_4b_1x1'] = tf.get_variable(
        name='inception_4b_1x1_lyr_err',
        shape=inception_4b_1x1.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4b_1x1'] = tf.add(inception_4b_1x1,
                                            err_lyr['inception_4b_1x1'])
    inception_4b_3x3_reduce = conv(inception_4a_output,
                                   weights_noisy['inception_4b_3x3_reduce'],
                                   biases_noisy['inception_4b_3x3_reduce'], 1,
                                   1)
    err_lyr['inception_4b_3x3_reduce'] = tf.get_variable(
        name='inception_4b_3x3_reduce_lyr_err',
        shape=inception_4b_3x3_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4b_3x3_reduce'] = tf.add(
        inception_4b_3x3_reduce, err_lyr['inception_4b_3x3_reduce'])
    inception_4b_3x3 = conv(layers_err['inception_4b_3x3_reduce'],
                            weights_noisy['inception_4b_3x3'],
                            biases_noisy['inception_4b_3x3'], 1, 1)
    err_lyr['inception_4b_3x3'] = tf.get_variable(
        name='inception_4b_3x3_lyr_err',
        shape=inception_4b_3x3.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4b_3x3'] = tf.add(inception_4b_3x3,
                                            err_lyr['inception_4b_3x3'])
    inception_4b_5x5_reduce = conv(inception_4a_output,
                                   weights_noisy['inception_4b_5x5_reduce'],
                                   biases_noisy['inception_4b_5x5_reduce'], 1,
                                   1)
    err_lyr['inception_4b_5x5_reduce'] = tf.get_variable(
        name='inception_4b_5x5_reduce_lyr_err',
        shape=inception_4b_5x5_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4b_5x5_reduce'] = tf.add(
        inception_4b_5x5_reduce, err_lyr['inception_4b_5x5_reduce'])
    inception_4b_5x5 = conv(layers_err['inception_4b_5x5_reduce'],
                            weights_noisy['inception_4b_5x5'],
                            biases_noisy['inception_4b_5x5'], 1, 1)
    err_lyr['inception_4b_5x5'] = tf.get_variable(
        name='inception_4b_5x5_lyr_err',
        shape=inception_4b_5x5.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4b_5x5'] = tf.add(inception_4b_5x5,
                                            err_lyr['inception_4b_5x5'])
    inception_4b_pool = max_pool(inception_4a_output, 3, 3, 1, 1)
    inception_4b_pool_proj = conv(inception_4b_pool,
                                  weights_noisy['inception_4b_pool_proj'],
                                  biases_noisy['inception_4b_pool_proj'], 1, 1)
    err_lyr['inception_4b_pool_proj'] = tf.get_variable(
        name='inception_4b_pool_proj_lyr_err',
        shape=inception_4b_pool_proj.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4b_pool_proj'] = tf.add(
        inception_4b_pool_proj, err_lyr['inception_4b_pool_proj'])
    inception_4b_output = concat([
        layers_err['inception_4b_1x1'], layers_err['inception_4b_3x3'],
        layers_err['inception_4b_5x5'], layers_err['inception_4b_pool_proj']
    ], 3)
    inception_4c_1x1 = conv(inception_4b_output,
                            weights_noisy['inception_4c_1x1'],
                            biases_noisy['inception_4c_1x1'], 1, 1)
    err_lyr['inception_4c_1x1'] = tf.get_variable(
        name='inception_4c_1x1_lyr_err',
        shape=inception_4c_1x1.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4c_1x1'] = tf.add(inception_4c_1x1,
                                            err_lyr['inception_4c_1x1'])
    inception_4c_3x3_reduce = conv(inception_4b_output,
                                   weights_noisy['inception_4c_3x3_reduce'],
                                   biases_noisy['inception_4c_3x3_reduce'], 1,
                                   1)
    err_lyr['inception_4c_3x3_reduce'] = tf.get_variable(
        name='inception_4c_3x3_reduce_lyr_err',
        shape=inception_4c_3x3_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4c_3x3_reduce'] = tf.add(
        inception_4c_3x3_reduce, err_lyr['inception_4c_3x3_reduce'])
    inception_4c_3x3 = conv(layers_err['inception_4c_3x3_reduce'],
                            weights_noisy['inception_4c_3x3'],
                            biases_noisy['inception_4c_3x3'], 1, 1)
    err_lyr['inception_4c_3x3'] = tf.get_variable(
        name='inception_4c_3x3_lyr_err',
        shape=inception_4c_3x3.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4c_3x3'] = tf.add(inception_4c_3x3,
                                            err_lyr['inception_4c_3x3'])
    inception_4c_5x5_reduce = conv(inception_4b_output,
                                   weights_noisy['inception_4c_5x5_reduce'],
                                   biases_noisy['inception_4c_5x5_reduce'], 1,
                                   1)
    err_lyr['inception_4c_5x5_reduce'] = tf.get_variable(
        name='inception_4c_5x5_reduce_lyr_err',
        shape=inception_4c_5x5_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4c_5x5_reduce'] = tf.add(
        inception_4c_5x5_reduce, err_lyr['inception_4c_5x5_reduce'])
    inception_4c_5x5 = conv(layers_err['inception_4c_5x5_reduce'],
                            weights_noisy['inception_4c_5x5'],
                            biases_noisy['inception_4c_5x5'], 1, 1)
    err_lyr['inception_4c_5x5'] = tf.get_variable(
        name='inception_4c_5x5_lyr_err',
        shape=inception_4c_5x5.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4c_5x5'] = tf.add(inception_4c_5x5,
                                            err_lyr['inception_4c_5x5'])
    inception_4c_pool = max_pool(inception_4b_output, 3, 3, 1, 1)
    inception_4c_pool_proj = conv(inception_4c_pool,
                                  weights_noisy['inception_4c_pool_proj'],
                                  biases_noisy['inception_4c_pool_proj'], 1, 1)
    err_lyr['inception_4c_pool_proj'] = tf.get_variable(
        name='inception_4c_pool_proj_lyr_err',
        shape=inception_4c_pool_proj.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4c_pool_proj'] = tf.add(
        inception_4c_pool_proj, err_lyr['inception_4c_pool_proj'])
    inception_4c_output = concat([
        layers_err['inception_4c_1x1'], layers_err['inception_4c_3x3'],
        layers_err['inception_4c_5x5'], layers_err['inception_4c_pool_proj']
    ], 3)
    inception_4d_1x1 = conv(inception_4c_output,
                            weights_noisy['inception_4d_1x1'],
                            biases_noisy['inception_4d_1x1'], 1, 1)
    err_lyr['inception_4d_1x1'] = tf.get_variable(
        name='inception_4d_1x1_lyr_err',
        shape=inception_4d_1x1.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4d_1x1'] = tf.add(inception_4d_1x1,
                                            err_lyr['inception_4d_1x1'])
    inception_4d_3x3_reduce = conv(inception_4c_output,
                                   weights_noisy['inception_4d_3x3_reduce'],
                                   biases_noisy['inception_4d_3x3_reduce'], 1,
                                   1)
    err_lyr['inception_4d_3x3_reduce'] = tf.get_variable(
        name='inception_4d_3x3_reduce_lyr_err',
        shape=inception_4d_3x3_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4d_3x3_reduce'] = tf.add(
        inception_4d_3x3_reduce, err_lyr['inception_4d_3x3_reduce'])
    inception_4d_3x3 = conv(layers_err['inception_4d_3x3_reduce'],
                            weights_noisy['inception_4d_3x3'],
                            biases_noisy['inception_4d_3x3'], 1, 1)
    err_lyr['inception_4d_3x3'] = tf.get_variable(
        name='inception_4d_3x3_lyr_err',
        shape=inception_4d_3x3.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4d_3x3'] = tf.add(inception_4d_3x3,
                                            err_lyr['inception_4d_3x3'])
    inception_4d_5x5_reduce = conv(inception_4c_output,
                                   weights_noisy['inception_4d_5x5_reduce'],
                                   biases_noisy['inception_4d_5x5_reduce'], 1,
                                   1)
    err_lyr['inception_4d_5x5_reduce'] = tf.get_variable(
        name='inception_4d_5x5_reduce_lyr_err',
        shape=inception_4d_5x5_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4d_5x5_reduce'] = tf.add(
        inception_4d_5x5_reduce, err_lyr['inception_4d_5x5_reduce'])
    inception_4d_5x5 = conv(layers_err['inception_4d_5x5_reduce'],
                            weights_noisy['inception_4d_5x5'],
                            biases_noisy['inception_4d_5x5'], 1, 1)
    err_lyr['inception_4d_5x5'] = tf.get_variable(
        name='inception_4d_5x5_lyr_err',
        shape=inception_4d_5x5.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4d_5x5'] = tf.add(inception_4d_5x5,
                                            err_lyr['inception_4d_5x5'])
    inception_4d_pool = max_pool(inception_4c_output, 3, 3, 1, 1)
    inception_4d_pool_proj = conv(inception_4d_pool,
                                  weights_noisy['inception_4d_pool_proj'],
                                  biases_noisy['inception_4d_pool_proj'], 1, 1)
    err_lyr['inception_4d_pool_proj'] = tf.get_variable(
        name='inception_4d_pool_proj_lyr_err',
        shape=inception_4d_pool_proj.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4d_pool_proj'] = tf.add(
        inception_4d_pool_proj, err_lyr['inception_4d_pool_proj'])
    inception_4d_output = concat([
        layers_err['inception_4d_1x1'], layers_err['inception_4d_3x3'],
        layers_err['inception_4d_5x5'], layers_err['inception_4d_pool_proj']
    ], 3)
    inception_4e_1x1 = conv(inception_4d_output,
                            weights_noisy['inception_4e_1x1'],
                            biases_noisy['inception_4e_1x1'], 1, 1)
    err_lyr['inception_4e_1x1'] = tf.get_variable(
        name='inception_4e_1x1_lyr_err',
        shape=inception_4e_1x1.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4e_1x1'] = tf.add(inception_4e_1x1,
                                            err_lyr['inception_4e_1x1'])
    inception_4e_3x3_reduce = conv(inception_4d_output,
                                   weights_noisy['inception_4e_3x3_reduce'],
                                   biases_noisy['inception_4e_3x3_reduce'], 1,
                                   1)
    err_lyr['inception_4e_3x3_reduce'] = tf.get_variable(
        name='inception_4e_3x3_reduce_lyr_err',
        shape=inception_4e_3x3_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4e_3x3_reduce'] = tf.add(
        inception_4e_3x3_reduce, err_lyr['inception_4e_3x3_reduce'])
    inception_4e_3x3 = conv(layers_err['inception_4e_3x3_reduce'],
                            weights_noisy['inception_4e_3x3'],
                            biases_noisy['inception_4e_3x3'], 1, 1)
    err_lyr['inception_4e_3x3'] = tf.get_variable(
        name='inception_4e_3x3_lyr_err',
        shape=inception_4e_3x3.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4e_3x3'] = tf.add(inception_4e_3x3,
                                            err_lyr['inception_4e_3x3'])
    inception_4e_5x5_reduce = conv(inception_4d_output,
                                   weights_noisy['inception_4e_5x5_reduce'],
                                   biases_noisy['inception_4e_5x5_reduce'], 1,
                                   1)
    err_lyr['inception_4e_5x5_reduce'] = tf.get_variable(
        name='inception_4e_5x5_reduce_lyr_err',
        shape=inception_4e_5x5_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4e_5x5_reduce'] = tf.add(
        inception_4e_5x5_reduce, err_lyr['inception_4e_5x5_reduce'])
    inception_4e_5x5 = conv(layers_err['inception_4e_5x5_reduce'],
                            weights_noisy['inception_4e_5x5'],
                            biases_noisy['inception_4e_5x5'], 1, 1)
    err_lyr['inception_4e_5x5'] = tf.get_variable(
        name='inception_4e_5x5_lyr_err',
        shape=inception_4e_5x5.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4e_5x5'] = tf.add(inception_4e_5x5,
                                            err_lyr['inception_4e_5x5'])
    inception_4e_pool = max_pool(inception_4d_output, 3, 3, 1, 1)
    inception_4e_pool_proj = conv(inception_4e_pool,
                                  weights_noisy['inception_4e_pool_proj'],
                                  biases_noisy['inception_4e_pool_proj'], 1, 1)
    err_lyr['inception_4e_pool_proj'] = tf.get_variable(
        name='inception_4e_pool_proj_lyr_err',
        shape=inception_4e_pool_proj.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_4e_pool_proj'] = tf.add(
        inception_4e_pool_proj, err_lyr['inception_4e_pool_proj'])
    inception_4e_output = concat([
        layers_err['inception_4e_1x1'], layers_err['inception_4e_3x3'],
        layers_err['inception_4e_5x5'], layers_err['inception_4e_pool_proj']
    ], 3)
    pool4_3x3_s2 = max_pool(inception_4e_output, 3, 3, 2, 2)
    inception_5a_1x1 = conv(pool4_3x3_s2, weights_noisy['inception_5a_1x1'],
                            biases_noisy['inception_5a_1x1'], 1, 1)
    err_lyr['inception_5a_1x1'] = tf.get_variable(
        name='inception_5a_1x1_lyr_err',
        shape=inception_5a_1x1.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_5a_1x1'] = tf.add(inception_5a_1x1,
                                            err_lyr['inception_5a_1x1'])
    inception_5a_3x3_reduce = conv(pool4_3x3_s2,
                                   weights_noisy['inception_5a_3x3_reduce'],
                                   biases_noisy['inception_5a_3x3_reduce'], 1,
                                   1)
    err_lyr['inception_5a_3x3_reduce'] = tf.get_variable(
        name='inception_5a_3x3_reduce_lyr_err',
        shape=inception_5a_3x3_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_5a_3x3_reduce'] = tf.add(
        inception_5a_3x3_reduce, err_lyr['inception_5a_3x3_reduce'])
    inception_5a_3x3 = conv(layers_err['inception_5a_3x3_reduce'],
                            weights_noisy['inception_5a_3x3'],
                            biases_noisy['inception_5a_3x3'], 1, 1)
    err_lyr['inception_5a_3x3'] = tf.get_variable(
        name='inception_5a_3x3_lyr_err',
        shape=inception_5a_3x3.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_5a_3x3'] = tf.add(inception_5a_3x3,
                                            err_lyr['inception_5a_3x3'])
    inception_5a_5x5_reduce = conv(pool4_3x3_s2,
                                   weights_noisy['inception_5a_5x5_reduce'],
                                   biases_noisy['inception_5a_5x5_reduce'], 1,
                                   1)
    err_lyr['inception_5a_5x5_reduce'] = tf.get_variable(
        name='inception_5a_5x5_reduce_lyr_err',
        shape=inception_5a_5x5_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_5a_5x5_reduce'] = tf.add(
        inception_5a_5x5_reduce, err_lyr['inception_5a_5x5_reduce'])
    inception_5a_5x5 = conv(layers_err['inception_5a_5x5_reduce'],
                            weights_noisy['inception_5a_5x5'],
                            biases_noisy['inception_5a_5x5'], 1, 1)
    err_lyr['inception_5a_5x5'] = tf.get_variable(
        name='inception_5a_5x5_lyr_err',
        shape=inception_5a_5x5.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_5a_5x5'] = tf.add(inception_5a_5x5,
                                            err_lyr['inception_5a_5x5'])
    inception_5a_pool = max_pool(pool4_3x3_s2, 3, 3, 1, 1)
    inception_5a_pool_proj = conv(inception_5a_pool,
                                  weights_noisy['inception_5a_pool_proj'],
                                  biases_noisy['inception_5a_pool_proj'], 1, 1)
    err_lyr['inception_5a_pool_proj'] = tf.get_variable(
        name='inception_5a_pool_proj_lyr_err',
        shape=inception_5a_pool_proj.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_5a_pool_proj'] = tf.add(
        inception_5a_pool_proj, err_lyr['inception_5a_pool_proj'])
    inception_5a_output = concat([
        layers_err['inception_5a_1x1'], layers_err['inception_5a_3x3'],
        layers_err['inception_5a_5x5'], layers_err['inception_5a_pool_proj']
    ], 3)
    inception_5b_1x1 = conv(inception_5a_output,
                            weights_noisy['inception_5b_1x1'],
                            biases_noisy['inception_5b_1x1'], 1, 1)
    err_lyr['inception_5b_1x1'] = tf.get_variable(
        name='inception_5b_1x1_lyr_err',
        shape=inception_5b_1x1.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_5b_1x1'] = tf.add(inception_5b_1x1,
                                            err_lyr['inception_5b_1x1'])
    inception_5b_3x3_reduce = conv(inception_5a_output,
                                   weights_noisy['inception_5b_3x3_reduce'],
                                   biases_noisy['inception_5b_3x3_reduce'], 1,
                                   1)
    err_lyr['inception_5b_3x3_reduce'] = tf.get_variable(
        name='inception_5b_3x3_reduce_lyr_err',
        shape=inception_5b_3x3_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_5b_3x3_reduce'] = tf.add(
        inception_5b_3x3_reduce, err_lyr['inception_5b_3x3_reduce'])
    inception_5b_3x3 = conv(layers_err['inception_5b_3x3_reduce'],
                            weights_noisy['inception_5b_3x3'],
                            biases_noisy['inception_5b_3x3'], 1, 1)
    err_lyr['inception_5b_3x3'] = tf.get_variable(
        name='inception_5b_3x3_lyr_err',
        shape=inception_5b_3x3.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_5b_3x3'] = tf.add(inception_5b_3x3,
                                            err_lyr['inception_5b_3x3'])
    inception_5b_5x5_reduce = conv(inception_5a_output,
                                   weights_noisy['inception_5b_5x5_reduce'],
                                   biases_noisy['inception_5b_5x5_reduce'], 1,
                                   1)
    err_lyr['inception_5b_5x5_reduce'] = tf.get_variable(
        name='inception_5b_5x5_reduce_lyr_err',
        shape=inception_5b_5x5_reduce.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_5b_5x5_reduce'] = tf.add(
        inception_5b_5x5_reduce, err_lyr['inception_5b_5x5_reduce'])
    inception_5b_5x5 = conv(layers_err['inception_5b_5x5_reduce'],
                            weights_noisy['inception_5b_5x5'],
                            biases_noisy['inception_5b_5x5'], 1, 1)
    err_lyr['inception_5b_5x5'] = tf.get_variable(
        name='inception_5b_5x5_lyr_err',
        shape=inception_5b_5x5.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_5b_5x5'] = tf.add(inception_5b_5x5,
                                            err_lyr['inception_5b_5x5'])
    inception_5b_pool = max_pool(inception_5a_output, 3, 3, 1, 1)
    inception_5b_pool_proj = conv(inception_5b_pool,
                                  weights_noisy['inception_5b_pool_proj'],
                                  biases_noisy['inception_5b_pool_proj'], 1, 1)
    err_lyr['inception_5b_pool_proj'] = tf.get_variable(
        name='inception_5b_pool_proj_lyr_err',
        shape=inception_5b_pool_proj.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['inception_5b_pool_proj'] = tf.add(
        inception_5b_pool_proj, err_lyr['inception_5b_pool_proj'])
    inception_5b_output = concat([
        layers_err['inception_5b_1x1'], layers_err['inception_5b_3x3'],
        layers_err['inception_5b_5x5'], layers_err['inception_5b_pool_proj']
    ], 3)
    pool5_7x7_s1 = avg_pool(inception_5b_output, 7, 7, 1, 1, padding='VALID')
    loss3_classifier = fc(pool5_7x7_s1,
                          weights_noisy['loss3_classifier'],
                          biases_noisy['loss3_classifier'],
                          relu=False)
    err_lyr['loss3_classifier'] = tf.get_variable(
        name='loss3_classifier_lyr_err',
        shape=loss3_classifier.shape[1:],
        initializer=tf.random_normal_initializer(mean=err_mean[3],
                                                 stddev=err_stddev[3]),
        trainable=train_vars[3])
    layers_err['loss3_classifier'] = tf.add(loss3_classifier,
                                            err_lyr['loss3_classifier'])
    return layers_err['loss3_classifier'], err_w, err_b, err_lyr
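Both noisy network builders rely on conv, max_pool, avg_pool, lrn, concat and fc wrappers that are defined elsewhere in the codebase. As a rough guide to what those calls compute, here is a minimal TF1-style sketch of such wrappers; the NHWC layout, ReLU placement, and default padding are assumptions, and the real implementations may differ (for example, grouped convolutions).

import tensorflow as tf

def conv(inp, kernel, bias, s_h, s_w, padding='SAME', relu=True):
    # 2-D convolution followed by bias add and (by default) ReLU.
    x = tf.nn.conv2d(inp, kernel, strides=[1, s_h, s_w, 1], padding=padding)
    x = tf.nn.bias_add(x, bias)
    return tf.nn.relu(x) if relu else x

def max_pool(inp, k_h, k_w, s_h, s_w, padding='SAME'):
    return tf.nn.max_pool(inp, ksize=[1, k_h, k_w, 1],
                          strides=[1, s_h, s_w, 1], padding=padding)

def avg_pool(inp, k_h, k_w, s_h, s_w, padding='SAME'):
    return tf.nn.avg_pool(inp, ksize=[1, k_h, k_w, 1],
                          strides=[1, s_h, s_w, 1], padding=padding)

def lrn(inp, radius, alpha, beta, bias=1.0):
    return tf.nn.local_response_normalization(
        inp, depth_radius=radius, alpha=alpha, beta=beta, bias=bias)

def concat(values, axis):
    return tf.concat(values, axis)

def fc(inp, weights, bias, relu=True):
    # Flatten to (batch, in_features), then apply an affine layer.
    x = tf.reshape(inp, [-1, int(weights.shape[0])])
    x = tf.nn.xw_plus_b(x, weights, bias)
    return tf.nn.relu(x) if relu else x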
Example No. 8
def train_one_epoch(sess, ops, train_writer, templates, poses):
    # Arguments:
    # sess: 		Tensorflow session to handle tensors.
    # ops:			Dictionary for tensors of Network
    # templates:	Training Point Cloud data.
    # poses: 		Training pose data.
    print(datetime.now())

    is_training = True
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False

    poses = poses[0:5070, :]
    poses = helper.shuffle_poses(poses)  # Shuffle Poses.

    loss_sum = 0  # Total Loss in each batch.
    num_batches = int(templates.shape[0] /
                      BATCH_SIZE)  # Number of batches in an epoch.

    # Training for each batch.
    for fn in range(num_batches):
        start_idx = fn * BATCH_SIZE  # Start index of poses.
        end_idx = (fn + 1) * BATCH_SIZE  # End index of poses.

        template_data = np.copy(templates[start_idx:end_idx])

        batch_euler_poses = poses[start_idx:end_idx]  # Extract poses for batch training.
        if SPARSE_SAMPLING > 0:
            template_data, source_data = helper.split_template_source(
                template_data,
                batch_euler_poses,
                NUM_POINT,
                centroid_subtraction_switch,
                ADD_NOISE,
                S_RAND_POINTS,
                SPARSE=SPARSE_SAMPLING)
        else:
            if template_random_pose:
                template_data = helper.apply_transformation(
                    template_data, batch_euler_poses / 2)
                template_data = template_data - np.mean(
                    template_data, axis=1, keepdims=True)

            source_data = helper.apply_transformation(
                template_data, batch_euler_poses
            )  # Apply the poses on the templates to get source data.

            if centroid_subtraction_switch:
                source_data = source_data - np.mean(
                    source_data, axis=1, keepdims=True)
                # template_data = template_data - np.mean(template_data, axis=1, keepdims=True)

            # Choose random points from the point clouds for training.
            if np.random.random_sample() < S_RAND_POINTS:
                template_data = helper.select_random_points(
                    template_data, NUM_POINT)
                source_data = helper.select_random_points(
                    source_data, NUM_POINT
                )  # With probability S_RAND_POINTS the source data has different points than the template.
            else:
                source_data = source_data[:, 0:NUM_POINT, :]
            if np.random.random_sample() < ADD_NOISE:
                source_data = helper.add_noise(source_data)

            # Only choose a limited number of points from the source and template data.
            source_data = source_data[:, 0:NUM_POINT, :]
            template_data = template_data[:, 0:NUM_POINT, :]

            # To visualize the source and point clouds:
            if display_ptClouds:
                helper.display_clouds_data(source_data[0])
                helper.display_clouds_data(template_data[0])

        if FLAGS.add_occlusions > 0.0:
            source_data = helper.add_occlusions(source_data,
                                                FLAGS.add_occlusions)

        TRANSFORMATIONS = np.identity(
            4)  # Initialize identity transformation matrix.
        TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS, BATCH_SIZE, 1).reshape(
            BATCH_SIZE, 4,
            4)  # Initialize identity matrices of size equal to batch_size.

        # Iterations for pose refinement.
        for loop_idx in range(MAX_LOOPS - 1):
            # 4a
            # Feed the placeholders of Network with template data and source data.
            if ADD_NOISE_MODEL:
                feed_dict = {
                    ops['source_pointclouds_pl']: source_data,
                    ops['template_pointclouds_pl']: template_data,
                    ops['is_training_pl']: is_training,
                    ops['is_training_pl_1']: False,
                    ops['labels12']: np.ones([BATCH_SIZE, NUM_POINT]),
                    ops['add_noise']: np.zeros([BATCH_SIZE, NUM_POINT, 3])
                }
            else:
                feed_dict = {
                    ops['source_pointclouds_pl']: source_data,
                    ops['template_pointclouds_pl']: template_data,
                    ops['is_training_pl']: is_training,
                    ops['is_training_pl_1']: False,
                    ops['labels12']: np.ones([BATCH_SIZE, NUM_POINT])
                }
            if bool(FLAGS.train_single):  # Train on every iteration, or only on the Nth iteration.
                summary, step, _, loss_val, predicted_transformation = sess.run(
                    [
                        ops['merged'], ops['step'], ops['train_op'],
                        ops['loss'], ops['predicted_transformation']
                    ],
                    feed_dict=feed_dict)
            else:
                predicted_transformation = sess.run(
                    [ops['predicted_transformation']], feed_dict=feed_dict
                )  # Ask the network to predict the pose.

            # 4b,4c
            # Apply the transformation on the template data and multiply it to transformation matrix obtained in previous iteration.
            TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
                predicted_transformation, TRANSFORMATIONS, source_data)

            # Display Results after each iteration.
            if display_poses_in_itr:
                print(predicted_transformation[0, 0:3])
                print(predicted_transformation[0, 3:7] * (180 / np.pi))
            if display_ptClouds_in_itr:
                helper.display_clouds_data(source_data[0])

        # Feed the placeholders of Network with source data and template data obtained from N-Iterations.
        if ADD_NOISE_MODEL:
            feed_dict = {
                ops['source_pointclouds_pl']: source_data,
                ops['template_pointclouds_pl']: template_data,
                ops['is_training_pl']: is_training,
                ops['is_training_pl_1']: False,
                ops['labels12']: np.ones([BATCH_SIZE, NUM_POINT]),
                ops['add_noise']: np.zeros([BATCH_SIZE, NUM_POINT, 3])
            }
        else:
            feed_dict = {
                ops['source_pointclouds_pl']: source_data,
                ops['template_pointclouds_pl']: template_data,
                ops['is_training_pl']: is_training,
                ops['is_training_pl_1']: False,
                ops['labels12']: np.ones([BATCH_SIZE, NUM_POINT])
            }

        # Ask the network to predict transformation, calculate loss using distance between actual points, calculate & apply gradients for Network and copy the weights to Network19.
        summary, step, _, loss_val, predicted_transformation = sess.run(
            [
                ops['merged'], ops['step'], ops['train_op'], ops['loss'],
                ops['predicted_transformation']
            ],
            feed_dict=feed_dict)
        train_writer.add_summary(
            summary, step)  # Add all the summary to the tensorboard.

        # Apply the final transformation on the template data and multiply it with the transformation matrix obtained from N-Iterations.
        TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
            predicted_transformation, TRANSFORMATIONS, source_data)

        # Display the ground truth pose and predicted pose for the first point cloud in the batch.
        if display_poses:
            final_pose = helper.find_final_pose_inv(
                TRANSFORMATIONS
            )  # Find the final pose (translation, orientation as Euler angles in radians) from the transformation matrix.
            print('Ground Truth Position: {}'.format(
                batch_euler_poses[0, 0:3].tolist()))
            print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
            print('Ground Truth Orientation: {}'.format(
                (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
            print('Predicted Orientation: {}'.format(
                (final_pose[0, 3:6] * (180 / np.pi)).tolist()))
            # print(batch_euler_poses[0,0:3],batch_euler_poses[0,3:6]*(180/np.pi))
            # print(final_pose[0,0:3],final_pose[0,3:6]*(180/np.pi))

        # Display Loss Value.
        print("Batch: {} & Loss: {}\r".format(fn, loss_val), end='')

        # Add loss for each batch.
        loss_sum += loss_val
    print('\n')
    log_string(
        'Train Mean loss: %f\n' %
        (loss_sum / num_batches))  # Store and display mean loss of epoch.
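The batch of 4x4 identity matrices used in both training and evaluation is built with numpy.matlib's repmat followed by a reshape; an equivalent and arguably clearer construction uses np.tile, as in this small sketch (the BATCH_SIZE value is illustrative).

import numpy as np

BATCH_SIZE = 32  # illustrative value
TRANSFORMATIONS = np.tile(np.identity(4), (BATCH_SIZE, 1, 1))  # shape: (BATCH_SIZE, 4, 4)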
Example No. 9
def test_one_epoch(sess, ops, templates, poses, saver, model_path):
    # Arguments:
    # sess: 		Tensorflow session to handle tensors.
    # ops:			Dictionary for tensors of Network
    # templates:	Training Point Cloud data.
    # poses: 		Training pose data.
    # saver: 		To restore the weights.
    # model_path: 	Path of log directory.

    saver.restore(sess, model_path)  # Restore the weights of trained network.

    is_training = False
    display_ptClouds = False
    display_poses = False
    display_poses_in_itr = False
    display_ptClouds_in_itr = False
    swap_case = False
    MAX_LOOPS = 4

    template_data = np.zeros((BATCH_SIZE, MAX_NUM_POINT,
                              3))  # Extract Templates for batch training.
    template_data[0] = np.copy(templates[FLAGS.template_idx, :, :])

    batch_euler_poses = poses[0].reshape(
        (1, 6))  # Extract poses for batch training.

    # Define test case.
    batch_euler_poses[0] = [
        0.4, 0.5, 0.1, 10 * (np.pi / 180), 20 * (np.pi / 180),
        20 * (np.pi / 180)
    ]
    source_data = helper.apply_transformation(
        template_data, batch_euler_poses
    )  # Apply the poses on the templates to get source data.

    # Choose random points from the point clouds (disabled here: the probability threshold is 0).
    if np.random.random_sample() < 0:
        source_data = helper.select_random_points(
            source_data, NUM_POINT
        )  # Would give the source cloud a different subset of points than the template.
    else:
        source_data = source_data[:, 0:NUM_POINT, :]
    # Add noise to the source point cloud (the threshold of 1.0 means noise is always added in this test).
    if np.random.random_sample() < 1.0:
        source_data = helper.add_noise(source_data)

    # Keep only NUM_POINT points from the source and template clouds.
    source_data = source_data[:, 0:NUM_POINT, :]
    template_data = template_data[:, 0:NUM_POINT, :]

    TEMPLATE_DATA = np.copy(
        template_data)  # Store the initial template to visualize results.
    SOURCE_DATA = np.copy(
        source_data)  # Store the initial source to visualize results.

    # Subtract the Centroids from the Point Clouds.
    if centroid_subtraction_switch:
        source_data = source_data - np.mean(source_data, axis=1, keepdims=True)
        template_data = template_data - np.mean(
            template_data, axis=1, keepdims=True)

    # To visualize the source and template point clouds:
    if display_ptClouds:
        helper.display_clouds_data(source_data[0])
        helper.display_clouds_data(template_data[0])

    TRANSFORMATIONS = np.identity(
        4)  # Initialize identity transformation matrix.
    TRANSFORMATIONS = npm.repmat(TRANSFORMATIONS, BATCH_SIZE, 1).reshape(
        BATCH_SIZE, 4,
        4)  # Initialize one identity matrix per element of the batch.

    # Store the transformed point clouds after each iteration.
    ITR = np.zeros((MAX_LOOPS, template_data.shape[0], template_data.shape[1],
                    template_data.shape[2]))

    # Pose-refinement iterations (MAX_LOOPS - 1 passes here; the final pass runs after the loop).
    for loop_idx in range(MAX_LOOPS - 1):
        # 4a
        # Feed the placeholders of Network with template data and source data.
        feed_dict = {
            ops['source_pointclouds_pl']: source_data,
            ops['template_pointclouds_pl']: template_data,
            ops['is_training_pl']: is_training
        }
        predicted_transformation = sess.run(
            [ops['predicted_transformation']],
            feed_dict=feed_dict)  # Ask the network to predict the pose.
        #print (predicted_transformation[0])

        # 4b,4c
        # Apply the predicted transformation to the source data and compose it with the transformation matrix from the previous iteration.
        TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
            predicted_transformation, TRANSFORMATIONS, source_data)

        # Display results after each iteration.
        if display_poses_in_itr:
            print(predicted_transformation[0][0, 0:3])
            print(predicted_transformation[0][0, 3:7] * (180 / np.pi))
        if display_ptClouds_in_itr:
            helper.display_clouds_data(source_data[0])
        ITR[loop_idx, :, :, :] = source_data

    # Feed the placeholders of Network with source data and template data obtained from N-Iterations.
    feed_dict = {
        ops['source_pointclouds_pl']: source_data,
        ops['template_pointclouds_pl']: template_data,
        ops['is_training_pl']: is_training
    }

    # Ask the network to predict the final transformation (no loss is evaluated here).
    step, predicted_transformation = sess.run(
        [ops['step'], ops['predicted_transformation']], feed_dict=feed_dict)

    # Apply the final transformation to the source data and compose it with the transformation matrix accumulated over the N iterations.
    TRANSFORMATIONS, source_data = helper.transformation_quat2mat(
        predicted_transformation, TRANSFORMATIONS, source_data)

    final_pose = helper.find_final_pose_inv(TRANSFORMATIONS)
    # Add back the centroid of the original source cloud that was subtracted before registration.
    final_pose[0, 0:3] = final_pose[0, 0:3] + np.mean(SOURCE_DATA, axis=1)[0]

    title = "Actual T (Red->Green): "
    for i in range(len(batch_euler_poses[0])):
        if i > 2:
            title += str(round(batch_euler_poses[0][i] * (180 / np.pi), 2))
        else:
            title += str(batch_euler_poses[0][i])
        title += ', '
    title += "\nPredicted T (Red->Blue): "
    for i in range(len(final_pose[0])):
        if i > 2:
            title += str(round(final_pose[0, i] * (180 / np.pi), 3))
        else:
            title += str(round(final_pose[0, i], 3))
        title += ', '

    # Display the ground truth pose and predicted pose for first Point Cloud in batch
    if display_poses:
        print('Ground Truth Position: {}'.format(
            batch_euler_poses[0, 0:3].tolist()))
        print('Predicted Position: {}'.format(final_pose[0, 0:3].tolist()))
        print('Ground Truth Orientation: {}'.format(
            (batch_euler_poses[0, 3:6] * (180 / np.pi)).tolist()))
        print('Predicted Orientation: {}'.format(
            (final_pose[0, 3:6] * (180 / np.pi)).tolist()))

    helper.display_three_clouds(TEMPLATE_DATA[0], SOURCE_DATA[0],
                                source_data[0], title)
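The test case above encodes its ground-truth pose as [tx, ty, tz, rx, ry, rz] with the angles in radians and hands it to helper.apply_transformation. That helper is also outside this excerpt; the sketch below shows one plausible reading of it (Rz @ Ry @ Rx Euler convention, one pose per cloud) purely as an illustration, not the repository's implementation.

# Minimal sketch of the pose convention assumed for helper.apply_transformation.
# The Euler order (here Rz @ Ry @ Rx) is an assumption, not taken from the original code.
import numpy as np


def euler_to_rotation(rx, ry, rz):
    """Rotation matrix from Euler angles in radians, composed as Rz @ Ry @ Rx."""
    cx, sx = np.cos(rx), np.sin(rx)
    cy, sy = np.cos(ry), np.sin(ry)
    cz, sz = np.cos(rz), np.sin(rz)
    Rx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
    Ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    Rz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
    return Rz @ Ry @ Rx


def apply_transformation_sketch(template_data, batch_euler_poses):
    """template_data: (B, N, 3), batch_euler_poses: (B, 6) as [tx, ty, tz, rx, ry, rz]."""
    source_data = np.empty_like(template_data)
    for b in range(template_data.shape[0]):
        t = batch_euler_poses[b, 0:3]
        R = euler_to_rotation(*batch_euler_poses[b, 3:6])
        source_data[b] = template_data[b] @ R.T + t   # rotate, then translate each cloud
    return source_data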
Example #10
0
        :param Y:
        :param theta:
        :param x:
        :return:
        """
        k_theta = self.k_theta(Y, theta, x)
        h = s.singe_jacobian(THETA, X[0])
        k_y = hr.prod(h, k_theta, h.T)  # Propagate the parameter covariance: K_y = H * K_theta * H^T.
        return k_y

s = Solver()
X = hr.repeat(2, X, 0)
F = s.solve(THETA, X)

for item in range(1000):
    Y = hr.add_noise(F, OBJ)                               # Perturb the noise-free model output.
    result_THETA, calc_F = s.wrapper('glsm_theta', Y, X)   # Re-estimate the parameters from the noisy data.
    if item == 0:
        out_THETA = result_THETA
        out_E = Y - calc_F
        out_F = calc_F
    else:
        out_THETA = np.append(out_THETA, result_THETA, axis=1)   # Stack estimates column-wise, one column per trial.
        out_E = np.append(out_E, Y - calc_F, axis=1)             # Residuals for each trial.
        out_F = np.append(out_F, calc_F, axis=1)                 # Fitted values for each trial.

b0 = np.concatenate((out_THETA[0], out_THETA[1]))
b1 = np.concatenate((out_THETA[2], out_THETA[3]))

# L is a stride constant defined outside this excerpt.
q_e = np.concatenate(out_E[::L]).flatten()
p_e = np.concatenate([out_E[i::L] for i in range(2, L - 1)]).flatten()
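The fragment above is a Monte-Carlo study: the noise-free model output F is perturbed 1000 times, the parameters are re-estimated on each trial, and every estimate, residual, and fitted value is stacked column-wise for later analysis. Solver, hr, OBJ and L are defined outside this excerpt, so the toy sketch below reproduces only the accumulation pattern with an ordinary least-squares stand-in; every name in it is a placeholder, not the original API.

# Toy Monte-Carlo sketch of the accumulation pattern above (placeholder model and solver).
import numpy as np

rng = np.random.default_rng(0)
X = np.column_stack([np.ones(50), np.linspace(0.0, 1.0, 50)])   # toy design matrix
THETA_TRUE = np.array([[1.5], [-0.7]])
F = X @ THETA_TRUE                                              # noise-free model output

estimates = []
for _ in range(1000):
    Y = F + rng.normal(scale=0.1, size=F.shape)                 # perturb the model output
    theta_hat, *_ = np.linalg.lstsq(X, Y, rcond=None)           # re-estimate the parameters
    estimates.append(theta_hat)

out_THETA = np.hstack(estimates)                                # one column per trial, as above
print('mean estimate:', out_THETA.mean(axis=1))
print('spread of estimates:', out_THETA.std(axis=1))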