Example #1
def TestOperation(ImgPH, LabelsTrue, ImageSize, ModelPath, DataPath,
                  LabelsPath, LabelsPathPred, Epochs):
    """
    Inputs:
    ImgPH is the Input Image placeholder
    LabelsTrue is the ground-truth label placeholder
    ImageSize is the size of the image
    ModelPath - Path to load trained model from
    DataPath - Paths of all images where testing will be run on
    LabelsPath - Path to the ground-truth labels
    LabelsPathPred - Path to save predictions
    Epochs - Epoch index used when logging Tensorboard summaries
    Outputs:
    Returns the mean loss over the test set; summaries are written to LogsPath
    """
    Length = ImageSize[0]
    LabelsTrueAll = ReadLabels(LabelsPath)
    print(np.asarray(LabelsTrue).shape)
    print(LabelsTrue[0, :])
    # Predict output with forward pass, MiniBatchSize for Test is 1
    prLogits = HomographyModel(ImgPH, ImageSize, 1)[0]
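    # The loss below is the root-mean-square error between the predicted 4-point offsets (prLogits) and the ground-truth labels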

    loss = tf.sqrt(
        tf.reduce_mean((tf.squared_difference(prLogits, LabelsTrue))))

    tf.summary.scalar('LossEveryIter', loss)
    # tf.summary.image('PatchBPH', PatchBPH)
    # tf.summary.image('PredB', prLogits)
    MergedSummaryOP = tf.summary.merge_all()

    # Setup Saver
    Saver = tf.train.Saver()
    LogsPath = "/home/p_akanksha94/CMSC733/apatel44_smakam_p1/Phase2-Sup/Code/Logs/Val"

    appendLoss = []

    with tf.Session() as sess:
        Saver.restore(sess, ModelPath)
        print('Number of parameters in this model are %d ' % np.sum([
            np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()
        ]))

        Writer = tf.summary.FileWriter(LogsPath, graph=tf.get_default_graph())

        for count in tqdm(range(np.size(DataPath))):
            DataPathNow = DataPath[count]
            label = np.asarray(LabelsTrueAll[count])
            label = np.reshape(label, (1, 8))
            Img, ImgOrg = ReadImages(ImageSize, DataPathNow)
            FeedDict = {ImgPH: Img, LabelsTrue: label}
            PredT, loss_fetched, Summary = sess.run(
                [prLogits, loss, MergedSummaryOP], FeedDict)

            appendLoss.append(loss_fetched)

            Writer.add_summary(Summary, Epochs * 1000 + count)
            # If you don't flush, Tensorboard doesn't update until a lot of iterations have passed!
            Writer.flush()

        return np.mean(appendLoss)
Example #2
def TestOperation(ImgPH, ImageSize, ModelPath, DataPath):
    """
    Inputs:
    ImgPH is the Input Image placeholder
    ImageSize is the size of the image
    ModelPath - Path to load trained model from
    DataPath - Paths of all images where testing will be run on
    Outputs:
    Prints the average 2-norm error over the test set; per-image corners are
    passed to BoundingBox() for visualization
    """
    Length = ImageSize[0]
    # Predict output with forward pass, MiniBatchSize for Test is 1
    H4Pt, _ = HomographyModel(ImgPH, ImageSize, 1)

    # Setup Saver
    Saver = tf.train.Saver()

    errorSum = 0
    with tf.Session() as sess:
        Saver.restore(sess, ModelPath)
        print('Number of parameters in this model are %d ' % np.sum([
            np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()
        ]))

        for count in tqdm(range(np.size(DataPath))):
            DataPathNow = DataPath[count]
            Img, ImgOrg, Label, Corners = ReadImages(ImageSize, DataPathNow)

            FeedDict = {ImgPH: Img}
            Pred = sess.run(H4Pt, FeedDict)
            #print ("pred ", Pred)
            #print ("label ", Label)
            #print ("---------> ", Corners)
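            # Accumulate the root-mean-square error between the predicted and ground-truth 4-point offsets for this image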
            errorSum += np.sqrt(np.mean((Pred - Label) * (Pred - Label)))
            #print ("---------> ", Pred)
            BoundingBox(ImgOrg, Corners, Label, Pred.reshape(-1), count)

    print('The average 2-norm error is %f ' % (errorSum / np.size(DataPath)))
Example #3
    def TestOperation(self, ImgPH, ImageSize, ModelPath, DataPath,
                      LabelsPathPred):
        """
        Inputs: 
        ImgPH is the Input Image placeholder
        ImageSize is the size of the image
        ModelPath - Path to load trained model from
        DataPath - Paths of all images where testing will be run on
        LabelsPathPred - Path to save predictions
        Outputs:
        Predictions written to ./TxtFiles/PredOut.txt
        """
        Length = ImageSize[0]
        # Predict output with forward pass, MiniBatchSize for Test is 1
        _, prSoftMaxS = HomographyModel(ImgPH, ImageSize, 1)

        # Setup Saver
        Saver = tf.train.Saver()

        with tf.Session() as sess:
            Saver.restore(sess, ModelPath)
            print('Number of parameters in this model are %d ' % np.sum([
                np.prod(v.get_shape().as_list())
                for v in tf.trainable_variables()
            ]))

            OutSaveT = open(LabelsPathPred, 'w')

            for count in tqdm(range(np.size(DataPath))):
                DataPathNow = DataPath[count]
                Img, ImgOrg = self.ReadImages(ImageSize, DataPathNow)
                FeedDict = {ImgPH: Img}
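                # The predicted class is the argmax of the softmax output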
                PredT = np.argmax(sess.run(prSoftMaxS, FeedDict))

                OutSaveT.write(str(PredT) + '\n')

            OutSaveT.close()
Example #4
def model(im, im1, modelPath):
	ImageSize = [128,128,2]
	ImgPH = tf.placeholder(tf.float32, shape=(1, ImageSize[0], ImageSize[1], ImageSize[2]))
	HomoPH = tf.placeholder(tf.float32, shape=(1, ImageSize[0], ImageSize[1], 1)) # Output image
	OutPH = tf.placeholder(tf.float32, shape=(1, ImageSize[0], ImageSize[1], 1)) # Output image
	LabelPH = tf.placeholder(tf.float32, shape=(1, 8,1))
	learning_rate = tf.placeholder(tf.float32, shape=[])

	H4Pt = HomographyModel(ImgPH, ImageSize, 1)
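	# Tensor DLT turns the predicted 4-point offsets into the (inverse) homography consumed by the spatial transformer below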
	H_inv = DLT(1, H4Pt)
	out_size = tf.convert_to_tensor(np.array([128,128]),dtype=tf.int32)
	output, condition = transformer(OutPH, H_inv, out_size)

	Saver = tf.train.Saver()
	
	with tf.Session() as sess:
		Saver.restore(sess, modelPath)
		FeedDict = {ImgPH: im, OutPH:im1 }
		H4pt_pred, H_pred, out_image = sess.run([H4Pt, H_inv, output], FeedDict)
	print(H_pred)
	print(H_pred.shape)
	print(H4pt_pred)
	# cv2.imwrite("pred.jpg", out_image)
	return out_image,H4pt_pred, H_pred
Example #5
def runTest(firstImage, ModelPath):
    #load image
    cropSize = 128
    resize = (320, 240)
    rho = 16
    firstI = cv2.imread(firstImage)

    image1 = cv2.imread(firstImage, cv2.IMREAD_GRAYSCALE)
    image = cv2.resize(image1, resize)
    #get a random x and y location that does not have the borders
    #x is Y and y is X!
    getLocX = random.randint(105, 160)
    getLocY = random.randint(105, 225)
    #crop the image
    patchA = image[getLocX - int(cropSize / 2):getLocX + int(cropSize / 2),
                   getLocY - int(cropSize / 2):getLocY + int(cropSize / 2)]

    #perturb image randomly and apply homography
    pts1 = np.float32([[
        getLocY - cropSize / 2 + random.randint(-rho, rho),
        getLocX - cropSize / 2 + random.randint(-rho, rho)
    ],
                       [
                           getLocY + cropSize / 2 + random.randint(-rho, rho),
                           getLocX - cropSize / 2 + random.randint(-rho, rho)
                       ],
                       [
                           getLocY + cropSize / 2 + random.randint(-rho, rho),
                           getLocX + cropSize / 2 + random.randint(-rho, rho)
                       ],
                       [
                           getLocY - cropSize / 2 + random.randint(-rho, rho),
                           getLocX + cropSize / 2 + random.randint(-rho, rho)
                       ]])
    pts2 = np.float32([[getLocY - cropSize / 2, getLocX - cropSize / 2],
                       [getLocY + cropSize / 2, getLocX - cropSize / 2],
                       [getLocY + cropSize / 2, getLocX + cropSize / 2],
                       [getLocY - cropSize / 2, getLocX + cropSize / 2]])

    #get the perspective transform
    hAB = cv2.getPerspectiveTransform(pts2, pts1)
    #get the inverse
    hBA = np.linalg.inv(hAB)
    #get the warped image from the inverse homography generated in the dataset
    warped = np.asarray(cv2.warpPerspective(image, hAB,
                                            resize)).astype(np.uint8)
    #get the last patchB at the same location but on the warped image.
    patchB = warped[getLocX - int(cropSize / 2):getLocX + int(cropSize / 2),
                    getLocY - int(cropSize / 2):getLocY + int(cropSize / 2)]

    cv2.line(firstI, (pts1[0][0], pts1[0][1]), (pts1[1][0], pts1[1][1]),
             (255, 0, 0), 3)
    cv2.line(firstI, (pts1[1][0], pts1[1][1]), (pts1[2][0], pts1[2][1]),
             (255, 0, 0), 3)
    cv2.line(firstI, (pts1[2][0], pts1[2][1]), (pts1[3][0], pts1[3][1]),
             (255, 0, 0), 3)
    cv2.line(firstI, (pts1[3][0], pts1[3][1]), (pts1[0][0], pts1[0][1]),
             (255, 0, 0), 3)

    ImageSize = [128, 128, 2]
    ImgPH = tf.placeholder(tf.float32, shape=(1, 128, 128, 2))

    H4pt = HomographyModel(ImgPH, ImageSize, 1)
    Saver = tf.train.Saver()

    with tf.Session() as sess:
        Saver.restore(sess, ModelPath)
        print('Number of parameters in this model are %d ' % np.sum([
            np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()
        ]))

        Img = np.dstack((patchA, patchB))
        image = Img
        Img = np.array(Img).reshape(1, 128, 128, 2)

        FeedDict = {ImgPH: Img}
        PredT = sess.run(H4pt, FeedDict)

    newPointsDiff = PredT.reshape(4, 2)
    print(newPointsDiff)
    pts2 = np.float32([[getLocY - cropSize / 2, getLocX - cropSize / 2],
                       [getLocY + cropSize / 2, getLocX - cropSize / 2],
                       [getLocY + cropSize / 2, getLocX + cropSize / 2],
                       [getLocY - cropSize / 2, getLocX + cropSize / 2]])
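    # Recover the predicted corner locations by adding the network's 4-point offsets to the original patch corners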
    pts1 = pts2 + newPointsDiff
    H4pts = pts2 - pts1
    hAB = cv2.getPerspectiveTransform(pts2, pts1)
    hBA = np.linalg.inv(hAB)

    cv2.line(firstI, (pts1[0][0], pts1[0][1]), (pts1[1][0], pts1[1][1]),
             (0, 0, 255), 3)
    cv2.line(firstI, (pts1[1][0], pts1[1][1]), (pts1[2][0], pts1[2][1]),
             (0, 0, 255), 3)
    cv2.line(firstI, (pts1[2][0], pts1[2][1]), (pts1[3][0], pts1[3][1]),
             (0, 0, 255), 3)
    cv2.line(firstI, (pts1[3][0], pts1[3][1]), (pts1[0][0], pts1[0][1]),
             (0, 0, 255), 3)

    cv2.imwrite('result' + '.png', firstI)
Example #6
def TrainOperation(ImgPH, HomoPH, OutPH, LabelPH, DirNamesTrain1, DirNamesTrain2, TrainLabels, NumTrainSamples, ImageSize,
				   NumEpochs, MiniBatchSize, SaveCheckPoint, CheckPointPath,
				   DivTrain, LatestFile, BasePath, LogsPath, ModelType, DirNamesValid1, DirNamesValid2, 
				   NumValidSamples, ValidLabels, lr, OptimizerHomography):
	"""
	Inputs: 
	ImgPH is the Input Image placeholder
	LabelPH is the one-hot encoded label placeholder
	DirNamesTrain - Variable with Subfolder paths to train files
	TrainLabels - Labels corresponding to Train/Test
	NumTrainSamples - length(Train)
	ImageSize - Size of the image
	NumEpochs - Number of passes through the Train data
	MiniBatchSize is the size of the MiniBatch
	SaveCheckPoint - Save checkpoint every SaveCheckPoint iteration in every epoch, checkpoint saved automatically after every epoch
	CheckPointPath - Path to save checkpoints/model
	DivTrain - Divide the data by this number for Epoch calculation, use if you have a lot of data or for debugging code
	LatestFile - Latest checkpoint file to continue training
	BasePath - Path to COCO folder without "/" at the end
	LogsPath - Path to save Tensorboard Logs
	ModelType - Supervised or Unsupervised Model
	Outputs:
	Saves Trained network in CheckPointPath and Logs to LogsPath
	"""      
	# Predict output with forward pass

	# with tf.name_scope("Hmography_Net"):

	H4Pt = HomographyModel(ImgPH, ImageSize, MiniBatchSize)
	
	with tf.name_scope("Tensor_DLT"):
		H_inv = DLT(MiniBatchSize, H4Pt)
	
	out_size = tf.convert_to_tensor(np.array([128,128]),dtype=tf.int32)
	
	with tf.name_scope("Spatial_Transformer_Layer"):
		output, condition = transformer(OutPH, H_inv, out_size)
		img_summary = tf.summary.image('Warped_images', output)
		actual_out_summary = tf.summary.image('Match_with_this', OutPH)
		input_image_summary = tf.summary.image('Input_Image', HomoPH)

	initial_lr = 3*0.001

	############################## Loss functions ###################################
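	# Unsupervised: photometric L1 loss between the warped output and the target image; Supervised: L2 loss on the 4-point offsets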
	if ModelType=="Unsup":
		with tf.name_scope('Loss'):
			# loss = tf.nn.l2_loss(prLogits-LabelPH)
			# loss = tf.abs()
			loss = tf.reduce_mean(tf.abs(output - HomoPH))*0.009
			lossSummary = tf.summary.scalar('LossEveryIter', loss)

		with tf.name_scope("ValidationLoss"):
			validation_loss = tf.reduce_mean(tf.abs(output-HomoPH))*0.009
			validationLossSummary = tf.summary.scalar("ValidationLoss ",validation_loss)
	
	else:
		with tf.name_scope('Loss'):
			# loss = tf.nn.l2_loss(prLogits-LabelPH)
			loss = tf.reduce_sum(tf.square(tf.reshape(H4Pt, [MiniBatchSize,8,1]) - LabelPH))/2.0
			lossSummary = tf.summary.scalar('LossEveryIter', loss)

		with tf.name_scope("ValidationLoss"):
			validation_loss = tf.reduce_sum(tf.square(tf.reshape(H4Pt, [MiniBatchSize,8,1]) - LabelPH))/2.0
			validationLossSummary = tf.summary.scalar("ValidationLoss ",validation_loss)
	##################################################################################

	#########################  Optimizers ############################
	if OptimizerHomography=="Adam":
		with tf.name_scope('Adam'):
			Optimizer = tf.train.AdamOptimizer(learning_rate = lr).minimize(loss)
	else:
		with tf.name_scope('SGD'):
			Optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9).minimize(loss)
	###################################################################

	#########################  Errors ##################################
	with tf.name_scope('Error'):
		# mm = tf.reshape()
		msr = tf.abs(tf.reshape(H4Pt, [MiniBatchSize,8,1]) - LabelPH)
		Err = tf.reduce_mean(msr)
		errorSummary = tf.summary.scalar("Error ",Err)

	with tf.name_scope("ValidationError"):
		validation_Err = tf.reduce_mean(tf.abs(tf.reshape(H4Pt, [MiniBatchSize,8,1]) - LabelPH))		
		validationErrorSummary = tf.summary.scalar("ValidationError ",validation_Err)
	######################################################################

	
	#####################  Summaries ###############################
	TrainingSummary = tf.summary.merge([lossSummary,errorSummary])
	ValidationSummary = tf.summary.merge([validationLossSummary, validationErrorSummary])
	images_summary = tf.summary.merge([img_summary, actual_out_summary, input_image_summary	])
	##############################################################
	# Setup Saver
	Saver = tf.train.Saver()
	acc = []
	temp_error = []
	temp_loss = []
	temp_valid_loss = []
	loss_ = []
	with tf.Session() as sess:       
		if LatestFile is not None:
			Saver.restore(sess, CheckPointPath + LatestFile + '.ckpt')
			# Extract only numbers from the name
			StartEpoch = int(''.join(c for c in LatestFile.split('a')[0] if c.isdigit()))
			print('Loaded latest checkpoint with the name ' + LatestFile + '....')
		else:
			sess.run(tf.global_variables_initializer())
			StartEpoch = 0
			print('New model initialized....')

		# Tensorboard
		Writer = tf.summary.FileWriter(LogsPath, graph=tf.get_default_graph())
			
		for Epochs in tqdm(range(StartEpoch, NumEpochs)):
			NumIterationsPerEpoch = int(NumTrainSamples/MiniBatchSize/DivTrain)
			if Epochs%8==0:
				initial_lr = initial_lr/10.0
				print("------------------------\n setting new learning rate\n")
				print(initial_lr)
				# MiniBatchSize = MiniBatchSize*2
			for PerEpochCounter in tqdm(range(NumIterationsPerEpoch)):
				I1Batch, Transform, output_images, labels = GenerateBatch(BasePath, DirNamesTrain1, DirNamesTrain2, TrainLabels, ImageSize, MiniBatchSize, PerEpochCounter)
				
				FeedDict = {ImgPH: I1Batch, HomoPH: Transform, OutPH: output_images , lr: initial_lr, LabelPH:labels }
				_, LossThisBatch, Summary, Im_Summary, ER = sess.run([Optimizer, loss, TrainingSummary, images_summary, Err], feed_dict=FeedDict)
				
				temp_loss.append(LossThisBatch)
				if PerEpochCounter % SaveCheckPoint == 0:
					# Save the Model learnt in this epoch
					# SaveName =  CheckPointPath + str(Epochs) + 'a' + str(PerEpochCounter) + 'model.ckpt'
					# Saver.save(sess,  save_path=SaveName)
					# print('\n' + SaveName + ' Model Saved...')
					# if (Epochs*NumIterationsPerEpoch + PerEpochCounter)>0:
					Writer.add_summary(Im_Summary, Epochs*NumIterationsPerEpoch + PerEpochCounter)
					print("Loss of model : "+str(LossThisBatch))
					# print("-----------------------------------------------------\n")
					# print("ctual values : ")
					# print(actual)
					# print("-----------------------------------------------------\n")
					# print("Predicted valuess : "+str(h4pt_pre))
					# print("-----------------------------------------------------")
					# print("Error valuess : ")
					# print(MSR)
					# print("-----------------------------------------------------")
					# print("Error:: : ")
					# print(ERR)
					# print("-----------------------------------------------------")
				# Tensorboard
				Writer.add_summary(Summary, Epochs*NumIterationsPerEpoch + PerEpochCounter)
				# If you don't flush, Tensorboard doesn't update until a lot of iterations have passed!
				Writer.flush()

			######################### Validation ################################
			NumIterationsPerEpoch = int(NumValidSamples/MiniBatchSize)
			for PerEpochCounter in tqdm(range(NumIterationsPerEpoch)):
				I1Batch, Transform, output_images, labels = GenerateBatch(BasePath, DirNamesValid1, DirNamesValid2, ValidLabels, ImageSize, MiniBatchSize, PerEpochCounter)
				FeedDict = {ImgPH: I1Batch, HomoPH: Transform, OutPH: output_images , lr: initial_lr, LabelPH:labels}				
				valSummary, Im_Summary = sess.run([ValidationSummary, images_summary], feed_dict=FeedDict)
				# temp_valid_loss.append(LossThisBatchValidation)
				Writer.add_summary(valSummary, Epochs*NumIterationsPerEpoch + PerEpochCounter)
				# If you don't flush, Tensorboard doesn't update until a lot of iterations have passed!
				Writer.flush()

			# Save model every epoch
			SaveName = CheckPointPath + str(Epochs) + 'model.ckpt'
			Saver.save(sess, save_path=SaveName)
			print('\n' + SaveName + ' Model Saved...')
			print("----------------After epoch------------")
			print("Total loss = "+str(np.array(temp_loss).sum()))
			print("Validation loss = "+str(np.array(temp_valid_loss).sum()))
			print("--------------------------------------------")
			temp_loss = []
			temp_valid_loss = []
Example #7
def TestOperation(PatchPH, OriginalCornersPH, ImagePH, PatchSize, Perturbation,
                  ModelType, ModelPath, DataPath):
    """
	Inputs: 
	ImgPH is the Input Image placeholder
	ImageSize is the size of the image
	ModelPath - Path to load trained model from
	DataPath - Paths of all images where testing will be run on
	LabelsPathPred - Path to save predictions
	Outputs:
	Predictions written to ./TxtFiles/PredOut.txt
	"""
    # Predict output with forward pass, MiniBatchSize for Test is 1

    predictedPatch, H4Pt = HomographyModel(PatchPH, OriginalCornersPH, ImagePH,
                                           PatchSize, 1, Perturbation,
                                           ModelType)

    # Setup Saver
    Saver = tf.train.Saver()

    with tf.Session() as sess:
        Saver.restore(sess, ModelPath)
        print('Number of parameters in this model are %d ' % np.sum([
            np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()
        ]))

        #OutSaveT = open(LabelsPathPred, 'w')

        EPE = 0
        averageL2 = 0

        for count in tqdm(range(np.size(DataPath))):
            DataPathNow = DataPath[count]
            Image, Patch, GroundTruth, OriginalCorners = ReadImages(
                DataPathNow, PatchSize, Perturbation)
            FeedDict = {
                PatchPH: Patch,
                OriginalCornersPH: OriginalCorners,
                ImagePH: Image
            }
            H4Pt_out = sess.run(H4Pt, FeedDict)

            #print((H4Pt_out * Perturbation))
            #print((GroundTruth * Perturbation))
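            # Map the predicted and ground-truth 4-point offsets back to absolute corner coordinates (scaled by Perturbation) and reorder the rows for polygon drawing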

            pred = (OriginalCorners + (H4Pt_out * Perturbation)).reshape(
                (4, 2)).astype(np.int32)
            pred[[2, 3]] = pred[[3, 2]]
            ground = (OriginalCorners + (GroundTruth * Perturbation)).reshape(
                (4, 2)).astype(np.int32)
            ground[[2, 3]] = ground[[3, 2]]

            #print(ground)
            #print(pred)

            #print(np.square((H4Pt_out) - (GroundTruth))/2)

            averageL2 += np.sum(np.square((H4Pt_out) - (GroundTruth))) / 2

            Image = (Image[0] * 255).astype('uint8')  #cv2.imread(DataPathNow)
            #Image[0] = (Image[0]*255).astype('uint8')

            cv2.polylines(Image, [pred], True, (255, 255, 255), thickness=3)
            cv2.polylines(Image, [ground], True, (0, 0, 0), thickness=3)
            #cv2.imshow("", Image[0])
            #cv2.waitKey()

            #cv2.imwrite('./Images/ValImages/Unsupervised/' + str(count) + '.jpg', Image)

            EPE += np.sum(
                np.square((H4Pt_out * Perturbation) -
                          (GroundTruth * Perturbation))) / 2

        EPE /= np.size(DataPath)

        averageL2 /= np.size(DataPath)

        print("Average EPE: " + str(EPE))
        print("Average L2: " + str(averageL2))
Example #8
def TestOperation(ImgPH, training, pics, ModelPath):
    """
    Inputs:
    ImgPH is the Input Image placeholder
    training specifies training/testing for the batch normalization
    pics is the list of input images to stitch into a panorama
    ModelPath - Path to load trained model from
    """
    Length = 1
    ImageSize = (128,128,2)

    # Predict output with forward pass, MiniBatchSize for Test is 1
    H4Pt = HomographyModel(ImgPH, ImageSize, 1, training)

    # Setup Saver for model restoration
    Saver = tf.train.Saver()

    with tf.Session() as sess:
        Saver.restore(sess, ModelPath)
        im2 = pics[0]
        pics = pics[1:]
        npics = 1

        # Loop over all the pics in the directory
        while pics:
            print('\nUsing image #'+str(npics))
            im1 = pics[0]
            pics = pics[1:]
            im1hgt,im1width,im1channels = im1.shape
            im2hgt,im2width,im2channels = im2.shape
            im1gray = cv2.cvtColor(im1,cv2.COLOR_RGB2GRAY)
            im2gray = cv2.cvtColor(im2,cv2.COLOR_RGB2GRAY)

            # Resize to fit model
            im1_resz = cv2.resize(im1gray, (320, 240))
            im2_resz = cv2.resize(im2gray, (320, 240))

            # TODO: Take multiple patches and average the homography

            # Extract patches from the images of size 128x128
            y_0 = np.random.randint(35, 75)
            x_0 = np.random.randint(35, 150)

            psize = 128 # patch-size

            # Coordinates of initial patch
            C_a = np.array([[y_0,x_0],
                   [y_0,x_0+psize],
                   [y_0+psize,x_0+psize],
                   [y_0+psize,x_0]], np.int32)

            # Extract patch
            P_a = im1_resz[C_a[0][0]:C_a[2][0], C_a[0][1]:C_a[1][1]]
            P_b = im2_resz[C_a[0][0]:C_a[2][0], C_a[0][1]:C_a[1][1]]

            # Stack the images into 1 to get the input for the DL model
            net_inp = np.zeros((P_a.shape[0], P_a.shape[1], 2),dtype=np.float32)
            net_inp[:,:,0] = P_a
            net_inp[:,:,1] = P_b

            # Standardize the input by subtracting mean and division by std
            net_inp = (net_inp - 80.0)/80.0

            '''
            Obtain homography using DL model
            '''
            ImageSize = net_inp.shape
            # print(ImageSize)
            #
            # ImgPH = tf.placeholder(tf.float32, shape=(1, ImageSize[0], ImageSize[1], 2))
            # training = tf.placeholder(tf.bool, name='training')

            # Run the net and get the 4-pt homography as output
            net_inp = np.expand_dims(net_inp, axis=0) # To enable passing to the placeholder
            Test_FeedDict = {ImgPH: net_inp, training: False}
            calcH4Pt = sess.run(H4Pt, feed_dict=Test_FeedDict)

            # print(calcH4Pt)

            '''
            Warp and blend the resized images using the obtained 4-pt homography
            '''
            im2 = stitchFromH4pt(points1=C_a.astype(np.float32),
                                 h4pt=(calcH4Pt.reshape(4, 2)).astype(np.float32),
                                 im1=cv2.resize(im1, (320, 240)),
                                 im2=cv2.resize(im2, (320, 240)),
                                 valid2=None)
            npics+=1
    return im2
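
A minimal sketch of how the TestOperation above might be driven, assuming TensorFlow 1.x; the image folder and checkpoint name are hypothetical placeholders.

import glob

import cv2
import tensorflow as tf

# Placeholders matching what TestOperation feeds internally: a 1 x 128 x 128 x 2 patch stack and a batch-norm mode flag
ImgPH = tf.placeholder(tf.float32, shape=(1, 128, 128, 2))
training = tf.placeholder(tf.bool, name='training')

# Hypothetical image folder and checkpoint name, for illustration only
pics = [cv2.imread(f) for f in sorted(glob.glob('../Data/Pano/*.jpg'))]
Panorama = TestOperation(ImgPH, training, pics, '../Checkpoints/19model.ckpt')
cv2.imwrite('mypano.png', Panorama)
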
Example #9
def TrainOperation(ImgPH, LabelPH, training, TrainImages, TrainLabels,
                   ValImages, ValLabels, NumTrainSamples, ImageSize, NumEpochs,
                   MiniBatchSize, SaveCheckPoint, CheckPointPath, DivTrain,
                   LatestFile, BasePath, LogsPath):
    """
    Inputs:
    ImgPH is the Input Image placeholder
    LabelPH is the label placeholder
    TrainImages - Training images file
    TrainLabels - Labels corresponding to Train/Test
    NumTrainSamples - length(Train)
    ImageSize - Size of the image
    NumEpochs - Number of passes through the Train data
    MiniBatchSize is the size of the MiniBatch
    SaveCheckPoint - Save checkpoint every SaveCheckPoint iteration in every epoch, checkpoint saved automatically after every epoch
    CheckPointPath - Path to save checkpoints/model
    DivTrain - Divide the data by this number for Epoch calculation, use if you have a lot of data or for debugging code
    LatestFile - Latest checkpoint file to continue training
    BasePath - Path to COCO folder without "/" at the end
    LogsPath - Path to save Tensorboard Logs
    ModelType - Supervised or Unsupervised Model
    Outputs:
    Saves Trained network in CheckPointPath and Logs to LogsPath
    """
    # Predict output with forward pass

    # Convert training to placeholder
    # training = True

    H4Pt = HomographyModel(ImgPH, ImageSize, MiniBatchSize, training)

    with tf.name_scope('Loss'):
        ###############################################
        # Fill your loss function of choice here!
        ###############################################
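        # Mean over the batch of the Euclidean (L2) distance between the predicted and ground-truth 4-point offsets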
        diff_tensor = H4Pt - LabelPH
        loss = tf.reduce_mean(tf.norm(diff_tensor, axis=1))

    with tf.name_scope('Adam'):
        ###############################################
        # Fill your optimizer of choice here!
        ###############################################
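        # Run the UPDATE_OPS (e.g. batch-norm moving-average updates) together with the optimizer step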
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            Optimizer = tf.train.AdamOptimizer(
                learning_rate=1e-4).minimize(loss)

    # Tensorboard
    # Create a summary to monitor loss tensor
    tf.summary.scalar('LossEveryIter', loss)
    # tf.summary.histogram('H4Pt', H4Pt)
    # tf.summary.histogram('Difference Tensor', diff_tensor)

    # Merge all summaries into a single operation
    MergedSummaryOP = tf.summary.merge_all()

    # Setup Saver
    Saver = tf.train.Saver()

    with tf.Session() as sess:
        if LatestFile is not None:
            Saver.restore(sess, CheckPointPath + LatestFile + '.ckpt')
            # Extract only numbers from the name
            StartEpoch = int(''.join(c for c in LatestFile.split('a')[0]
                                     if c.isdigit()))
            print('Loaded latest checkpoint with the name ' + LatestFile +
                  '....')
        else:
            sess.run(tf.global_variables_initializer())
            StartEpoch = 0
            print('New model initialized....')

        # Tensorboard
        Writer = tf.summary.FileWriter(LogsPath, graph=tf.get_default_graph())

        for Epochs in tqdm(range(StartEpoch, NumEpochs)):
            NumIterationsPerEpoch = int(NumTrainSamples / MiniBatchSize /
                                        DivTrain)
            EpochLoss = 0
            for PerEpochCounter in tqdm(range(NumIterationsPerEpoch)):
                I1Batch, LabelBatch = GenerateBatch(BasePath, TrainImages,
                                                    TrainLabels, ImageSize,
                                                    MiniBatchSize)
                FeedDict = {
                    ImgPH: I1Batch,
                    LabelPH: LabelBatch,
                    training: True
                }
                _, LossThisBatch, Summary = sess.run(
                    [Optimizer, loss, MergedSummaryOP], feed_dict=FeedDict)

                # Save checkpoint every some SaveCheckPoint's iterations
                if PerEpochCounter % SaveCheckPoint == 0:
                    # Save the Model learnt in this epoch
                    SaveName = CheckPointPath + str(Epochs) + 'a' + str(
                        PerEpochCounter) + 'model.ckpt'
                    Saver.save(sess, save_path=SaveName)
                    print('\n' + SaveName + ' Model Saved...')

                # Tensorboard
                Writer.add_summary(
                    Summary, Epochs * NumIterationsPerEpoch + PerEpochCounter)
                # If you don't flush, Tensorboard doesn't update until a lot of iterations have passed!
                Writer.flush()
                EpochLoss = EpochLoss + LossThisBatch

            # Print out loss per epoch
            EpochLoss = EpochLoss / NumIterationsPerEpoch
            print('Epoch number: ' + str(Epochs) + ', Epoch Loss: ' +
                  str(EpochLoss))

            # Tensorboard - epoch loss
            ELoss = tf.Summary()
            ELoss.value.add(tag='Epoch Loss', simple_value=EpochLoss)
            Writer.add_summary(ELoss, Epochs)
            # Writer.flush()

            # Validation Loss
            Val_I1Batch, Val_LabelBatch = GenerateBatch(BasePath, ValImages,
                                                        ValLabels, ImageSize,
                                                        MiniBatchSize)
            Val_FeedDict = {
                ImgPH: Val_I1Batch,
                LabelPH: Val_LabelBatch,
                training: False
            }
            ValLoss = sess.run(loss, feed_dict=Val_FeedDict)
            print('Val Loss: ' + str(ValLoss))

            # Tensorboard - validation loss
            ValLossSummary = tf.Summary()
            ValLossSummary.value.add(tag='Validation Loss', simple_value=ValLoss)
            Writer.add_summary(ValLossSummary, Epochs)
            Writer.flush()

            # Save model every epoch
            SaveName = CheckPointPath + str(Epochs) + 'model.ckpt'
            Saver.save(sess, save_path=SaveName)
            print('\n' + SaveName + ' Model Saved...')
Example #10
def TrainOperation(ImgPH, LabelPH, DirNamesTrain, TrainLabels, NumTrainSamples,
                   ImageSize, NumEpochs, MiniBatchSize, SaveCheckPoint,
                   CheckPointPath, DivTrain, LatestFile, BasePath, LogsPath,
                   ModelType):
    """
    Inputs: 
    ImgPH is the Input Image placeholder
    LabelPH is the one-hot encoded label placeholder
    DirNamesTrain - Variable with Subfolder paths to train files
    TrainLabels - Labels corresponding to Train/Test
    NumTrainSamples - length(Train)
    ImageSize - Size of the image
    NumEpochs - Number of passes through the Train data
    MiniBatchSize is the size of the MiniBatch
    SaveCheckPoint - Save checkpoint every SaveCheckPoint iteration in every epoch, checkpoint saved automatically after every epoch
    CheckPointPath - Path to save checkpoints/model
    DivTrain - Divide the data by this number for Epoch calculation, use if you have a lot of data or for debugging code
    LatestFile - Latest checkpoint file to continue training
    BasePath - Path to COCO folder without "/" at the end
    LogsPath - Path to save Tensorboard Logs
	ModelType - Supervised or Unsupervised Model
    Outputs:
    Saves Trained network in CheckPointPath and Logs to LogsPath
    """

    # keep_prob for dropout
    keep_prob = 0.5
    # print('Image PH',ImgPH.shape)
    # Predict output with forward pass
    prLogits = HomographyModel(ImgPH, ImageSize, MiniBatchSize, keep_prob)
    # sess1 = tf.Session()
    # print("size of prLogits: ")
    # print1 = tf.Print(prLogits,[prLogits])
    # print("size of LabelPH", tf.Print(LabelPH))

    # sess1 = tf.Session()
    # with sess1.as_default():
    #     tensor = tf.range(10)
    #     print_op = tf.print(tensor)
    #     with tf.control_dependencies([print_op]):
    #         out = tf.add(tensor, tensor)
    #     sess1.run(out)

    # sess1.close()
    with tf.name_scope('Loss'):
        print("\nCalculating L2 Loss")
        ###############################################
        # Fill your loss function of choice here!
        ###############################################
        #         loss = tf.reduce_sum(tf.square(tf.subtract(prLogits, TrainLabels))) / 2
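        # L2-style loss: half the summed squared error between the predicted and ground-truth 4-point offsets, reduced to a scalar for the optimizer and summary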
        loss = tf.reduce_sum(tf.square(prLogits - LabelPH)) / 2

        #        len_x = tf.sqrt(tf.reduce_sum(tf.square(prLogits)))
        #        len_y = tf.sqrt(tf.reduce_sum(tf.square(LabelPH)))
        #        loss = tf.sqrt(tf.reduce_sum(tf.square(prLogits/len_x - LabelPH/len_y)))

        print("Loss Calculation Done!!")
        print("loss = ", loss)


# fc2 = network output
# x2 = true label

    with tf.name_scope('Adam'):
        ###############################################
        # Fill your optimizer of choice here!
        ###############################################
        Optimizer = tf.train.MomentumOptimizer(learning_rate=0.01,
                                               momentum=0.9).minimize(loss)

    # Tensorboard
    # Create a summary to monitor loss tensor
    tf.summary.scalar('LossEveryIter', loss)
    # tf.summary.image('Anything you want', AnyImg)
    # Merge all summaries into a single operation
    MergedSummaryOP = tf.summary.merge_all()

    # Setup Saver
    Saver = tf.train.Saver()

    with tf.Session() as sess:
        if LatestFile is not None:
            Saver.restore(sess, CheckPointPath + LatestFile + '.ckpt')
            # Extract only numbers from the name
            StartEpoch = int(''.join(c for c in LatestFile.split('a')[0]
                                     if c.isdigit()))
            print('Loaded latest checkpoint with the name ' + LatestFile +
                  '....')
        else:
            sess.run(tf.global_variables_initializer())
            StartEpoch = 0
            print('New model initialized....')

        # Tensorboard
        Writer = tf.summary.FileWriter(LogsPath, graph=tf.get_default_graph())

        for Epochs in tqdm(range(StartEpoch, NumEpochs)):
            NumIterationsPerEpoch = int(NumTrainSamples / MiniBatchSize /
                                        DivTrain)
            for PerEpochCounter in tqdm(range(NumIterationsPerEpoch)):
                I1Batch, LabelBatch = GenerateBatch(BasePath, DirNamesTrain,
                                                    TrainLabels, ImageSize,
                                                    MiniBatchSize)
                FeedDict = {ImgPH: I1Batch, LabelPH: LabelBatch}
                print("FeedDict = ", FeedDict)
                #print("Optimizer = ",tf.shape(Optimizer))
                print("loss = ", loss)
                #print("MergedSummaryOP",type(MergedSummaryOP))

                _, LossThisBatch, Summary = sess.run(
                    [Optimizer, loss, MergedSummaryOP], feed_dict=FeedDict)

                # Save checkpoint every some SaveCheckPoint's iterations
                if PerEpochCounter % SaveCheckPoint == 0:
                    # Save the Model learnt in this epoch
                    SaveName = CheckPointPath + str(Epochs) + 'a' + str(
                        PerEpochCounter) + 'model.ckpt'
                    Saver.save(sess, save_path=SaveName)
                    print('\n' + SaveName + ' Model Saved...')

                # Tensorboard
                Writer.add_summary(
                    Summary, Epochs * NumIterationsPerEpoch + PerEpochCounter)
                # If you don't flush, Tensorboard doesn't update until a lot of iterations have passed!
                Writer.flush()

            # Save model every epoch
            SaveName = CheckPointPath + str(Epochs) + 'model.ckpt'
            Saver.save(sess, save_path=SaveName)
            print('\n' + SaveName + ' Model Saved...')
Example #11
def TrainOperation(ImgPH, LabelPH, DirNamesTrain, TrainLabels, NumTrainSamples,
                   ImageSize, NumEpochs, MiniBatchSize, SaveCheckPoint,
                   CheckPointPath, DivTrain, LatestFile, BasePath, LogsPath,
                   ModelType):
    """
	Inputs: 
	ImgPH is the Input Image placeholder
	LabelPH is the one-hot encoded label placeholder
	DirNamesTrain - Variable with Subfolder paths to train files
	TrainLabels - Labels corresponding to Train/Test
	NumTrainSamples - length(Train)
	ImageSize - Size of the image
	NumEpochs - Number of passes through the Train data
	MiniBatchSize is the size of the MiniBatch
	SaveCheckPoint - Save checkpoint every SaveCheckPoint iteration in every epoch, checkpoint saved automatically after every epoch
	CheckPointPath - Path to save checkpoints/model
	DivTrain - Divide the data by this number for Epoch calculation, use if you have a lot of data or for debugging code
	LatestFile - Latest checkpoint file to continue training
	BasePath - Path to COCO folder without "/" at the end
	LogsPath - Path to save Tensorboard Logs
	ModelType - Supervised or Unsupervised Model
	Outputs:
	Saves Trained network in CheckPointPath and Logs to LogsPath
	"""
    # Predict output with forward pass

    prLogits, prSoftMax = HomographyModel(ImgPH, ImageSize, MiniBatchSize)

    with tf.name_scope('Loss'):
        ###############################################
        # Fill your loss function of choice here!
        ###############################################
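        # Supervised case: L2 loss on the predicted 4-point offsets (the unsupervised branch is not implemented in this snippet)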
        if ModelType != 'Unsup':
            loss = tf.nn.l2_loss(prLogits - LabelPH)

    with tf.name_scope('Adam'):
        ###############################################
        # Fill your optimizer of choice here!
        ###############################################
        Optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)

    # Tensorboard
    # Create a summary to monitor loss tensor
    tf.summary.scalar('LossEveryIter', loss)
    # tf.summary.image('Anything you want', AnyImg)
    # Merge all summaries into a single operation
    MergedSummaryOP = tf.summary.merge_all()

    # Setup Saver
    Saver = tf.train.Saver()

    with tf.Session() as sess:
        if LatestFile is not None:
            Saver.restore(sess, CheckPointPath + LatestFile + '.ckpt')
            # Extract only numbers from the name
            StartEpoch = int(''.join(c for c in LatestFile.split('a')[0]
                                     if c.isdigit()))
            print('Loaded latest checkpoint with the name ' + LatestFile +
                  '....')
        else:
            sess.run(tf.global_variables_initializer())
            StartEpoch = 0
            print('New model initialized....')

        # Tensorboard
        Writer = tf.summary.FileWriter(LogsPath, graph=tf.get_default_graph())

        for Epochs in tqdm(range(StartEpoch, NumEpochs)):
            NumIterationsPerEpoch = int(NumTrainSamples / MiniBatchSize /
                                        DivTrain)
            for PerEpochCounter in tqdm(range(NumIterationsPerEpoch)):
                I1Batch, LabelBatch = GenerateBatch(BasePath, DirNamesTrain,
                                                    TrainLabels, ImageSize,
                                                    MiniBatchSize)
                FeedDict = {ImgPH: I1Batch, LabelPH: LabelBatch}
                _, LossThisBatch, Summary = sess.run(
                    [Optimizer, loss, MergedSummaryOP], feed_dict=FeedDict)

                # Save checkpoint every some SaveCheckPoint's iterations
                if PerEpochCounter % SaveCheckPoint == 0:
                    # Save the Model learnt in this epoch
                    SaveName = CheckPointPath + str(Epochs) + 'a' + str(
                        PerEpochCounter) + 'model.ckpt'
                    Saver.save(sess, save_path=SaveName)
                    print('\n' + SaveName + ' Model Saved...')

                # Tensorboard
                Writer.add_summary(
                    Summary, Epochs * NumIterationsPerEpoch + PerEpochCounter)
                # If you don't flush, Tensorboard doesn't update until a lot of iterations have passed!
                Writer.flush()

            # Save model every epoch
            SaveName = CheckPointPath + str(Epochs) + 'model.ckpt'
            Saver.save(sess, save_path=SaveName)
            print('\n' + SaveName + ' Model Saved...')
Example #12
def TestOperation(ImgPH, LabelPH, training, TestImages, TestLabels, ImageSize,
                  ModelPath):
    """
    Inputs:
    ImgPH is the Input Image placeholder
    ImageSize is the size of the image
    ModelPath - Path to load trained model from
    DataPath - Paths of all images where testing will be run on
    LabelsPathPred - Path to save predictions
    Outputs:
    Predictions written to ./TxtFiles/PredOut.txt
    """
    Length = TestImages.shape[0]

    # Predict output with forward pass, MiniBatchSize for Test is 1
    H4Pt = HomographyModel(ImgPH, ImageSize, 1, training)

    # Setup Saver
    Saver = tf.train.Saver()

    with tf.name_scope('Loss'):
        # Calculate EPE loss for 1 image
        diff_tensor = H4Pt - LabelPH
        loss = tf.reduce_mean(tf.norm(H4Pt - LabelPH, axis=1))
        l1_loss = tf.reduce_mean(tf.abs(H4Pt - LabelPH))

    with tf.Session() as sess:
        Saver.restore(sess, ModelPath)
        print('Number of parameters in this model are %d ' % np.sum([
            np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()
        ]))

        # OutSaveT = open(LabelsPathPred, 'w')
        EpochLoss = 0
        Epoch_l1_loss = 0
        total_time = 0

        for count in tqdm(range(Length)):
            TestImg = TestImages[count]
            TestImg = np.expand_dims(
                TestImg, axis=0)  # To enable passing to the placeholder

            TestLabel = TestLabels[count]
            TestLabel = TestLabel.reshape(1, 8)

            Test_FeedDict = {
                ImgPH: TestImg,
                LabelPH: TestLabel,
                training: False
            }
            start_time = time.time()
            Loss_Img, calcH4Pt, calcLabelPH, calcdiff, calcl1_loss = sess.run(
                [loss, H4Pt, LabelPH, diff_tensor, l1_loss],
                feed_dict=Test_FeedDict)
            total_time += (time.time() - start_time)
            EpochLoss = EpochLoss + Loss_Img
            Epoch_l1_loss = Epoch_l1_loss + calcl1_loss

            # Debugging
            # print('Image Loss: '+str(Loss_Img))
            # print('\nH4Pt\n')
            # print(calcH4Pt)
            # print('\nLabelPH\n')
            # print(calcLabelPH)
            # print('\nDiff tensor\n')
            # print(calcdiff)

        EPE_Loss = EpochLoss / Length
        Epoch_l1_loss = Epoch_l1_loss / Length
        total_time = total_time / Length
        print('\nTotal testset EPE Loss: ' + str(EPE_Loss))
        print('\nTotal testset L1 Loss: ' + str(Epoch_l1_loss))
        print('\nAverage forward pass runtime: ' + str(total_time))
Example #13
def main():
    # Add any Command Line arguments here
    # Parser = argparse.ArgumentParser()
    # Parser.add_argument('--NumFeatures', default=100, help='Number of best features to extract from each image, Default:100')

    # Args = Parser.parse_args()
    # NumFeatures = Args.NumFeatures
    """
	Read a set of images for Panorama stitching
	"""

    Parser = argparse.ArgumentParser()
    Parser.add_argument(
        '--ImageSetBasePath',
        default="../../Phase1/Data/Train/Set1",
        help=
        'Path to the image set used for panorama stitching, Default: ../../Phase1/Data/Train/Set1'
    )
    Parser.add_argument(
        '--NumFeatures',
        default=300,
        type=int,
        help='Number of best features to extract from each image, Default: 300')
    Parser.add_argument('--ModelPath',
                        default='../Checkpoints/',
                        help='Path to saved Model, Default: ../Checkpoints/')
    Parser.add_argument(
        '--ModelType',
        default='Sup',
        help=
        'Model type, Supervised or Unsupervised? Choose from Sup and Unsup, Default:Sup'
    )

    Args = Parser.parse_args()
    NumFeatures = Args.NumFeatures
    ImageSetBasePath = Args.ImageSetBasePath
    ModelPath = Args.ModelPath
    ModelType = Args.ModelType

    images = read_images(ImageSetBasePath)
    colored_images = read_images(ImageSetBasePath, color=True)

    num_images = len(images)

    corners = detect_corners(images)

    corner_points = get_corner_points(corners)

    best_corners = []

    for i in range(len(colored_images)):

        b = anms(corner_points[i], NumFeatures)
        best_corners.append(b)

    match_matrix = np.zeros((num_images, num_images))
    best_match_dict = {}

    for i in range(num_images):

        #cv2.imshow("", images[i])
        #cv2.waitKey(0)

        for j in range(num_images):
            if (i != j):

                features1 = compute_features(images[i], best_corners[i])
                features2 = compute_features(images[j], best_corners[j])

                matches = feature_match(images[i], features1, images[j],
                                        features2)

                homography, num_matches, best_matches = ransac(
                    images[i], images[j], matches)

                match_matrix[i, j] = num_matches
                best_match_dict[(i, j)] = best_matches
                #homography_matrix[i,j] = homography

                #print(num_matches)

    top_match = np.where(match_matrix == match_matrix.max())

    top_match = (top_match[0].tolist()[0], top_match[1].tolist()[0])

    best_matches = best_match_dict[top_match]

    print(best_matches)

    for match in range(len(best_matches[0])):

        if (verify_point(images[top_match[0]], best_matches[0][match].pt) and
                verify_point(images[top_match[1]], best_matches[1][match].pt)):

            point1 = np.array(best_matches[0][match].pt).astype(np.int32)
            point2 = np.array(best_matches[1][match].pt).astype(np.int32)

            print(point1)
            print(point2)

            print(np.shape(images[top_match[0]]))
            print(np.shape(images[top_match[1]]))

            break

    originalPatch = images[top_match[0]][point1[1] - 64:point1[1] + 64,
                                         point1[0] - 64:point1[0] + 64]

    perturbedPatch = images[top_match[1]][point2[1] - 64:point2[1] + 64,
                                          point2[0] - 64:point2[0] + 64]

    original_points = np.array([
        point1[0] - 64, point1[1] - 64, point1[0] + 64, point1[1] - 64,
        point1[0] - 64, point1[1] + 64, point1[0] + 64, point1[1] + 64
    ]).reshape(4, 2)
    print(original_points)
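    # Stack the original and perturbed grayscale patches into a single 2-channel input for the network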

    combinedPatch = np.concatenate(
        (originalPatch[:, :, None], perturbedPatch[:, :, None]),
        axis=2).astype(np.float32)
    """
	Obtain Homography using Deep Learning Model (Supervised and Unsupervised)
	"""

    PatchPH = tf.placeholder('float', shape=(1, 128, 128, 2))
    OriginalCornersPH = tf.placeholder(tf.float32, shape=(1, 8))
    ImagePH = tf.placeholder(tf.float32, shape=(1, 240, 320))

    predictedPatch, H4Pt = HomographyModel(PatchPH, OriginalCornersPH, ImagePH,
                                           128, 1, 32, 'Sup')

    # Setup Saver
    Saver = tf.train.Saver()

    with tf.Session() as sess:
        Saver.restore(sess, ModelPath)

        FeedDict = {
            PatchPH: np.expand_dims(combinedPatch / 255, axis=0)
        }  #, OriginalCornersPH: OriginalCorners, ImagePH: Image}
        H4Pt_out = sess.run(H4Pt, FeedDict)

        pred_perturations = np.reshape(H4Pt_out[0] * 32,
                                       (4, 2)).astype(np.int32)
        print(pred_perturations)

    predicted = pred_perturations + original_points
    print(predicted)

    homography = cv2.getPerspectiveTransform(
        predicted.astype(np.float32), original_points.astype(np.float32))

    print(homography)

    result = warpTwoImages(images[top_match[1]], images[top_match[0]],
                           homography)

    cv2.imshow("", result)
    cv2.waitKey(0)
    """
Example #14
def trainSup(ImgPH, LabelPH, DirNamesTrain, TrainLabels, NumTrainSamples,
             ImageSize, NumEpochs, MiniBatchSize, SaveCheckPoint,
             CheckPointPath, DivTrain, LatestFile, BasePath, LogsPath,
             ModelType, TrainingSampleSize):
    """
	Inputs: 
	ImgPH is the Input Image placeholder
	LabelPH is the one-hot encoded label placeholder
	DirNamesTrain - Variable with Subfolder paths to train files
	TrainLabels - Labels corresponding to Train/Test
	NumTrainSamples - length(Train)
	ImageSize - Size of the image
	NumEpochs - Number of passes through the Train data
	MiniBatchSize is the size of the MiniBatch
	SaveCheckPoint - Save checkpoint every SaveCheckPoint iteration in every epoch, checkpoint saved automatically after every epoch
	CheckPointPath - Path to save checkpoints/model
	DivTrain - Divide the data by this number for Epoch calculation, use if you have a lot of data or for debugging code
	LatestFile - Latest checkpoint file to continue training
	BasePath - Path to COCO folder without "/" at the end
	LogsPath - Path to save Tensorboard Logs
	ModelType - Supervised or Unsupervised Model
	Outputs:
	Saves Trained network in CheckPointPath and Logs to LogsPath
	"""
    # Predict output with forward pass
    H4pt = HomographyModel(ImgPH, ImageSize, MiniBatchSize)

    with tf.name_scope('Loss'):
        ###############################################
        # Fill your loss function of choice here!
        ###############################################
        #LabelPH=tf.reshape(LabelPH,[MiniBatchSize,LabelPH.shape[1:4].num_elements()])
        shapeH4pt = tf.shape(H4pt)
        shapeLabel = tf.shape(LabelPH)
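        # L2 norm of the difference between the predicted and ground-truth 4-point offsets over the whole batch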
        loss = tf.sqrt(tf.reduce_sum((tf.squared_difference(H4pt, LabelPH))))
    with tf.name_scope('Adam'):
        ###############################################
        # Fill your optimizer of choice here!
        ###############################################
        Optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)

    # Tensorboard
    # Create a summary to monitor loss tensor
    tf.summary.scalar('LossEveryIter', loss)
    # tf.summary.image('Anything you want', AnyImg)
    # Merge all summaries into a single operation
    MergedSummaryOP = tf.summary.merge_all()

    # Setup Saver
    Saver = tf.train.Saver(max_to_keep=NumEpochs)
    with tf.Session() as sess:
        if LatestFile is not None:
            Saver.restore(sess, CheckPointPath + LatestFile + '.ckpt')
            # Extract only numbers from the name
            StartEpoch = int(''.join(c for c in LatestFile.split('a')[0]
                                     if c.isdigit()))
            print('Loaded latest checkpoint with the name ' + LatestFile +
                  '....')
        else:
            sess.run(tf.global_variables_initializer())
            StartEpoch = 0
            print('New model initialized....')

        # Tensorboard
        Writer = tf.summary.FileWriter(LogsPath, graph=tf.get_default_graph())
        LossList = []
        for Epochs in tqdm(range(StartEpoch, NumEpochs)):
            NumIterationsPerEpoch = int(NumTrainSamples / MiniBatchSize /
                                        DivTrain)
            appendAcc = []
            BatchLosses = []
            for PerEpochCounter in tqdm(range(NumIterationsPerEpoch)):
                BatchInput, BatchLabel = BatchGenSup(BasePath, DirNamesTrain,
                                                     TrainLabels, ImageSize,
                                                     MiniBatchSize,
                                                     TrainingSampleSize)
                FeedDict = {ImgPH: BatchInput, LabelPH: BatchLabel}
                _, LossThisBatch, Summary = sess.run(
                    [Optimizer, loss, MergedSummaryOP], feed_dict=FeedDict)
                BatchLosses.append(LossThisBatch)

                # Tensorboard
                Writer.add_summary(
                    Summary, Epochs * NumIterationsPerEpoch + PerEpochCounter)
                # If you don't flush, Tensorboard doesn't update until a lot of iterations have passed!
                Writer.flush()

            LossList.append(sum(BatchLosses) / len(BatchLosses))
            with open('TrainingLossData_m.pkl', 'wb') as f:
                pickle.dump([LossList], f)
            # Save model every epoch
            SaveName = CheckPointPath + str(Epochs) + 'model.ckpt'
            Saver.save(sess, save_path=SaveName)
            print('\n' + SaveName + ' Model Saved...')
Example #15
def TrainOperation(ImgPH, GroundTruthPH, OriginalCornersPH, ImagesPH,
                   DirNamesTrain, DirNamesVal, NumTrainSamples, PatchSize,
                   Perturbation, NumEpochs, MiniBatchSize, SaveCheckPoint,
                   CheckPointPath, DivTrain, LatestFile, BasePath, LogsPath,
                   ModelType):
    """
	Inputs: 
	ImgPH is the Input Image placeholder
	LabelPH is the one-hot encoded label placeholder
	DirNamesTrain - Variable with Subfolder paths to train files
	TrainLabels - Labels corresponding to Train/Test
	NumTrainSamples - length(Train)
	PatchSize - Size of the image
	NumEpochs - Number of passes through the Train data
	MiniBatchSize is the size of the MiniBatch
	SaveCheckPoint - Save checkpoint every SaveCheckPoint iteration in every epoch, checkpoint saved automatically after every epoch
	CheckPointPath - Path to save checkpoints/model
	DivTrain - Divide the data by this number for Epoch calculation, use if you have a lot of data or for debugging code
	LatestFile - Latest checkpoint file to continue training
	BasePath - Path to COCO folder without "/" at the end
	LogsPath - Path to save Tensorboard Logs
	ModelType - Supervised or Unsupervised Model
	Outputs:
	Saves Trained network in CheckPointPath and Logs to LogsPath
	"""
    # Predict output with forward pass

    predictedPatch, H4Pt = HomographyModel(ImgPH, OriginalCornersPH, ImagesPH,
                                           PatchSize, MiniBatchSize,
                                           Perturbation, ModelType)

    with tf.name_scope('Loss'):
        ###############################################
        # Fill your loss function of choice here!
        ###############################################
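        # Supervised: L2 loss on the 4-point offsets; Unsupervised: photometric L1 loss between the warped patch and the target patch (the H4Pt loss is still computed for monitoring)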
        if (ModelType == 'Sup'):

            H4Ptloss = tf.nn.l2_loss(H4Pt - GroundTruthPH)
            valH4PtLossPerEpoch_ph = tf.placeholder(
                tf.float32, shape=None, name='val_H4Pt_loss_per_epoch')
        else:

            L1loss = tf.math.reduce_sum(
                tf.abs(ImgPH[:, :, :, 1] -
                       predictedPatch[:, :, :, 0])) / (PatchSize**2)
            H4Ptloss = tf.nn.l2_loss(H4Pt - GroundTruthPH)

            valL1LossPerEpoch_ph = tf.placeholder(tf.float32,
                                                  shape=None,
                                                  name='val_L1_loss_per_epoch')
            valH4PtLossPerEpoch_ph = tf.placeholder(
                tf.float32, shape=None, name='val_H4Pt_loss_per_epoch')

    with tf.name_scope('Adam'):
        ###############################################
        # Fill your optimizer of choice here!
        ###############################################
        if (ModelType == 'Sup'):
            Optimizer = tf.train.AdamOptimizer(
                learning_rate=0.0001).minimize(H4Ptloss)
        else:
            Optimizer = tf.train.AdamOptimizer(
                learning_rate=0.00005).minimize(L1loss)

    # Tensorboard
    # Create a summary to monitor loss tensor
    if (ModelType == 'Sup'):
        tf.summary.scalar('H4PtLossEveryIter', H4Ptloss / MiniBatchSize)
    else:
        tf.summary.scalar('H4PtLossEveryIter', H4Ptloss / MiniBatchSize)
        tf.summary.scalar('L1LossEveryIter', L1loss / MiniBatchSize)
    # tf.summary.image('Anything you want', AnyImg)
    # Merge all summaries into a single operation
    MergedSummaryOP = tf.summary.merge_all()

    performance_summaries = []

    if (ModelType == 'Sup'):
        performance_summaries.append(
            tf.summary.scalar('H4PtValLossPerEpoch', valH4PtLossPerEpoch_ph))
    else:
        performance_summaries.append(
            tf.summary.scalar('L1ValLossPerEpoch', valL1LossPerEpoch_ph))
        performance_summaries.append(
            tf.summary.scalar('H4PtValLossPerEpoch', valH4PtLossPerEpoch_ph))

    performance = tf.summary.merge(performance_summaries)

    # Setup Saver
    Saver = tf.train.Saver()

    with tf.Session() as sess:
        if LatestFile is not None:
            Saver.restore(sess, CheckPointPath + LatestFile + '.ckpt')
            # Extract only numbers from the name
            StartEpoch = int(''.join(c for c in LatestFile.split('a')[0]
                                     if c.isdigit()))
            print('Loaded latest checkpoint with the name ' + LatestFile +
                  '....')
        else:
            sess.run(tf.global_variables_initializer())
            StartEpoch = 0
            print('New model initialized....')

        # Tensorboard
        Writer = tf.summary.FileWriter(LogsPath, graph=tf.get_default_graph())

        for Epochs in tqdm(range(StartEpoch, NumEpochs)):
            NumIterationsPerEpoch = int(NumTrainSamples / MiniBatchSize /
                                        DivTrain)
            for PerEpochCounter in tqdm(range(NumIterationsPerEpoch)):
                ImagesBatch, PatchBatch, GroundTruthBatch, OriginalCornersBatch = GenerateBatch(
                    BasePath, DirNamesTrain, PatchSize, Perturbation,
                    MiniBatchSize)

                #print(GroundTruthBatch)

                FeedDict = {
                    ImgPH: PatchBatch,
                    GroundTruthPH: GroundTruthBatch,
                    OriginalCornersPH: OriginalCornersBatch,
                    ImagesPH: ImagesBatch
                }

                if (ModelType == 'Sup'):
                    _, H4PtLossThisBatch, Summary, H4Pt_out = sess.run(
                        [Optimizer, H4Ptloss, MergedSummaryOP, H4Pt],
                        feed_dict=FeedDict)

                else:
                    _, H4PtLossThisBatch, L1LossThisBatch, Summary = sess.run(
                        [Optimizer, H4Ptloss, L1loss, MergedSummaryOP],
                        feed_dict=FeedDict)

                #print(testOutput[2])

                #print(testOutput[5])
                #print(testOutput[6])
                # if(PerEpochCounter % 20 == 0):

                # #Full Image
                # cv2.imshow("", testOutput[1][0,:,:])
                # cv2.waitKey(0)

                # #Warped Image
                # cv2.imshow("", testOutput[0][0])
                # cv2.waitKey(0)

                # print(np.shape(testOutput[4]))

                # #Correct Image
                # cv2.imshow("", testOutput[4][0])
                # cv2.waitKey(0)

                #Warped Patch
                #cv2.imshow("",testOutput[3][0])
                #cv2.waitKey(0)

                # Ground Truth
                #cv2.imshow("", np.array(PatchBatch)[0,:,:,1])
                #cv2.waitKey(0)

                # Save checkpoint every some SaveCheckPoint's iterations
                if PerEpochCounter % SaveCheckPoint == 0:
                    # Save the Model learnt in this epoch
                    SaveName = CheckPointPath + str(Epochs) + 'a' + str(
                        PerEpochCounter) + 'model.ckpt'
                    Saver.save(sess, save_path=SaveName)
                    print('\n' + SaveName + ' Model Saved...')

                # Tensorboard
                Writer.add_summary(
                    Summary, Epochs * NumIterationsPerEpoch + PerEpochCounter)
                # If you don't flush the tensorboard doesn't update until a lot of iterations!
                Writer.flush()

            # Save model every epoch
            SaveName = CheckPointPath + str(Epochs) + 'model.ckpt'
            Saver.save(sess, save_path=SaveName)
            print('\n' + SaveName + ' Model Saved...')

            H4PtValLossSum = 0
            L1ValLossSum = 0

            NumIterationsPerEpochVal = int(1000 / MiniBatchSize)
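            # Validation: the hardcoded 1000 assumes a 1000-sample validation
            # split drawn from DirNamesVal.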

            for PerEpochCounter in tqdm(range(NumIterationsPerEpochVal)):
                ImagesBatch, PatchBatch, GroundTruthBatch, OriginalCornersBatch = GenerateBatch(
                    BasePath, DirNamesVal, PatchSize, Perturbation,
                    MiniBatchSize)

                FeedDict = {
                    ImgPH: PatchBatch,
                    GroundTruthPH: GroundTruthBatch,
                    OriginalCornersPH: OriginalCornersBatch,
                    ImagesPH: ImagesBatch
                }

                if (ModelType == 'Sup'):

                    H4PtLossThisBatch = sess.run([H4Ptloss],
                                                 feed_dict=FeedDict)

                    H4PtValLossSum += H4PtLossThisBatch[0] / MiniBatchSize

                    print("Epoch " + str(PerEpochCounter) + " H4Pt Loss: " +
                          str(H4PtLossThisBatch[0] / MiniBatchSize))

                else:
                    H4PtLossThisBatch, L1LossThisBatch = sess.run(
                        [H4Ptloss, L1loss], feed_dict=FeedDict)
                    H4PtValLossSum += H4PtLossThisBatch / MiniBatchSize
                    L1ValLossSum += L1LossThisBatch / MiniBatchSize

            if (ModelType == 'Sup'):

                Summary = sess.run(performance,
                                   feed_dict={
                                       valH4PtLossPerEpoch_ph:
                                       H4PtValLossSum /
                                       NumIterationsPerEpochVal
                                   })
                Writer.add_summary(Summary, Epochs)
                Writer.flush()

                print("Epoch " + str(Epochs) + " H4Pt Loss: " +
                      str(H4PtValLossSum / NumIterationsPerEpochVal))

            else:
                Summary = sess.run(performance,
                                   feed_dict={
                                       valH4PtLossPerEpoch_ph:
                                       H4PtValLossSum /
                                       NumIterationsPerEpochVal,
                                       valL1LossPerEpoch_ph:
                                       L1ValLossSum / NumIterationsPerEpochVal
                                   })
                Writer.add_summary(Summary, Epochs)
                Writer.flush()

                print("Epoch " + str(Epochs) + " H4Pt Val Loss: " +
                      str(H4PtValLossSum / NumIterationsPerEpochVal))
                print("Epoch " + str(Epochs) + " L1 Val Loss: " +
                      str(L1ValLossSum / NumIterationsPerEpochVal))
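
# --- Side note (not part of the example above) ---
# A minimal NumPy/OpenCV sketch of the 4-point parameterization that the
# supervised H4Pt loss above regresses: the four corner offsets of a patch
# fully determine a homography, which cv2.getPerspectiveTransform recovers.
# Patch size and perturbation range below are illustrative assumptions.
import cv2
import numpy as np

patch_size = 128
corners_a = np.float32([[0, 0], [patch_size, 0],
                        [patch_size, patch_size], [0, patch_size]])
h4pt = np.random.uniform(-32, 32, (4, 2)).astype(np.float32)  # corner offsets
corners_b = corners_a + h4pt
H_ab = cv2.getPerspectiveTransform(corners_a, corners_b)  # 3x3 homography
# The supervised branch above minimizes ||H4Pt_pred - h4pt||^2 instead of
# regressing the 9 entries of H_ab directly.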
Example #16
0
def TrainOperation(ImgPH, ImgOrgPH, LabelPH, CornerPH, IndicesPH,
                   DirNamesTrain, NumTrainSamples, ImageSize, LargeImgSize,
                   NumEpochs, MiniBatchSize, SaveCheckPoint, CheckPointPath,
                   DivTrain, LatestFile, BasePath, LogsPath, ModelType):
    """
    Inputs: 
    ImgPH is the Input Image placeholder
    LabelPH is the ground-truth H4Pt (corner offset) label placeholder
    DirNamesTrain - Variable with Subfolder paths to train files
    NumTrainSamples - length(Train)
    ImageSize - Size of the image
    NumEpochs - Number of passes through the Train data
    MiniBatchSize is the size of the MiniBatch
    SaveCheckPoint - Save checkpoint every SaveCheckPoint iteration in every epoch, checkpoint saved automatically after every epoch
    CheckPointPath - Path to save checkpoints/model
    DivTrain - Divide the data by this number for Epoch calculation, use if you have a lot of data or for debugging code
    LatestFile - Latest checkpoint file to continue training
    BasePath - Path to COCO folder without "/" at the end
    LogsPath - Path to save Tensorboard Logs
    ModelType - Supervised or Unsupervised Model
    Outputs:
    Saves Trained network in CheckPointPath and Logs to LogsPath
    """
    # Predict output with forward pass
    with tf.name_scope('HomographyNet'):
        H4Pt, _ = HomographyModel(ImgPH, ImageSize, MiniBatchSize)

    with tf.name_scope('Supervised'):
        with tf.name_scope('Loss'):
            # lossSup = tf.norm(LabelPH - H4Pt, ord=2)
            lossSup = tf.nn.l2_loss(LabelPH - H4Pt) / MiniBatchSize
        with tf.name_scope('Adam'):
            OptimizerSup = tf.train.AdamOptimizer(learning_rate=1e-2)
            gradsSup = OptimizerSup.compute_gradients(lossSup)
            cappedGradsSup = [(tf.clip_by_value(grad, -20., 20.), var)
                              for grad, var in gradsSup]
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

            with tf.control_dependencies(update_ops):
                TrainStepSup = OptimizerSup.apply_gradients(cappedGradsSup)

    with tf.name_scope('Unsupervised'):
        with tf.name_scope('TensorDLT'):
            #H = tf.reshape(TensorDLT(CornerPH, LabelPH), (-1, 3, 3))
            H = tf.reshape(TensorDLT(CornerPH, H4Pt), (-1, 3, 3))
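            # TensorDLT solves the direct linear transform on the four corner
            # correspondences (CornerPH -> CornerPH + H4Pt) differentiably,
            # giving one 3x3 homography per batch element.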

        with tf.name_scope('SpatialTransformer'):
            # Normalized inverse computation Hinv
            M = np.array([[LargeImgSize[1] / 2.0, 0., LargeImgSize[1] / 2.0],
                          [0., LargeImgSize[0] / 2.0, LargeImgSize[0] / 2.0],
                          [0., 0., 1.]]).astype(np.float32)
            M = tf.expand_dims(tf.constant(M, tf.float32), axis=0)
            M_inv = tf.linalg.pinv(M)
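            # M maps the transformer's normalized [-1, 1] coordinates to pixel
            # coordinates, so M_inv * H * M (next line) applies the homography
            # in the normalized frame expected by transformer().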
            H = tf.matmul(tf.matmul(M_inv, H), M)
            # Transform image 1 (large image) to image 2
            outSize = (LargeImgSize[0], LargeImgSize[1])
            predImg, _ = transformer(ImgOrgPH, H, outSize)
            #checkout = predImg # TODO

        with tf.name_scope('Cropping'):
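            # Crop the predicted patch out of the warped full image with flat
            # indexing: y_t is the offset of each image in the flattened batch
            # and IndicesPH holds the patch pixel indices within each image,
            # so tf.gather pulls out the predicted patch directly.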
            y_t = tf.range(0,
                           MiniBatchSize * LargeImgSize[0] * LargeImgSize[1],
                           LargeImgSize[0] * LargeImgSize[1])
            z = tf.tile(tf.expand_dims(y_t, [1]),
                        [1, ImageSize[0] * ImageSize[1]])
            z = tf.reshape(z, (-1, ImageSize[0], ImageSize[1]))
            cropIndices = z + IndicesPH
            cropIndices_flat = tf.reshape(cropIndices, [-1])
            predImg = tf.reshape(predImg, [-1])
            predPatch = tf.gather(predImg, cropIndices_flat)
            predPatch = tf.reshape(predPatch, [-1, ImageSize[0], ImageSize[1]])
            #checkout2 = predPatch

            warpedPatch = ImgPH[..., 1]
            #checkout3 = warpedPatch

        with tf.name_scope('Loss'):
            lossUnsup = tf.reduce_mean(tf.abs(warpedPatch - predPatch))

        with tf.name_scope('Adam'):
            OptimizerUnsup = tf.train.AdamOptimizer(learning_rate=1e-5)
            gradsUnsup = OptimizerUnsup.compute_gradients(lossUnsup)
            cappedGradsUnsup = [(tf.clip_by_value(grad, -20., 20.), var)
                                for grad, var in gradsUnsup]
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                TrainStepUnsup = OptimizerUnsup.apply_gradients(
                    cappedGradsUnsup)

    # Tensorboard
    # Create a summary to monitor loss tensor
    tf.summary.scalar('SupervisedLossEveryIter', lossSup)
    tf.summary.scalar('UnsupervisedLossEveryIter', lossUnsup)
    # tf.summary.image('Anything you want', AnyImg)
    # Merge all summaries into a single operation
    MergedSummaryOP = tf.summary.merge_all()

    # Setup Saver
    Saver = tf.train.Saver()

    np.random.seed(seed=0)  # Deterministic analysis
    with tf.Session() as sess:
        if LatestFile is not None:
            Saver.restore(sess, CheckPointPath + LatestFile + '.ckpt')
            # Extract only numbers from the name
            StartEpoch = int(''.join(c for c in LatestFile.split('a')[0]
                                     if c.isdigit()))
            print('Loaded latest checkpoint with the name ' + LatestFile +
                  '....')
        else:
            sess.run(tf.global_variables_initializer())
            StartEpoch = 0
            print('New model initialized....')

        # Tensorboard
        Writer = tf.summary.FileWriter(LogsPath, graph=tf.get_default_graph())

        for Epochs in tqdm(range(StartEpoch, NumEpochs)):
            NumIterationsPerEpoch = int(NumTrainSamples / MiniBatchSize /
                                        DivTrain)
            for PerEpochCounter in tqdm(range(NumIterationsPerEpoch)):
                I1Batch, ImgOrgBatch, LabelBatch, CornerBatch, Indices = GenerateBatch(
                    BasePath, DirNamesTrain, ImageSize, LargeImgSize,
                    MiniBatchSize)
                FeedDict = {
                    ImgPH: I1Batch,
                    ImgOrgPH: ImgOrgBatch,
                    LabelPH: LabelBatch,
                    CornerPH: CornerBatch,
                    IndicesPH: Indices
                }

                if ModelType == 'Sup':
                    _, LossThisBatch, Summary = sess.run(
                        [TrainStepSup, lossSup, MergedSummaryOP],
                        feed_dict=FeedDict)
                else:
                    _, LossThisBatch, Summary = sess.run(
                        [TrainStepUnsup, lossUnsup, MergedSummaryOP],
                        feed_dict=FeedDict)
                    '''
                    Pred, predpatch, patch, LossThisBatch, Summary = sess.run([checkout, checkout2, checkout3, lossUnsup, MergedSummaryOP], feed_dict=FeedDict)
                    cv2.imshow('Image', ImgOrgBatch[0])
                    cv2.imshow('PA', (I1Batch[0]).astype(np.uint8)[..., 0])
                    cv2.imshow('PB', (I1Batch[0]).astype(np.uint8)[..., 1])
                    cv2.imshow("warped image", np.array(Pred).astype(np.uint8)[0])
                    cv2.imshow("Predict PB", np.array(predpatch).astype(np.uint8)[0])
                    cv2.imshow("Place Holder PB", np.array(patch).astype(np.uint8)[0])
                    cv2.waitKey()
                    '''

                # Save checkpoint every some SaveCheckPoint's iterations
                if PerEpochCounter % SaveCheckPoint == 0:
                    # Save the Model learnt in this epoch
                    #SaveName =  CheckPointPath + str(Epochs) + 'a' + str(PerEpochCounter) + 'model.ckpt'
                    #Saver.save(sess,  save_path=SaveName)
                    #print('\n' + SaveName + ' Model Saved...')
                    print('\n' + 'Loss This Batch is %f' % LossThisBatch)

                # Tensorboard
                Writer.add_summary(
                    Summary, Epochs * NumIterationsPerEpoch + PerEpochCounter)
                # If you don't flush the tensorboard doesn't update until a lot of iterations!
                Writer.flush()

            # Save model every epoch
            #SaveName = CheckPointPath + str(Epochs) + 'model.ckpt'
            SaveName = CheckPointPath + str(50) + 'model.ckpt'
            Saver.save(sess, save_path=SaveName)
            print('\n' + SaveName + ' Model Saved...')
def TrainOperation(ImgPH, LabelPH, C_a_PH, I_a_PH, I_b_PH, NumTrainSamples,
                   ImageSize, NumEpochs, MiniBatchSize, SaveCheckPoint,
                   CheckPointPath, DivTrain, LatestFile, BasePath, LogsPath):
    """
    Inputs:
    ImgPH is the Input Image placeholder
    LabelPH is the label placeholder
    TrainImages - Training images file
    TrainLabels - Labels corresponding to Train/Test
    NumTrainSamples - length(Train)
    ImageSize - Size of the image
    NumEpochs - Number of passes through the Train data
    MiniBatchSize is the size of the MiniBatch
    SaveCheckPoint - Save checkpoint every SaveCheckPoint iteration in every epoch, checkpoint saved automatically after every epoch
    CheckPointPath - Path to save checkpoints/model
    DivTrain - Divide the data by this number for Epoch calculation, use if you have a lot of data or for debugging code
    LatestFile - Latest checkpoint file to continue training
    BasePath - Path to COCO folder without "/" at the end
    LogsPath - Path to save Tensorboard Logs
    ModelType - Supervised or Unsupervised Model
    Outputs:
    Saves Trained network in CheckPointPath and Logs to LogsPath
    """
    # Predict output with forward pass
    training = True

    # identity = np.array([[1., 0., 0.],
    #                      [0., 1., 0.],
    #                      [0., 0., 1.]])
    #
    # identity = identity.flatten()
    #
    # theta = tf.Variable(initial_value=identity)
    # theta_t = tf.expand_dims(theta, 0)
    # pdb.set_trace()
    # H9_mat = tf.tile(theta_t, [MiniBatchSize, 1, 1])

    H4Pt = HomographyModel(ImgPH, ImageSize, MiniBatchSize, training)

    H9_mat = DLT(C_a=C_a_PH, H4Pt=H4Pt)
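    # DLT converts the predicted 4-point offsets (relative to corners C_a)
    # into a full 3x3 homography, one per batch element.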

    # Calculate H_inv
    img_w = I_b_PH.get_shape().as_list()[2]
    img_h = I_b_PH.get_shape().as_list()[1]

    M = np.array([[img_w/2.0, 0.0, img_w/2.0],\
                 [0.0, img_h/2.0, img_h/2.0],\
                 [0.0, 0.0, 1.0]]).astype(np.float32)

    Minv = np.linalg.inv(M)

    M_t = tf.constant(M, tf.float32)
    M_rep = tf.tile(tf.expand_dims(M_t, [0]), [MiniBatchSize, 1, 1])

    Minv_t = tf.constant(Minv, tf.float32)
    Minv_rep = tf.tile(tf.expand_dims(Minv_t, [0]), [MiniBatchSize, 1, 1])

    # Convert to H_inv
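    # (M maps the spatial transformer's normalized [-1, 1] coordinates to
    # pixels, so Minv * H9 * M expresses the homography in the normalized
    # frame that transformer() operates in.)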
    H9_inv = tf.matmul(tf.matmul(Minv_rep, H9_mat), M_rep)

    with tf.name_scope('Loss'):
        ###############################################
        # Fill your loss function of choice here!
        ###############################################
        out_size = (img_h, img_w)

        # I_a_PH = tf.expand_dims(I_a_PH, 3)
        # I_b_PH = tf.expand_dims(I_b_PH, 3)

        # Warp the image I_a with the inverse transform
        warped_I_a, _ = transformer(U=I_a_PH, theta=H9_inv, out_size=out_size)
        # pdb.set_trace()
        loss = tf.reduce_mean(tf.abs(warped_I_a - I_b_PH))

    with tf.name_scope('Accuracy'):
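        # Mean absolute 4-point corner error against the labels; logged to
        # TensorBoard for monitoring only, the photometric loss above is what
        # gets optimized.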
        accuracy = tf.reduce_mean(tf.abs(H4Pt - LabelPH))

    with tf.name_scope('Adam'):
        ###############################################
        # Fill your optimizer of choice here!
        ###############################################
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            Optimizer = tf.train.AdamOptimizer(
                learning_rate=1e-4).minimize(loss)

    # Tensorboard
    # Create a summary to monitor loss tensor
    tf.summary.scalar('LossEveryIter', loss)
    tf.summary.scalar('Accuracy', accuracy)

    tf.summary.histogram('I_a_PH', I_a_PH)
    tf.summary.histogram('I_b_PH', I_b_PH)
    tf.summary.histogram('warped_I_a', warped_I_a)
    tf.summary.histogram('H4Pt', H4Pt)

    # Merge all summaries into a single operation
    MergedSummaryOP = tf.summary.merge_all()

    # Setup Saver
    Saver = tf.train.Saver()

    with tf.Session() as sess:
        if LatestFile is not None:
            Saver.restore(sess, CheckPointPath + LatestFile + '.ckpt')
            # Extract only numbers from the name
            StartEpoch = int(''.join(c for c in LatestFile.split('a')[0]
                                     if c.isdigit()))
            print('Loaded latest checkpoint with the name ' + LatestFile +
                  '....')
        else:
            sess.run(tf.global_variables_initializer())
            StartEpoch = 0
            print('New model initialized....')

        # Tensorboard
        Writer = tf.summary.FileWriter(LogsPath, graph=tf.get_default_graph())

        for Epochs in tqdm(range(StartEpoch, NumEpochs)):
            NumIterationsPerEpoch = int(NumTrainSamples / MiniBatchSize /
                                        DivTrain)
            EpochLoss = 0
            for PerEpochCounter in tqdm(range(NumIterationsPerEpoch)):
                I1Batch, LabelBatch, C_a_Batch, I_a_Batch, I_b_Batch = GenerateBatch(
                    BasePath, ImageSize, MiniBatchSize, DataSelect=0)
                FeedDict = {
                    ImgPH: I1Batch,
                    LabelPH: LabelBatch,
                    C_a_PH: C_a_Batch,
                    I_a_PH: I_a_Batch,
                    I_b_PH: I_b_Batch
                }
                _, LossThisBatch, Summary = sess.run(
                    [Optimizer, loss, MergedSummaryOP], feed_dict=FeedDict)

                # Save checkpoint every some SaveCheckPoint's iterations
                if PerEpochCounter % SaveCheckPoint == 0:
                    # Save the Model learnt in this epoch
                    SaveName = CheckPointPath + str(Epochs) + 'a' + str(
                        PerEpochCounter) + 'model.ckpt'
                    Saver.save(sess, save_path=SaveName)
                    print('\n' + SaveName + ' Model Saved...')

                # Tensorboard
                Writer.add_summary(
                    Summary, Epochs * NumIterationsPerEpoch + PerEpochCounter)
                # If you don't flush the tensorboard doesn't update until a lot of iterations!
                Writer.flush()
                EpochLoss = EpochLoss + LossThisBatch

            # Average loss over the epoch (logged to Tensorboard below)
            EpochLoss = EpochLoss / NumIterationsPerEpoch

            # Tensorboard - Epoch Loss
            ELoss = tf.Summary()
            ELoss.value.add(tag='Epoch Loss', simple_value=EpochLoss)
            Writer.add_summary(ELoss, Epochs)
            Writer.flush()

            # Validation loss
            I1Batch_v, LabelBatch_v, C_a_Batch_v, I_a_Batch_v, I_b_Batch_v = GenerateBatch(
                BasePath, ImageSize, MiniBatchSize, DataSelect=1)
            FeedDict_v = {
                ImgPH: I1Batch_v,
                LabelPH: LabelBatch_v,
                C_a_PH: C_a_Batch_v,
                I_a_PH: I_a_Batch_v,
                I_b_PH: I_b_Batch_v
            }
            ValLoss = sess.run(loss, feed_dict=FeedDict_v)

            # Tensorboard - Validation loss
            ValLossSummary = tf.Summary()
            ValLossSummary.value.add(tag='Validation Loss',
                                     simple_value=ValLoss)
            Writer.add_summary(ValLossSummary, Epochs)
            Writer.flush()

            # Save model every epoch
            SaveName = CheckPointPath + str(Epochs) + 'model.ckpt'
            Saver.save(sess, save_path=SaveName)
            print('\n' + SaveName + ' Model Saved...')
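
# --- Side note (not part of the example above) ---
# A minimal NumPy/OpenCV sketch of the photometric L1 objective minimized by
# the unsupervised pipeline above: warp image A by a candidate homography and
# compare it pixel-wise to image B. Grayscale, equally sized images are
# assumed; in the graph above the warp is done differentiably by transformer().
import cv2
import numpy as np


def photometric_l1(img_a, img_b, H):
    h, w = img_b.shape[:2]
    warped_a = cv2.warpPerspective(img_a, H, (w, h))
    return np.mean(np.abs(warped_a.astype(np.float32) -
                          img_b.astype(np.float32)))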