Example #1
def PerturbImage(I1, PerturbNum):
    """
    Data Augmentation
    Inputs: 
    I1 is the input image
    PerturbNum chooses the type of perturbation; it ranges from 0 to 4
    0 - No perturbation
    1 - Random Gaussian noise
    2 - Random hue shift
    3 - Random saturation shift
    4 - Random gamma shift
    Outputs:
    Perturbed Image I1
    """
    if(PerturbNum == 0):
        pass
    elif(PerturbNum == 1):
        I1 = iu.GaussianNoise(I1)
    elif(PerturbNum == 2):
        I1 = iu.ShiftHue(I1)
    elif(PerturbNum == 3):
        I1 = iu.ShiftSat(I1)
    elif(PerturbNum == 4):
        I1 = iu.Gamma(I1)
        
    return I1
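A minimal usage sketch (not part of the original example): draw a random PerturbNum per sample and apply it. It assumes I1 is an already-loaded image and that the iu helpers referenced above are importable.

import random

PerturbNum = random.randint(0, 4)   # 0 leaves the image untouched
I1 = PerturbImage(I1, PerturbNum)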
Example #2
def GenerateBatch(BasePath, DirNamesTrain, TrainLabels, ImageSize,
                  MiniBatchSize):
    """
	Inputs: 
	BasePath - Path to CIFAR10 folder without "/" at the end
	DirNamesTrain - Variable with Subfolder paths to train files
	NOTE that Train can be replaced by Val/Test for generating batch corresponding to validation (held-out testing in this case)/testing
	TrainLabels - Labels corresponding to Train
	NOTE that TrainLabels can be replaced by Val/TestLabels for generating batch corresponding to validation (held-out testing in this case)/testing
	ImageSize - Size of the Image
	MiniBatchSize is the size of the MiniBatch
	Outputs:
	I1Batch - Batch of images
	LabelBatch - Batch of one-hot encoded labels 
	"""
    I1Batch = []
    LabelBatch = []

    ImageNum = 0
    while ImageNum < MiniBatchSize:
        # Generate random image
        RandIdx = random.randint(0, len(DirNamesTrain) - 1)

        RandImageName = BasePath + os.sep + DirNamesTrain[RandIdx] + '.png'
        ImageNum += 1
        ##########################################################
        # Add any standardization or data augmentation here!
        ##########################################################
        # Augmentation: build three copies of the same image (original,
        # horizontally flipped, and with additive Gaussian noise)
        Raw = cv2.imread(RandImageName)
        Label = convertToOneHot(TrainLabels[RandIdx], 10)

        # Original image
        I1 = iu.StandardizeInputs(np.float32(Raw))
        I1Batch.append(I1)
        LabelBatch.append(Label)

        # Horizontally flipped copy
        I1Flipped = iu.StandardizeInputs(np.float32(cv2.flip(Raw, 1)))
        I1Batch.append(I1Flipped)
        LabelBatch.append(Label)

        # Copy with additive Gaussian noise
        I1Noise = iu.StandardizeInputs(
            np.float32(random_noise(Raw, mode='gaussian', var=0.01)))
        I1Batch.append(I1Noise)
        LabelBatch.append(Label)

    return I1Batch, LabelBatch
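A hedged usage sketch for the batch generator above; it assumes BasePath, DirNamesTrain, TrainLabels, ImageSize and MiniBatchSize are set up by the (not shown) training driver. Since each sampled image contributes three entries, the returned lists hold 3*MiniBatchSize items.

I1Batch, LabelBatch = GenerateBatch(BasePath, DirNamesTrain, TrainLabels,
                                    ImageSize, MiniBatchSize)
I1Batch = np.stack(I1Batch)        # (3*MiniBatchSize, H, W, 3)
LabelBatch = np.stack(LabelBatch)  # (3*MiniBatchSize, 10)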
Example #3
def ReadImages(ImageSize, DataPath):
    """
    Inputs: 
    ImageSize - Size of the Image
    DataPath - Path of the image on which testing will be run
    Outputs:
    I1Combined - I1 image after any standardization and/or cropping/resizing to ImageSize
    I1 - Original I1 image for visualization purposes only
    """
    
    ImageName = DataPath
    
    I1 = cv2.imread(ImageName)
    
    if(I1 is None):
        # cv2.imread returns None if the image cannot be read
        print('ERROR: Image I1 cannot be read')
        sys.exit()
        
    ##########################################################################
    # Add any standardization or cropping/resizing if used in Training here!
    ##########################################################################

    I1S = iu.StandardizeInputs(np.float32(I1))

    I1Combined = np.expand_dims(I1S, axis=0)

    return I1Combined, I1
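A small usage sketch (the image path is a hypothetical placeholder): I1Combined carries a leading batch dimension of 1, so it can be fed directly to a network expecting batched input.

I1Combined, I1 = ReadImages(ImageSize, '/path/to/test_image.png')
print(I1Combined.shape)  # (1, H, W, 3)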
Example #4
def GenerateBatch(IBuffer, PatchSize):
    """
    Inputs: 
    DirNames - Full path to all image files without extension
    NOTE that Train can be replaced by Val/Test for generating batch corresponding to validation (held-out testing in this case)/testing
    TrainLabels - Labels corresponding to Train
    NOTE that TrainLabels can be replaced by Val/TestLabels for generating batch corresponding to validation (held-out testing in this case)/testing
    ImageSize - Size of the Image
    MiniBatchSize is the size of the MiniBatch
    Outputs:
    I1Batch - Batch of I1 images after standardization and cropping/resizing to ImageSize
    HomeVecBatch - Batch of Homing Vector labels
    """
    IBatch = []

    # If the buffer holds a side-by-side image pair, keep only the left half
    if (np.shape(IBuffer)[1] >= 246):
        IBuffer = np.hsplit(IBuffer, 2)
        I = IBuffer[0]
    else:
        I = IBuffer
    
    # Homography and Patch generation 
    IPatch = I
    # IOriginal, IPatch, AllPts, Mask = GenerateRandPatch(I, PatchSize, Vis=False)
    
    # Normalize Dataset
    # https://stackoverflow.com/questions/42275815/should-i-substract-imagenet-pretrained-inception-v3-model-mean-value-at-inceptio
    IS = iu.StandardizeInputs(np.float32(IPatch))
    
    # Append All Images and Mask
    IBatch.append(IS)

    # IBatch is the Original Image I1 Batch
    return IBatch
Example #5
def ReadImages(ImageSize, DataPath):
    """
    Inputs: 
    ImageSize - Size of the Image
    DataPath - Path of the image on which testing will be run
    Outputs:
    I1Combined - I1 image after standardization and cropping/resizing to ImageSize
    I1 - Original I1 image for visualization purposes only
    """
    
    ImageName = DataPath
    
    I1 = cv2.imread(ImageName)
    
    if(I1 is None):
        # cv2.imread returns None if the image cannot be read
        print('ERROR: Image I1 cannot be read')
        sys.exit()
        
        
    # Always get a random crop to fit the size of the network
    # I1 = iu.RandomCrop(I1, ImageSize)

    # Resize Image to fit size of the network
    # I1 = iu.Resize(I1, ImageSize)
        
    # Standardize Inputs as given by Inception v3 paper
    # MAYBE: Find Mean of Dataset or use from ImageNet
    # MAYBE: Normalize Dataset
    # https://stackoverflow.com/questions/42275815/should-i-substract-imagenet-pretrained-inception-v3-model-mean-value-at-inceptio
    I1S = iu.StandardizeInputs(np.float32(I1))

    I1Combined = np.expand_dims(I1S, axis=0)

    return I1Combined, I1
Example #6
def GenerateBatch(DirNamesTrain, TrainLabels, ImageSize, MiniBatchSize,
                  PerEpochCounter):
    """
    Inputs: 
    DirNames - Full path to all image files without extension
    NOTE that Train can be replaced by Val/Test for generating batch corresponding to validation (held-out testing in this case)/testing
    TrainLabels - Labels corresponding to Train
    NOTE that TrainLabels can be replaced by Val/TestLabels for generating batch corresponding to validation (held-out testing in this case)/testing
    ImageSize - Size of the Image
    MiniBatchSize is the size of the MiniBatch
    Outputs:
    I1Batch - Batch of I1 images after standardization and cropping/resizing to ImageSize
    HomeVecBatch - Batch of Homing Vector labels
    """
    I1Batch = []
    LabelBatch = []

    ImageNum = 0
    # RandIdxAll = range(PerEpochCounter*MiniBatchSize,(PerEpochCounter+1)*MiniBatchSize)
    # count = 0
    while ImageNum < MiniBatchSize:
        # Generate random image
        RandIdx = random.randint(0, len(DirNamesTrain) - 1)
        # RandIdx = RandIdxAll[count]

        RandImageName = DirNamesTrain[RandIdx] + '.png'
        RandImageNameWithoutExt = DirNamesTrain[RandIdx]
        RandImageNum = RandImageNameWithoutExt.split('/')
        CurrPath = '/'.join(map(str, RandImageNum[0:-1])) + '/'
        RandImageNum = int(RandImageNum[-1])
        ImageNum += 1

        I1 = cv2.imread(RandImageName)

        # Always get a random crop to fit the size of the network
        # I1 = iu.RandomCrop(I1, ImageSize)

        # Apply a random perturbation
        # PerturbNum = random.randint(0, 5)
        # I1 = PerturbImage(I1, PerturbNum)

        # Standardize Inputs as given by Inception v3 paper
        # MAYBE: Find Mean of Dataset or use from ImageNet
        # MAYBE: Normalize Dataset
        # https://stackoverflow.com/questions/42275815/should-i-substract-imagenet-pretrained-inception-v3-model-mean-value-at-inceptio
        I1S = iu.StandardizeInputs(np.float32(I1))
        # Placeholder label; OutSize is assumed to be defined at module level.
        # The original one-hot label was convertToOneHot(TrainLabels[RandIdx], 10).
        Label = np.ones(OutSize)

        # Append All Images and Mask
        I1Batch.append(I1S)
        LabelBatch.append(Label)
        # count += 1

        # print(np.shape(LabelBatch))
        # z = input('z')

    return I1Batch, LabelBatch
Example #7
    def __getitem__(self, index):

        ImageName = self.img_files[index]

        I = cv2.imread(ImageName)
        I = cv2.resize(I, (640, 640), interpolation=cv2.INTER_AREA)
        #if(not np.shape(I2)):
        #    return
        #cv2.imshow("I", I)
        #cv2.waitKey()

        I, ImageSize = iu.CenterCropFactor(I, self.Factor)

        IS = iu.StandardizeInputs(np.float32(I))

        Image = np.transpose(IS, (2, 0, 1))

        MaskName = ImageName.replace("img", "mask")
        #Image = cv2.imread(ImageName)
        #RandImagePairName = self.img_files[index+1]
        #Image, ImageSize = iu.CenterCropFactor(Image, self.Factor)
        #Image = iu.StandardizeInputs(np.float32(Image))

        mask = cv2.imread(MaskName)
        if (not np.shape(mask)):
            # cv2.imread returns None if the mask is missing; fall back to an empty mask
            mask = np.zeros(ImageSize, dtype=np.uint8)
        mask = cv2.resize(mask, (640, 640), interpolation=cv2.INTER_AREA)

        #cv2.imshow("mask", mask)
        #cv2.waitKey()

        mask, _ = iu.CenterCropFactor(mask, self.Factor)
        mask = np.float32(mask[:, :, 0]) / 255.0

        mask = np.expand_dims(mask, axis=2)
        mask = np.dstack((mask, 1.0 - mask))

        #transform = transforms.Compose([transforms.ToTensor(), \
        #                                transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
        return Image, mask
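A sketch of how such a __getitem__ is typically consumed, assuming it belongs to a torch.utils.data.Dataset subclass (called SegDataset here purely for illustration) whose __init__ sets self.img_files and self.Factor and whose __len__ returns len(self.img_files).

from torch.utils.data import DataLoader

loader = DataLoader(SegDataset(img_files, Factor=3),  # hypothetical constructor
                    batch_size=8, shuffle=True, num_workers=4)
for Image, mask in loader:
    # Image: float32 tensor of shape (B, 3, H, W); mask: (B, H, W, 2)
    break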
Example #8
    def __getitem__(self, index):

        ImageName = self.img_files[index]
        #ImagePairName = self.img_files[index+1]

        I = cv2.imread(ImageName)

        #I2 = cv2.imread(ImagePairName)
        #if(not np.shape(I2)):
        #    return

        I, ImageSize = iu.CenterCropFactor(I, self.Factor)
        #I2, _ = iu.CenterCropFactor(I2, self.Factor)

        #ICombined = np.dstack((I1, I2))  # stack along the 3rd dimension

        IS = iu.StandardizeInputs(np.float32(I))

        Image = np.transpose(IS, (2, 0, 1))

        MaskName = ImageName.replace("img", "mask")
        #Image = cv2.imread(ImageName)
        #RandImagePairName = self.img_files[index+1]
        #Image, ImageSize = iu.CenterCropFactor(Image, self.Factor)
        #Image = iu.StandardizeInputs(np.float32(Image))

        mask = cv2.imread(MaskName)
        if (not np.shape(mask)):
            # cv2.imread returns None if the mask is missing; fall back to an empty mask
            mask = np.zeros(ImageSize, dtype=np.uint8)

        #Mask2Name = ImagePairName.replace("img", "mask")
        #mask2 = cv2.imread(Mask2Name)
        #if (not np.shape(mask2)):
        #    mask2 = np.zeros(ImageSize, dtype=np.int)
        mask, _ = iu.CenterCropFactor(mask, self.Factor)
        mask = np.float32(mask[:, :, 0]) / 255.0
        mask = np.expand_dims(mask, axis=2)
        mask = np.dstack((mask, 1.0 - mask))

        #transform = transforms.Compose([transforms.ToTensor(), \
        #                                transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
        return Image, mask
Example #9
def ReadImages(ImageSize, DataPath):
    """
    Inputs: 
    ImageSize - Size of the Image
    DataPath - Path of the image on which testing will be run
    Outputs:
    I1Combined - Stacked (cropped, warped) patch pair after standardization, with a leading batch dimension
    Image - Original image, for visualization purposes only
    Label - Random 8-vector of corner perturbations used to generate the warp
    Corners - Patch corner correspondences returned by iu.getCorrespondence
    """

    ImageName = DataPath
    Image = cv2.imread(ImageName)

    if (Image is None):
        # cv2.imread returns None if the image cannot be read
        print('ERROR: Image cannot be read')
        sys.exit()

    Gray = cv2.cvtColor(Image, cv2.COLOR_BGR2GRAY)

    Tau = min(ImageSize[0], ImageSize[1]) / 3
    # generate label
    Label = np.random.randint(2 * Tau, size=8) - Tau

    # get corner correspondences
    Corners, Warped = iu.getCorrespondence(Image.shape, Label, ImageSize)

    # get forward and backward homographies
    CroppedPatch, WarpedPatch = iu.getWarpingPair(Gray, ImageSize, Corners,
                                                  Warped)
    I1 = np.float32(np.stack([CroppedPatch, WarpedPatch], -1))

    #cv2.imshow("Image", Image)
    #cv2.imshow("Warped Patch", WarpedPatch)
    #cv2.imshow("Cropped Patch", CroppedPatch)

    I1S = iu.StandardizeInputs(np.float32(I1))

    I1Combined = np.expand_dims(I1S, axis=0)

    return I1Combined, Image, Label, Corners
Example #10
def GenerateBatch(TrainNames, PatchSize, MiniBatchSize, BasePath):
    """
    Inputs: 
    DirNames - Full path to all image files without extension
    NOTE that Train can be replaced by Val/Test for generating batch corresponding to validation (held-out testing in this case)/testing
    TrainLabels - Labels corresponding to Train
    NOTE that TrainLabels can be replaced by Val/TestLabels for generating batch corresponding to validation (held-out testing in this case)/testing
    ImageSize - Size of the Image
    MiniBatchSize is the size of the MiniBatch
    Outputs:
    I1Batch - Batch of I1 images after standardization and cropping/resizing to ImageSize
    HomeVecBatch - Batch of Homing Vector labels
    """
    IBatch = []
    IOrgBatch = []
    AllPtsBatch = []
    IPatchBatch = []
    MaskBatch = []

    ImageNum = 0
    while ImageNum < MiniBatchSize:
        # Generate random image
        RandIdx = random.randint(0, len(TrainNames)-1)        
        RandImageName = BasePath + os.sep + TrainNames[RandIdx] 
        ImageNum += 1
        IBuffer = cv2.imread(RandImageName)
        if(np.shape(IBuffer)[1]>346):
            IBuffer = np.hsplit(IBuffer, 2)
            I = IBuffer[0]
        else:
            I = IBuffer

        # Homography and Patch generation 
        IOriginal, IPatch, AllPts, Mask = GenerateRandPatch(I, PatchSize, Vis=False)

        # Normalize Dataset
        # https://stackoverflow.com/questions/42275815/should-i-substract-imagenet-pretrained-inception-v3-model-mean-value-at-inceptio
        IS = iu.StandardizeInputs(np.float32(IPatch))

        # Append All Images and Mask
        IBatch.append(IS)
        IOrgBatch.append(I)
        AllPtsBatch.append(AllPts)
        IPatchBatch.append(IPatch)
        MaskBatch.append(Mask)

        
    # IBatch is the Original Image I1 Batch
    # IPatchBatch is I1 cropped to patch Size Batch
    # AllPtsBatch is the patch corners in I1 Batch
    # MaskBatch is the active region of I1Patch in I1 Batch
    return IBatch, IOrgBatch, AllPtsBatch, IPatchBatch, MaskBatch
Example #11
def GenerateBatch(IBuffer, Rho, PatchSize, CropType, Vis=False):
    """
    Inputs: 
    DirNames - Full path to all image files without extension
    NOTE that Train can be replaced by Val/Test for generating batch corresponding to validation (held-out testing in this case)/testing
    TrainLabels - Labels corresponding to Train
    NOTE that TrainLabels can be replaced by Val/TestLabels for generating batch corresponding to validation (held-out testing in this case)/testing
    ImageSize - Size of the Image
    MiniBatchSize is the size of the MiniBatch
    Outputs:
    I1Batch - Batch of I1 images after standardization and cropping/resizing to ImageSize
    HomeVecBatch - Batch of Homing Vector labels
    """
    IBatch = []
    I1Batch = []
    I2Batch = []
    AllPtsBatch = []
    PerturbPtsBatch = []
    H4PtColBatch = []
    MaskBatch = []

    # Generate random image
    if (np.shape(IBuffer)[1] > 346):
        IBuffer = np.hsplit(IBuffer, 2)
        I1 = IBuffer[0]
    else:
        I1 = IBuffer

    # Homography and Patch generation
    IOriginal, I1Patch, I2Patch, AllPts, PerturbPts,\
    H4PtCol, Mask = GenerateRandPatch(I1, Rho, PatchSize, CropType, Vis=Vis) # Rand Patch will take the whole image as it doesn't have a choice
    ICombo = np.dstack((I1Patch, I2Patch))

    # Normalize Dataset
    # https://stackoverflow.com/questions/42275815/should-i-substract-imagenet-pretrained-inception-v3-model-mean-value-at-inceptio
    IS = iu.StandardizeInputs(np.float32(ICombo))

    # Append All Images and Mask
    IBatch.append(IS)
    I1Batch.append(I1Patch)
    I2Batch.append(I2Patch)
    AllPtsBatch.append(AllPts)
    PerturbPtsBatch.append(PerturbPts)
    H4PtColBatch.append(H4PtCol)
    MaskBatch.append(Mask)

    # IBatch is the Original Image I1 Batch
    return IBatch, I1Batch, I2Batch, AllPtsBatch, PerturbPtsBatch, H4PtColBatch, MaskBatch
Example #12
def SetupAll(BasePath, LearningRate):
    """
    Inputs: 
    BasePath is the base path where Images are saved without "/" at the end
    Outputs:
    DirNames - Full path to all image files without extension
    Train/Val/Test - Idxs of all the images to be used for training/validation (held-out testing in this case)/testing
    Ratios - Ratios is a list of fraction of data used for [Train, Val, Test]
    CheckPointPath - Path to save checkpoints/model
    OptimizerParams - List of all OptimizerParams: depends on Optimizer
    SaveCheckPoint - Save checkpoint every SaveCheckPoint iteration in every epoch, checkpoint saved automatically after every epoch
    ImageSize - Size of the image
    NumTrain/Val/TestSamples - length(Train/Val/Test)
    NumTestRunsPerEpoch - Number of passes of Val data with MiniBatchSize 
    Train/Val/TestLabels - Labels corresponding to Train/Val/Test
    """
    # Setup DirNames
    DirNamesPath = BasePath + os.sep + 'DirNames.txt'
    LabelNamesPath = BasePath + os.sep + 'Labels.txt'
    TrainPath = BasePath + os.sep + 'Train.txt'
    DirNames, TrainNames, TrainLabels = ReadDirNames(DirNamesPath,
                                                     LabelNamesPath, TrainPath)

    # Setup Neural Net Params
    # List of all OptimizerParams: depends on Optimizer
    # For ADAM Optimizer: [LearningRate, Beta1, Beta2, Epsilon]
    UseDefaultFlag = 0  # Set to 1 to use the default parameters below, 0 to use your own
    if UseDefaultFlag:
        # Default Parameters
        OptimizerParams = [1e-3, 0.9, 0.999, 1e-8]
    else:
        # Custom Parameters
        OptimizerParams = [LearningRate, 0.9, 0.999, 1e-8]

    # Save checkpoint every SaveCheckPoint iteration in every epoch, checkpoint saved automatically after every epoch
    SaveCheckPoint = 100

    # Image Input Shape
    Factor = 3
    I = cv2.imread(BasePath + os.sep + TrainNames[0])
    _, ImageSize = iu.CenterCropFactor(I, Factor)
    NumTrainSamples = len(TrainNames)

    return TrainNames, TrainLabels, OptimizerParams, SaveCheckPoint, Factor, ImageSize, NumTrainSamples
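A typical call site, sketched with a placeholder path and learning rate; the function returns a 7-tuple that is unpacked as follows.

(TrainNames, TrainLabels, OptimizerParams, SaveCheckPoint,
 Factor, ImageSize, NumTrainSamples) = SetupAll('/path/to/dataset', 1e-4)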
Example #13
def GenerateBatch(BasePath, DirNamesTrain, TrainLabels, ImageSize, MiniBatchSize, NumValidSamples, train):
    """
    Inputs: 
    BasePath - Path to CIFAR10 folder without "/" at the end
    DirNamesTrain - Variable with Subfolder paths to train files
    NOTE that Train can be replaced by Val/Test for generating batch corresponding to validation (held-out testing in this case)/testing
    TrainLabels - Labels corresponding to Train
    NOTE that TrainLabels can be replaced by Val/TestLabels for generating batch corresponding to validation (held-out testing in this case)/testing
    ImageSize - Size of the image
    MiniBatchSize - Size of the mini-batch
    NumValidSamples - Number of leading entries in DirNamesTrain reserved for validation
    train - If True, sample a random training mini-batch; if False, return all NumValidSamples validation samples
    Outputs:
    I1Batch - Batch of images
    LabelBatch - Batch of one-hot encoded labels 
    """
    I1Batch = []
    LabelBatch = []
    ImageNum = 0
    if train:
        while ImageNum < MiniBatchSize:
            # Generate random image
            RandIdx = random.randint(NumValidSamples, len(DirNamesTrain)-1)
            
            RandImageName = BasePath + os.sep + DirNamesTrain[RandIdx] + '.png'   
            ImageNum += 1

            I1 = np.float32(cv2.imread(RandImageName))
            # I1 = normalization(I1, axis=(1, 2))
            Label = convertToOneHot(TrainLabels[RandIdx], 10)
            I1Batch, LabelBatch = iu.preprocess1(I1, Label, I1Batch, LabelBatch)

    else:
        for i in range(NumValidSamples):
            ImageName = BasePath + os.sep + DirNamesTrain[i] + '.png'   
            I1 = np.float32(cv2.imread(ImageName))
            # I1 = normalization(I1, axis=(1, 2))
            Label = convertToOneHot(TrainLabels[i], 10)
            # Append All Images and Mask
            I1Batch.append(I1)
            LabelBatch.append(Label)
    return I1Batch, LabelBatch
Example #14
def GenerateBatch(BasePath, DirNamesTrain, ImageSize, LargeImgSize,
                  MiniBatchSize):
    """
    Inputs: 
    BasePath - Path to COCO folder without "/" at the end
    DirNamesTrain - Variable with Subfolder paths to train files
    NOTE that Train can be replaced by Val/Test for generating batch corresponding to validation (held-out testing in this case)/testing
    NOTE that TrainLabels can be replaced by Val/TestLabels for generating batch corresponding to validation (held-out testing in this case)/testing
    ImageSize - Size of the Image
    MiniBatchSize is the size of the MiniBatch
    Outputs:
    I1Batch - Batch of images
    LabelBatch - Batch of one-hot encoded labels 
    """
    Tau = min(ImageSize[0], ImageSize[1]) / 3

    # Share the random crop across the batch
    x, y = iu.randomCrop((LargeImgSize[0], LargeImgSize[1]), ImageSize)
    Corners = np.array([(x, y), (x + ImageSize[1], y),
                        (x + ImageSize[1], y + ImageSize[0]),
                        (x, y + ImageSize[0])],
                       dtype=np.float32)

    # Get indices for the patch
    Indices = np.arange(0, LargeImgSize[0] * LargeImgSize[1])
    Indices = Indices.reshape(LargeImgSize[0], LargeImgSize[1])
    Indices = Indices[y:y + ImageSize[0], x:x + ImageSize[1]]

    I1Batch = []
    ImgOrgBatch = []
    LabelBatch = []
    CornerBatch = []

    ImageNum = 0
    while ImageNum < MiniBatchSize:
        # Generate random image
        RandIdx = random.randint(0, len(DirNamesTrain) - 1)

        RandImageName = BasePath + "/Data" + os.sep + DirNamesTrain[
            RandIdx] + '.jpg'
        ImageNum += 1
        Image = cv2.imread(RandImageName, 0)
        Image = cv2.resize(Image, (LargeImgSize[1], LargeImgSize[0]))

        Label = np.random.randint(2 * Tau, size=8) - Tau

        # destination (mapped) corners in (x, y) format
        Warped = np.array(
            [(Corners[0, 0] + Label[0], Corners[0, 1] + Label[1]),
             (Corners[1, 0] + Label[2], Corners[1, 1] + Label[3]),
             (Corners[2, 0] + Label[4], Corners[2, 1] + Label[5]),
             (Corners[3, 0] + Label[6], Corners[3, 1] + Label[7])],
            dtype=np.float32)

        CroppedPatch, WarpedPatch = iu.getWarpingPair(Image, ImageSize,
                                                      Corners, Warped)

        #print('c', CroppedPatch.shape)
        #print('w', WarpedPatch.shape)

        #cv2.imshow("Image", Image)
        #cv2.imshow("Warped Patch", WarpedPatch)
        #cv2.imshow("Cropped Patch", CroppedPatch)

        I1 = np.float32(np.stack([CroppedPatch, WarpedPatch], -1))

        # Append All Images and Mask
        I1Batch.append(I1)
        ImgOrgBatch.append(Image[..., None])
        LabelBatch.append(Label)
        CornerBatch.append(Corners.reshape(8))

        #cv2.waitKey()

    return I1Batch, ImgOrgBatch, LabelBatch, CornerBatch, Indices
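For reference, the 8-vector Label is the 4-point (H4Pt) parameterization of a homography: the (dx, dy) offsets of the four patch corners. A sketch (not part of the original code) of recovering the implied 3x3 homography with OpenCV, reusing the Corners and Label arrays built above:

Warped = Corners + np.float32(Label).reshape(4, 2)            # perturbed corners
H = cv2.getPerspectiveTransform(np.float32(Corners), Warped)  # 3x3 homography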
Example #15
def GenerateBatch(TrainNames, TrainLabels, Factor, ImageSize, MiniBatchSize,
                  BasePath, MaxFrameDiff):
    """
    Inputs: 
    DirNames - Full path to all image files without extension
    NOTE that Train can be replaced by Val/Test for generating batch corresponding to validation (held-out testing in this case)/testing
    TrainLabels - Labels corresponding to Train
    NOTE that TrainLabels can be replaced by Val/TestLabels for generating batch corresponding to validation (held-out testing in this case)/testing
    ImageSize - Size of the Image
    MiniBatchSize is the size of the MiniBatch
    Outputs:
    I1Batch - Batch of I1 images after standardization and cropping/resizing to ImageSize
    HomeVecBatch - Batch of Homing Vector labels
    """
    IBatch = []
    I1Batch = []
    I2Batch = []
    LabelBatch = []

    ImageNum = 0
    while ImageNum < MiniBatchSize:
        # Generate random image
        RandIdx = random.randint(0, len(TrainNames) - MaxFrameDiff)
        RandFrameDiff = random.randint(1, MaxFrameDiff)

        RandImageName = TrainNames[RandIdx]
        # Create File Number in same folder with RandFrameDiff
        RandImagePairName = RandImageName.split(
            os.sep)[0] + os.sep + 'events' + os.sep + 'event_' + str(
                int(re.split('_|.png', RandImageName)[-2]) + RandFrameDiff)
        I2 = cv2.imread(BasePath + os.sep + RandImagePairName + '.png')
        if (not np.shape(I2)):
            # cv2.imread returns None if the image is not found
            continue  # Retry with a new random image pair

        I1 = cv2.imread(BasePath + os.sep + RandImageName)

        ImageNum += 1

        I1, _ = iu.CenterCropFactor(I1, Factor)
        I2, _ = iu.CenterCropFactor(I2, Factor)

        ICombined = np.dstack((I1, I2))

        # Standardize Inputs as given by Inception v3 paper
        # MAYBE: Find Mean of Dataset or use from ImageNet
        # MAYBE: Normalize Dataset
        # https://stackoverflow.com/questions/42275815/should-i-substract-imagenet-pretrained-inception-v3-model-mean-value-at-inceptio
        IS = iu.StandardizeInputs(np.float32(ICombined))
        Label1 = cv2.imread(BasePath + os.sep + TrainLabels[RandIdx])
        Label1Name = TrainLabels[RandIdx]
        Label2Name = Label1Name.split(
            os.sep)[0] + os.sep + 'masks' + os.sep + 'mask_' + '%08d.png' % (
                int(re.split('_|.png', RandImageName)[-2]) + RandFrameDiff
            )  # 08
        Label2 = cv2.imread(BasePath + os.sep + Label2Name)
        LabelCropped, _ = iu.CenterCropFactor(
            Label1 | Label2,
            Factor)  # Label Mask is the logical OR of both Masks
        LabelCropped = np.float32(LabelCropped[:, :, 0]) / 255.0
        LabelCropped = np.expand_dims(LabelCropped, axis=2)
        LabelCropped = np.dstack((LabelCropped, 1.0 - LabelCropped))

        # Append All Images and Mask
        IBatch.append(IS)
        I1Batch.append(I1)
        I2Batch.append(I2)
        LabelBatch.append(LabelCropped)

    return IBatch, I1Batch, I2Batch, LabelBatch
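A usage sketch that stacks the returned lists into arrays for a two-frame segmentation batch; it assumes all frames share the same size and that TrainNames, TrainLabels, etc. come from a setup step such as SetupAll above.

IBatch, I1Batch, I2Batch, LabelBatch = GenerateBatch(
    TrainNames, TrainLabels, Factor, ImageSize, MiniBatchSize, BasePath,
    MaxFrameDiff)
IBatch = np.stack(IBatch)          # (MiniBatchSize, H, W, 6): two BGR frames depth-stacked
LabelBatch = np.stack(LabelBatch)  # (MiniBatchSize, H, W, 2): OR-ed mask and its complement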