Example #1
0
def output(batchImages, batchPclA, batchPclB, batchtMatT, batchtMatP,
           batchTFrecFileIDs, **kwargs):
    """Apply each sample's predicted transform and write a TensorFlow record.

    TODO: SIMILAR TO DATA INPUT -> WE NEED A QUEUE RUNNER TO WRITE THIS OFF TO BE FASTER

    Args:
        batchImages: stacked depth images; each is split in two along axis 2
            into depth A and depth B.
        batchPclA, batchPclB: point clouds for views A and B.
        batchtMatT, batchtMatP: target and predicted transformation matrices.
        batchTFrecFileIDs: per-sample 3-part IDs used to name each record.
        **kwargs: must provide 'activeBatchSize' and 'warpedOutputFolder'.
    Returns:
        None.
    Raises:
        ValueError: If no dataDir
    """
    for idx in range(kwargs.get('activeBatchSize')):
        # Split the stacked image along the depth dimension into A and B.
        depthA, depthB = np.asarray(np.split(batchImages[idx], 2, axis=2))
        pclATransformed, tMatRes, depthATransformed = _apply_prediction(
            batchPclA[idx], batchtMatT[idx], batchtMatP[idx], **kwargs)
        # Record name: "<id0>_<id1>_<id2>".
        recID = batchTFrecFileIDs[idx]
        filename = "_".join([str(recID[0]), str(recID[1]), str(recID[2])])
        tfrecord_io.tfrecord_writer(recID, pclATransformed,
                                    batchPclB[idx], depthATransformed, depthB,
                                    tMatRes,
                                    kwargs.get('warpedOutputFolder') + '/',
                                    filename)
    return
Example #2
0
def output_loop(batchImages, batchPclA, batchPclB, bargetT, targetP,
                batchTFrecFileIDs, i, **kwargs):
    """Warp batch element ``i`` with its predicted transform, write the result
    as a TensorFlow record, and dump the raw prediction to the phase folder.

    TODO: SIMILAR TO DATA INPUT -> WE NEED A QUEUE RUNNER TO WRITE THIS OFF TO BE FASTER

    Args:
        batchImages: stacked depth images; split along axis 2 into A and B.
        batchPclA, batchPclB: point clouds for views A and B.
        bargetT, targetP: target and predicted transformation parameters.
        batchTFrecFileIDs: per-sample 3-part IDs used to name each record.
        i: index of the batch element to process.
        **kwargs: must provide 'imageDepthRows', 'imageDepthCols',
            'warpedOutputFolder', 'phase', and 'tMatTrainDir'/'tMatTestDir'.
    Returns:
        None.
    Raises:
        ValueError: If no dataDir
    """
    # Split the stacked image along the depth dimension into A and B.
    depthA, depthB = np.asarray(np.split(batchImages[i], 2, axis=2))
    depthB = depthB.reshape(kwargs.get('imageDepthRows'),
                            kwargs.get('imageDepthCols'))
    pclATransformed, targetRes, depthATransformed = _apply_prediction(
        batchPclA[i], bargetT[i], targetP[i], **kwargs)
    # Record name: "<id0>_<id1>_<id2>".
    recID = batchTFrecFileIDs[i]
    filename = "_".join([str(recID[0]), str(recID[1]), str(recID[2])])
    tfrecord_io.tfrecord_writer(recID, pclATransformed,
                                batchPclB[i], depthATransformed, depthB,
                                targetRes,
                                kwargs.get('warpedOutputFolder') + '/',
                                filename)
    # Predictions are logged under the train or test tMat folder by phase.
    folderTmat = (kwargs.get('tMatTrainDir')
                  if kwargs.get('phase') == 'train'
                  else kwargs.get('tMatTestDir'))
    write_predictions(recID, targetP[i], folderTmat)
    return
Example #3
0
def output_with_test_image_files(batchImageOrig, batchImage, batchPOrig, batchTHAB, batchPHAB, batchTFrecFileIDs, **kwargs):
    """Write per-sample test artifacts: JSON corner data, debug JPEGs, and a
    TensorFlow record holding the re-warped patch pair.

    TODO: SIMILAR TO DATA INPUT -> WE NEED A QUEUE RUNNER TO WRITE THIS OFF TO BE FASTER

    Args:
        batchImageOrig: full original images (counted as corrupt when the
            first dimension is not 240).
        batchImage: stacked patch pairs; split along axis 2 into orig/pert.
        batchPOrig: flat 8-value corner coordinates of the original patch.
        batchTHAB, batchPHAB: flat 8-value target and predicted HAB vectors.
        batchTFrecFileIDs: per-sample 2-part IDs used to name every output.
        **kwargs: must provide 'testLogDir', 'activeBatchSize',
            'warpOriginalImage', and 'warpedOutputFolder'.
    Returns:
        Tuple (corrupt_imageOrig, corrupt_patchOrig, corrupt_patchPert) of
        counts of outputs with unexpected leading dimension (240/128/128).
    Raises:
        ValueError: If no dataDir
    """
    imagesOutputFolder = kwargs.get('testLogDir')+'/images/'
    _set_folders(imagesOutputFolder)
    # Tallies of outputs whose leading dimension is not the expected size.
    corrupt_patchOrig = 0
    corrupt_patchPert = 0
    corrupt_imageOrig = 0
    dataJson = {'pOrig':[], 'tHAB':[], 'pHAB':[]}
    for i in range(kwargs.get('activeBatchSize')):
        fid = batchTFrecFileIDs[i]
        # Every per-sample file shares this "<folder><id0>_<id1>" prefix.
        prefix = imagesOutputFolder + str(fid[0]) + '_' + str(fid[1])
        # NOTE(review): dataJson is overwritten (not appended) each pass, so
        # each JSON file holds only the current sample's values.
        dataJson['pOrig'] = batchPOrig[i].tolist()
        dataJson['tHAB'] = batchTHAB[i].tolist()
        dataJson['pHAB'] = batchPHAB[i].tolist()
        write_json_file(prefix, dataJson)
        cv2.imwrite(prefix + '_fullOrig.jpg', batchImageOrig[i]*255)

        orig, pert = np.asarray(np.split(batchImage[i], 2, axis=2))
        cv2.imwrite(prefix + '_ob.jpg', orig*255)
        cv2.imwrite(prefix + '_pb.jpg', pert*255)

        # Remaining correction (target minus prediction), shaped 2x4 for the
        # warp helpers and the record writer.
        cHAB = batchTHAB[i]-batchPHAB[i]
        HAB = np.asarray([[cHAB[0], cHAB[1], cHAB[2], cHAB[3]],
                          [cHAB[4], cHAB[5], cHAB[6], cHAB[7]]], np.float32)
        pts = batchPOrig[i]
        pOrig = np.asarray([[pts[0], pts[1], pts[2], pts[3]],
                            [pts[4], pts[5], pts[6], pts[7]]])
        if kwargs.get('warpOriginalImage'):
            patchOrig, patchPert = _warp_w_orig_newTarget(batchImageOrig[i], batchImage[i], pOrig, HAB, **kwargs)
            # NOT DEVELOPED YET
            #imageOrig, imagePert = _warp_w_orig_newOrig(batchImageOrig[i], batchImage[i], pOrig, batchPHAB[i], **kwargs)
        else:
            patchOrig, patchPert = _warp_wOut_orig_newTarget(batchImage[i], batchPHAB[i])

        cv2.imwrite(prefix + '_op.jpg', patchOrig*255)
        cv2.imwrite(prefix + '_pp.jpg', patchPert*255)
        # Write each Tensorflow record
        fileIDs = str(fid[0]) + '_' + str(fid[1])
        tfrecord_io.tfrecord_writer(batchImageOrig[i], patchOrig, patchPert, pOrig, HAB,
                                    kwargs.get('warpedOutputFolder')+'/',
                                    fileIDs, fid)
        if batchImageOrig[i].shape[0] != 240:
            corrupt_imageOrig += 1
        if patchOrig.shape[0] != 128:
            corrupt_patchOrig += 1
        if patchPert.shape[0] != 128:
            corrupt_patchPert += 1

    return corrupt_imageOrig, corrupt_patchOrig, corrupt_patchPert
Example #4
0
def output(batchImageOrig, batchImage, batchPOrig, batchTHAB, batchPHAB, batchPrevPredHAB, batchTFrecFileIDs, **kwargs):
    """Re-warp each sample with the accumulated (previous + current) HAB
    prediction and write the result as a TensorFlow record.

    TODO: SIMILAR TO DATA INPUT -> WE NEED A QUEUE RUNNER TO WRITE THIS OFF TO BE FASTER

    Args:
        batchImageOrig: full original images (counted as corrupt when the
            first dimension is not 240).
        batchImage: stacked patch pairs for views orig/pert.
        batchPOrig: flat 8-value corner coordinates of the original patch.
        batchTHAB, batchPHAB: flat 8-value target and predicted HAB vectors.
        batchPrevPredHAB: running sum of predictions from earlier iterations.
        batchTFrecFileIDs: per-sample 2-part IDs used to name each record.
        **kwargs: must provide 'activeBatchSize' and 'warpedOutputFolder'.
    Returns:
        Tuple (corrupt_imageOrig, corrupt_patchOrig, corrupt_patchPert) of
        counts of outputs with unexpected leading dimension (240/128/128).
    Raises:
        ValueError: If no dataDir
    """
    # Tallies of outputs whose leading dimension is not the expected size.
    corrupt_patchOrig = 0
    corrupt_patchPert = 0
    corrupt_imageOrig = 0
    for i in range(kwargs.get('activeBatchSize')):
        # Accumulated prediction: previous running sum plus this iteration's
        # prediction, shaped 2x4.
        sumHAB = batchPrevPredHAB[i]+batchPHAB[i]
        additivePredHAB = np.asarray([[sumHAB[0], sumHAB[1], sumHAB[2], sumHAB[3]],
                                      [sumHAB[4], sumHAB[5], sumHAB[6], sumHAB[7]]], np.float32)
        pts = batchPOrig[i]
        pOrig = np.asarray([[pts[0], pts[1], pts[2], pts[3]],
                            [pts[4], pts[5], pts[6], pts[7]]])
        # patchOrig moves toward patchPert using the accumulated prediction on
        # the untouched original image; patchPert itself never changes.
        patchOrig, patchPert = _warp_w_orig_newOrig(batchImageOrig[i], batchImage[i], pOrig, additivePredHAB, **kwargs)
        # The new target is what remains after subtracting this prediction.
        remHAB = batchTHAB[i]-batchPHAB[i]
        newTargetHAB = np.asarray([[remHAB[0], remHAB[1], remHAB[2], remHAB[3]],
                                   [remHAB[4], remHAB[5], remHAB[6], remHAB[7]]], np.float32)
        # Write each Tensorflow record
        fid = batchTFrecFileIDs[i]
        fileIDs = str(fid[0]) + '_' + str(fid[1])
        tfrecord_io.tfrecord_writer(batchImageOrig[i], patchOrig, patchPert, pOrig, newTargetHAB, additivePredHAB,
                                    kwargs.get('warpedOutputFolder')+'/',
                                    fileIDs, fid)
        if batchImageOrig[i].shape[0] != 240:
            corrupt_imageOrig += 1
        if patchOrig.shape[0] != 128:
            corrupt_patchOrig += 1
        if patchPert.shape[0] != 128:
            corrupt_patchPert += 1

    return corrupt_imageOrig, corrupt_patchOrig, corrupt_patchPert
Example #5
0
def output(batchImageOrig, batchImage, batchPOrig, batchTHAB, batchPHAB, batchTFrecFileIDs, **kwargs):
    """Warp each sample using the remaining (target minus predicted) HAB and
    write the resulting patch pair as a TensorFlow record.

    TODO: SIMILAR TO DATA INPUT -> WE NEED A QUEUE RUNNER TO WRITE THIS OFF TO BE FASTER

    Args:
        batchImageOrig: full original images (counted as corrupt when the
            first dimension is not 240).
        batchImage: stacked patch pairs for views orig/pert.
        batchPOrig: flat 8-value corner coordinates of the original patch.
        batchTHAB, batchPHAB: flat 8-value target and predicted HAB vectors.
        batchTFrecFileIDs: per-sample 2-part IDs used to name each record.
        **kwargs: must provide 'activeBatchSize', 'warpOriginalImage', and
            'warpedOutputFolder'.
    Returns:
        Tuple (corrupt_imageOrig, corrupt_patchOrig, corrupt_patchPert) of
        counts of outputs with unexpected leading dimension (240/128/128).
    Raises:
        ValueError: If no dataDir
    """
    warpedImageFolder = kwargs.get('warpedOutputFolder')+'/'
    # Tallies of outputs whose leading dimension is not the expected size.
    corrupt_patchOrig = 0
    corrupt_patchPert = 0
    corrupt_imageOrig = 0
    for i in range(kwargs.get('activeBatchSize')):
        # Remaining correction (target minus prediction), shaped 2x4 for the
        # warp helpers and the record writer.
        cHAB = batchTHAB[i]-batchPHAB[i]
        HAB = np.asarray([[cHAB[0], cHAB[1], cHAB[2], cHAB[3]],
                          [cHAB[4], cHAB[5], cHAB[6], cHAB[7]]], np.float32)
        pts = batchPOrig[i]
        pOrig = np.asarray([[pts[0], pts[1], pts[2], pts[3]],
                            [pts[4], pts[5], pts[6], pts[7]]])
        if kwargs.get('warpOriginalImage'):
            patchOrig, patchPert = _warp_w_orig_newTarget(batchImageOrig[i], batchImage[i], pOrig, HAB, **kwargs)
            # NOT DEVELOPED YET
            #imageOrig, imagePert = _warp_w_orig_newOrig(batchImageOrig[i], batchImage[i], pOrig, batchPHAB[i], **kwargs)
        else:
            patchOrig, patchPert = _warp_wOut_orig_newTarget(batchImage[i], batchPHAB[i])

        # Write each Tensorflow record
        fid = batchTFrecFileIDs[i]
        fileIDs = str(fid[0]) + '_' + str(fid[1])
        tfrecord_io.tfrecord_writer(batchImageOrig[i], patchOrig, patchPert, pOrig, HAB,
                                    warpedImageFolder,
                                    fileIDs, fid)
        if batchImageOrig[i].shape[0] != 240:
            corrupt_imageOrig += 1
        if patchOrig.shape[0] != 128:
            corrupt_patchOrig += 1
        if patchPert.shape[0] != 128:
            corrupt_patchPert += 1

    return corrupt_imageOrig, corrupt_patchOrig, corrupt_patchPert
Example #6
0
def perturb_writer(ID, idx, imgOrig, imgPatchOrig, imgPatchPert, HAB, pOrig,
                   tfRecFolder):
    """Write one (original, perturbed) patch pair as a TensorFlow record.

    Args:
        ID: sample identifier (first component of the record name).
        idx: perturbation index within the sample (second component).
        imgOrig: full original image.
        imgPatchOrig: patch extracted from the original image.
        imgPatchPert: perturbed version of the patch.
        HAB: corner-displacement (HAB) target; must support ``* 0``.
        pOrig: corner coordinates of the original patch.
        tfRecFolder: destination folder for the record.
    Returns:
        None.
    """
    # Record is named "<ID>_<idx>"; the same pair is also stored inside the
    # record as its file ID.
    filename = str(ID) + "_" + str(idx)
    fileID = [ID, idx]
    # HAB * 0 fills the previous-prediction slot with zeros of HAB's shape —
    # presumably seeding the iterative pipeline's accumulated prediction;
    # confirm against tfrecord_io.tfrecord_writer's signature.
    tfrecord_io.tfrecord_writer(imgOrig, imgPatchOrig, imgPatchPert, pOrig,
                                HAB, HAB * 0, tfRecFolder, filename, fileID)
    return