Example #1
from src.algorithms.BaseAlgorithm import fromDirectory
from src.algorithms.SURF import SURF

dataDir = "../../data"
partsDir = f"{dataDir}/parts/300x300"
originalDir = f"{dataDir}/original/300x300"
outputDir = f"{dataDir}/experimentResults/old_single/surf/new"

surf = SURF(parts=fromDirectory(partsDir), images=fromDirectory(originalDir))
surf.process()
surf.writeResults(outputDir)
surf.printResults()
"""
Results:

Total time [ms]: 460.539
Average times [ms]:
    - Keypoint and descriptor computing for a part: 1.375
    - Keypoint and descriptor computing for an image: 40.681
    - Matching part with individual image: 0.425
    - Matching part with all images: 4.273
    - Processing entire part: 5.691
    
Average part descriptor size: 2936.89
Average image descriptor size: 39424.0

Deductions:
    - almost identical implementation to SIFT
    - similar times for computing the keypoints and descriptors
    - matching and processing an entire part are a lot faster in SURF
    - this is most likely due to the much smaller descriptor size (and count); a quick check of the descriptor shapes follows below
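That descriptor-size gap is easy to check directly with OpenCV. A minimal sketch, assuming opencv-contrib-python (>= 4.4, built with the non-free xfeatures2d module); the part path is only illustrative and not part of the project code:

import cv2 as cv

# Illustrative input; any grayscale part from data/parts/300x300 would do.
part = cv.imread("../../data/parts/300x300/part_0.png", cv.IMREAD_GRAYSCALE)

sift = cv.SIFT_create()              # 128-dimensional float descriptors
surf = cv.xfeatures2d.SURF_create()  # 64-dimensional by default (extended=False)

_, siftDesc = sift.detectAndCompute(part, None)
_, surfDesc = surf.detectAndCompute(part, None)

# SURF descriptors are half as wide as SIFT's, which lines up with the much
# smaller average descriptor sizes reported above.
print("SIFT:", None if siftDesc is None else siftDesc.shape)
print("SURF:", None if surfDesc is None else surfDesc.shape)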
Example #2
        print(
            f"({strftime('%H:%M:%S')}) ({formatProgress(progress)}) Brightness: {formatPercentage(b)}, Contrast: {formatPercentage(c)}"
        )
        # preprocesses parts - changes brightness/contrast according to b or c
        images = []
        for path in paths:
            img = cv.imread(path)
            alpha = 1 + float(c) / 100
            beta = float(b) / 100 * 255
            # clips the values at 0 - 255, converts to unsigned ints (0 - 255)
            image = InputImage(
                np.asarray(np.clip(img * alpha + beta, 0, 255),
                           dtype=np.uint8), path)
            images.append(image)
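        # Design note (not in the original script): cv.convertScaleAbs(img, alpha=alpha,
        # beta=beta) does essentially the same scaling and saturation in one call, but it
        # takes an absolute value before converting to uint8, so the explicit np.clip above
        # behaves more predictably when a negative b pushes pixel values below 0.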

        ft = FT(parts=images, images=fromDirectory(originalDir))
        results = ft.process()

        for i, result in enumerate(results):
            _path = None
            # finds correct result in annotations
            annotatedResult = next(
                (x for x in annotations.matches
                 if x.part == os.path.basename(result.partPath)), None)
            if annotatedResult is None or os.path.basename(
                    result.imagePath) != annotatedResult.image:
                _path = f"{outputDir}/nope/{formatFileName(i, b, c)}"
            else:
                _x, _y = result.start
                distance = math.sqrt(
                    (_x - annotatedResult.x)**2 +
Example #3
from src.algorithms.BaseAlgorithm import fromDirectory
from src.algorithms.FREAK import FREAK

dataDir = "../../data"
partsDir = f"{dataDir}/parts/300x300"
originalDir = f"{dataDir}/original/300x300"
outputDir = f"{dataDir}/experimentResults/old_single/fast_freak/new"

freak = FREAK(parts=fromDirectory(partsDir), images=fromDirectory(originalDir))
freak.process()
freak.writeResults(outputDir)
freak.printResults()
"""
Results:

Total time [ms]: 455.124
Average times [ms]:
    - Keypoint and descriptor computing for a part: 1.634
    - Keypoint and descriptor computing for an image: 21.735
    - Matching part with individual image: 3.185
    - Matching part with all images: 31.874
    - Processing entire part: 33.557
    
Average part descriptor size: 9737.14
Average image descriptor size: 110316.8

Deductions:
    - uses the FAST keypoint detector with the FREAK descriptor
    - suffers from the same general FAST problem - small images (spot-checked in the sketch below)
        - didn't find any descriptors in the skyscraper part or the bridge part
        - only found 1 descriptor in the cable car (part 3)?
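The small-image problem can be spot-checked outside the project pipeline. A rough sketch, assuming opencv-contrib-python for FREAK and reusing the partsDir defined above:

import glob
import cv2 as cv

fast = cv.FastFeatureDetector_create()
freak = cv.xfeatures2d.FREAK_create()

for path in sorted(glob.glob(f"{partsDir}/*")):
    part = cv.imread(path, cv.IMREAD_GRAYSCALE)
    keypoints = fast.detect(part, None)
    # FREAK drops keypoints whose sampling pattern would fall outside the image,
    # so small parts can end up with zero or one usable descriptor.
    keypoints, descriptors = freak.compute(part, keypoints)
    print(path, len(keypoints), 0 if descriptors is None else len(descriptors))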
Example #4
from src.algorithms.BaseAlgorithm import fromDirectory
from src.algorithms.BRIEF import BRIEF

dataDir = "../../data"
partsDir = f"{dataDir}/parts/300x300"
originalDir = f"{dataDir}/original/300x300"
outputDir = f"{dataDir}/experimentResults/old_single/fast_brief/new"

brief = BRIEF(parts=fromDirectory(partsDir),
              images=fromDirectory(originalDir))
brief.process()
brief.writeResults(outputDir)
brief.printResults()

"""
Results:

Total time [ms]: 230.885
Average times [ms]:
    - Keypoint and descriptor computing for a part: 0.698
    - Keypoint and descriptor computing for an image: 7.024
    - Matching part with individual image: 2.554
    - Matching part with all images: 25.561
    - Processing entire part: 26.311
    
Average part descriptor size: 4229.33
Average image descriptor size: 51843.2

Deductions:
    - uses the FAST corner detector with the BRIEF descriptor (neither works as a standalone detector+descriptor)
        - FAST produces keypoints but has no compute() method for descriptors; the sketch below shows the resulting two-step pipeline
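A minimal sketch of that two-step pairing in plain OpenCV (BRIEF lives in the contrib xfeatures2d module; the image path is only a placeholder, not project code):

import cv2 as cv

img = cv.imread("../../data/original/300x300/image_0.png", cv.IMREAD_GRAYSCALE)  # placeholder

fast = cv.FastFeatureDetector_create()                    # detection only
brief = cv.xfeatures2d.BriefDescriptorExtractor_create()  # description only

keypoints = fast.detect(img, None)                      # FAST finds the corners...
keypoints, descriptors = brief.compute(img, keypoints)  # ...BRIEF describes them (32 bytes each by default)
print(len(keypoints), None if descriptors is None else descriptors.shape)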
Example #5
from src.algorithms.BaseAlgorithm import fromDirectory
from src.algorithms.ORB import ORB

dataDir = "../../data"
partsDir = f"{dataDir}/parts/300x300"
originalDir = f"{dataDir}/original/300x300"
outputDir = f"{dataDir}/experimentResults/old_single/orb/new"

orb = ORB(parts=fromDirectory(partsDir),
          images=fromDirectory(originalDir))
orb.process()
orb.writeResults(outputDir)
orb.printResults()

"""
Results:

Total time [ms]: 240.82
Average times [ms]:
    - Keypoint and descriptor computing for a part: 1.195
    - Keypoint and descriptor computing for an image: 20.375
    - Matching part with individual image: 0.694
    - Matching part with all images: 6.953
    - Processing entire part: 8.192
    
Average part descriptor size: 3288.0
Average image descriptor size: 13808.0

Deductions:
    - similar results to FAST+BRIEF (which makes sense, since ORB builds on BOTH of them and improves their qualities)
    - compared to FAST+BRIEF:
Example #6
from src.algorithms.BaseAlgorithm import fromDirectory
from src.algorithms.HOG import HOG

dataDir = "../../data"
partsDir = f"{dataDir}/parts/300x300"
originalDir = f"{dataDir}/original/300x300"
outputDir = f"{dataDir}/experimentResults/old_single/hog/new"

hog = HOG(parts=fromDirectory(partsDir),
          images=fromDirectory(originalDir))
hog.process()
hog.writeResults(outputDir)
hog.printResults()

"""
Results:

Total time [ms]: 7740.505
Average times [ms]:
    - Descriptor computing for a part: 0.334
    - Descriptor computing for an image: 5.248
    - Matching part with individual image: 85.34
    - Matching part with all images: 853.442
    - Processing entire part: 853.887

Average part descriptor size: 14464.0
Average image descriptor size: 197136.0 
Average subsets in image: 2967.11

Deductions:
    - calculating descriptors with HOG is very quick; matching them is what dominates the total time (see the sketch below)
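Why matching dominates follows from the sliding-window formulation: every part descriptor has to be compared against every window ("subset") of every image. A back-of-envelope sketch with OpenCV's HOGDescriptor, using illustrative window/block/cell sizes rather than the project's actual parameters:

import cv2 as cv

# Illustrative HOG parameters: 64x64 window, 16x16 blocks, 8x8 block stride and cells, 9 bins.
hog = cv.HOGDescriptor((64, 64), (16, 16), (8, 8), (8, 8), 9)

imageSize, winSize, stride = 300, 64, 8
windows = ((imageSize - winSize) // stride + 1) ** 2

print("descriptor length per window:", hog.getDescriptorSize())  # 1764 with these settings
print("window positions per 300x300 image:", windows)            # 900 with these settings
# Computing one descriptor is cheap; comparing a part against hundreds or thousands
# of such windows per image is what makes the matching step so slow.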
Example #7
from src.algorithms.BaseAlgorithm import fromDirectory
from src.algorithms.FT import FT

dataDir = "../../data"
partsDir = f"{dataDir}/parts/300x300"
originalDir = f"{dataDir}/original/300x300"
outputDir = f"{dataDir}/experimentResults/old_single/ft/new"

ft = FT(parts=fromDirectory(partsDir), images=fromDirectory(originalDir))
ft.process()
ft.writeResults(outputDir)
ft.printResults()
"""
Results:

Total time [ms]: 872.91
Average times [ms]:
    - Descriptor computing for a part: 0.673
    - Descriptor computing for an image: 7.644
    - Matching part with individual image: 8.757
    - Matching part with all images: 87.593
    - Processing entire part: 88.319
    
Average part descriptor size: 128.11
Average image descriptor size: 1444.0
Average subsets in image: 727.67

Deductions:
    - comparing the results to HOG, since both of them essentially treat the picture as one giant keypoint:
        - FT is much faster
        - this is most likely due to the drastically smaller descriptor size and subset count (a back-of-envelope check follows below)
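A quick sanity check of that claim, using only the averages reported in the HOG and FT result blocks above (plain arithmetic, not project code):

# Average image descriptor sizes, subset counts and "matching part with all images" times.
hogDesc, ftDesc = 197136.0, 1444.0
hogSubsets, ftSubsets = 2967.11, 727.67
hogMatchAll, ftMatchAll = 853.442, 87.593

print(f"FT image descriptor is {hogDesc / ftDesc:.0f}x smaller")        # ~137x
print(f"FT uses {hogSubsets / ftSubsets:.1f}x fewer subsets")           # ~4.1x
print(f"FT matches all images {hogMatchAll / ftMatchAll:.1f}x faster")  # ~9.7x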
Example #8
partsDir = f"{dataDir}/parts/{size}"
originalDir = f"{dataDir}/original/{size}"
outputDir = f"{dataDir}/experimentResults/{size}"

Algorithm = namedtuple("Algorithm", "name type output")

algorithms = [
    Algorithm(name="FT", type=FT, output=f"{outputDir}/ft"),
    Algorithm(name="SIFT", type=SIFT, output=f"{outputDir}/sift"),
    Algorithm(name="SURF", type=SURF, output=f"{outputDir}/surf"),
    Algorithm(name="BRIEF", type=BRIEF, output=f"{outputDir}/fast_brief"),
    Algorithm(name="ORB", type=ORB, output=f"{outputDir}/orb"),
    Algorithm(name="FREAK", type=FREAK, output=f"{outputDir}/fast_freak"),
    Algorithm(name="HOG", type=HOG, output=f"{outputDir}/hog")
]

print(f"({strftime('%H:%M:%S')}) Started")

for a in algorithms:
    print(f"({strftime('%H:%M:%S')}) Algorithm: {a.name}")
    for i in range(10):
        print(f"({strftime('%H:%M:%S')}) - Iteration {i + 1}")
        obj = a.type(parts=fromDirectory(partsDir),
                     images=fromDirectory(originalDir),
                     iteration=i)
        obj.process()
        obj.writeResults(f"{a.output}/{i}", includePart=True)
        obj.printResults(f"{a.output}/{i}_result.txt")
        gc.collect()

print(f"({strftime('%H:%M:%S')}) Ended")
Example #9
from src.algorithms.BaseAlgorithm import fromDirectory
from src.algorithms.SIFT import SIFT

dataDir = "../../data"
partsDir = f"{dataDir}/parts/300x300"
originalDir = f"{dataDir}/original/300x300"
outputDir = f"{dataDir}/experimentResults/old_single/sift/new"

sift = SIFT(parts=fromDirectory(partsDir), images=fromDirectory(originalDir))
sift.process()
sift.writeResults(outputDir)
sift.printResults()
"""
Results:

Total time [ms]: 618.98
Average times [ms]:
    - Keypoint and descriptor computing for a part: 4.767
    - Keypoint and descriptor computing for an image: 47.002
    - Matching part with individual image: 1.141
    - Matching part with all images: 11.429
    - Processing entire part: 16.254

Average part descriptor size: 12458.67
Average image descriptor size: 74598.4

Deductions:
    - compared to HOG, SIFT is much faster and I think it should be more robust 
    - after improving HOG, both pre-compute their descriptors, so the difference is most likely in the descriptor sizes
    - the average image descriptor for SIFT is 62.16 % smaller than HOG's, which might be responsible for the 97.86 % speed increase over HOG
"""