Example #1
import matplotlib.pyplot as plt
from matplotlib import cm

import load  # project module providing get_image


def show_results(results, names, title=""):
    """
    Show the results in a grid.

    Only the top 20 results are shown.

    params
    ------
        results: ndarray (., 2)
            array whose first column holds the id of each result and whose
            second column holds its score

        names: ndarray (.)
            image database.
    """
    fig = plt.figure(title)

    for i, result in enumerate(results):
        if i == 20:
            break
        image_name = names[i]
        image, _ = load.get_image(image_name)

        ax = fig.add_subplot(5, 4, i + 1)
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        if len(image.shape) == 3:
            ax.imshow(image)
        else:
            # grayscale images need an explicit gray colormap
            ax.imshow(image, cmap=cm.gray)
        ax.set_title(image_name)
    plt.show()
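A hypothetical call, only to illustrate the expected shapes (the ids, scores and file names below are made up and would have to exist in the image database that load.get_image reads from):

import numpy as np

results = np.array([[12, 95], [7, 90], [3, 87]])  # made-up (id, score) rows
names = np.array(['img_%04d.jpg' % k for k in range(3)])  # made-up file names
show_results(results, names, title="query results")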
Example #2
 def __init__(self, dest_surface, frames, delay=0.1, xbool=False):
     """
     :param dest_surface:
     :param frames: список с именами файлов
     :param delay: скорость смены кадров
     """
     self.image = dest_surface
     res = list()
     for frame in frames:
         image, _, _ = get_image(frame)
         if xbool:
             image = transform.flip(image, True, False)  # mirror horizontally
         res.append(image)
     self.frames = res
     self.frame_number = 0
     self.frame_max = len(frames)
     self.start_time = time.time()
     self.delay = delay
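
 # Hypothetical companion method (not in the original class): a sketch of how
 # the state stored above could advance the animation once `delay` seconds
 # have elapsed.
 def update(self):
     now = time.time()
     if now - self.start_time >= self.delay:
         self.start_time = now
         self.frame_number = (self.frame_number + 1) % self.frame_max
     return self.frames[self.frame_number]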
Example #3
File: main.py  Project: fox0/stuff
def main():
    pygame.init()
    screen = pygame.display.set_mode(DISPLAY)
    pygame.display.set_caption('test v0.0.1')
    bg, _, _ = get_image('640_winter_lab_by_kooner_cz.png')

    entities = pygame.sprite.Group()  # all game objects
    hero = Player(0, 50)
    entities.add(hero)
    platforms, total_level_width, total_level_height = load_level(entities)
    camera = Camera(WIN_WIDTH, WIN_HEIGHT, total_level_width,
                    total_level_height)

    timer = pygame.time.Clock()
    keys = {
        K_UP: False,
        K_DOWN: False,
        K_LEFT: False,
        K_RIGHT: False,
        K_SPACE: False,
    }
    while True:
        timer.tick(60)
        for e in pygame.event.get():
            if e.type == QUIT:
                return
            if e.type == KEYDOWN:
                if e.key == K_ESCAPE:
                    return
                keys[e.key] = True
            if e.type == KEYUP:
                keys[e.key] = False

        hero.update(keys, platforms)  # movement
        camera.update(hero)  # center the camera on the player

        screen.blit(bg, (0, 0))  # everything must be redrawn every iteration
        for e in entities:
            screen.blit(e.image, camera.apply(e))
        pygame.display.update()  # push all changes to the screen
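
main() relies on a Camera class with update() and apply() that is not shown in this example. The following is a minimal sketch of the usual pygame side-scroller camera that would satisfy this usage; the clamping details are an assumption, not taken from the project.

import pygame


class Camera(object):
    """Hypothetical Camera matching how main() uses it above."""

    def __init__(self, win_width, win_height, level_width, level_height):
        self.win_width = win_width
        self.win_height = win_height
        self.state = pygame.Rect(0, 0, level_width, level_height)

    def apply(self, target):
        # Shift a sprite's rect by the current camera offset before blitting.
        return target.rect.move(self.state.topleft)

    def update(self, target):
        # Keep the target centred on screen, clamped to the level bounds.
        x = -target.rect.centerx + self.win_width // 2
        y = -target.rect.centery + self.win_height // 2
        x = min(0, max(-(self.state.width - self.win_width), x))
        y = min(0, max(-(self.state.height - self.win_height), y))
        self.state.topleft = (x, y)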
Example #4
def show_rate():
    current_path = os.path.dirname(__file__)
    print('* Current path = ' + current_path)
    print('Enter path of dataset')
    while True:
        try:
            data_path = input()
            total = len(fnmatch.filter(os.listdir(data_path), '*.png'))
            break
        except FileNotFoundError:
            print('Wrong path. Try again.')

    order = choose_process()

    count = letter_correct = correct = 0
    for f in glob.glob(data_path + '/*.png'):
        count += 1
        original_image, label = get_image(f)
        try:
            processed_image = process(original_image, order)

            if processed_image.text == label:
                correct += 1
            for i in range(min(len(processed_image.text), len(label))):
                if processed_image.text[i] == label[i]:
                    letter_correct += 1
        except Exception:
            print('Error occurred while processing image ', label)

        print('\r{0: .2f}% complete..'.format((count * 100) / total), end='')

    print('')
    print('\tOut of {0} letters, {1} were correctly read. '
          'Success rate: {2:.2f}%'.format(
              total * 6, letter_correct, (letter_correct * 100) / (total * 6)))
    print('\tOut of {0} captcha images, {1} were correctly read. '
          'Success rate: {2:.2f}%'.format(total, correct,
                                          (correct * 100) / total))
    print('Press any key to continue.')
    input()
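
As a quick check of the rate arithmetic above (the figures are made up for illustration): with total = 50 six-letter captchas, letter_correct = 240 and correct = 31, the script reports 240 * 100 / (50 * 6) = 80.00% per letter and 31 * 100 / 50 = 62.00% per image.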
Example #5
def score_results(coords, desc, search_results, names, voc, verbose=False):
    """
    Scores the 200 best results

    params
    ------
        coords:
            interest-point coordinates of the query image

        desc:
            boundary descriptors of the query image

        search_results: ndarray (., 2)
            initial (image index, score) rows to be re-scored

        names: ndarray,
            image database

        voc: ndarray
            vocabulary

        verbose: boolean, optional, default: False
            Make output more verbose

    returns:
        search_results: ndarray
            (index, score) rows, sorted by decreasing score
    """
    for j, (result, score) in enumerate(search_results):
        if verbose:
            print "Scoring %d / %d" % (j, len(search_results))
        im2, mask2 = get_image(names[result, 0])
        interest_points = get_interest_points(mask2)
        desc2, coords2 = compute_boundary_desc(im2,
                                               mask2,
                                               interest_points)
        if desc2:
            search_results[j, 1] += score_(desc, desc2, coords, coords2)
    idxs = search_results[:, 1].argsort()[::-1]
    return search_results[idxs, :]
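
Note that a candidate's score is only updated when compute_boundary_desc actually yields descriptors for it (the "if desc2:" guard); candidates without descriptors keep their original score, and the whole array is then re-sorted by decreasing total score before being returned.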
Example #6
File: evaluate.py  Project: NelleV/SORBB
queries_total = 0
max_im = None
for test_query_index, query in enumerate(ground_truth):
    if test_query_index % 10 == 0:
        print "Computed %d images" % test_query_index
    if max_im and test_query_index == max_im:
        break

    query_file_name = query.keys()[0]
    positive_file_names = set(query.values()[0][0])
    ignore_file_names = set(query.values()[0][1])

    if query_file_name not in all_names:
        continue

    im, mask = load.get_image(query_file_name)
    interest_points = mem.cache(get_interest_points)(mask)
    desc, coords = mem.cache(compute_boundary_desc)(im, mask, interest_points)
    visual_words = compute_visual_words(desc, voc)
    if visual_words is None:
        continue

    #search_results = retrieval.search2(visual_words, postings, max_im=20)
    query_document, _ = np.histogram(visual_words,
                                     bins=np.arange(len(voc) + 1))
    search_results = retrieval2.search(query_document, max_im=20)
    #search_results2 = mem.cache(search2)(visual_words,postings,max_im=20)
    indices = search_results[:, 0].astype(int)
    search_file_names = all_names[indices]
    queries_total += 1
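    # Hypothetical continuation (not part of the original snippet): turn the
    # retrieved file names into a per-query precision, skipping any image in
    # the "ignore" set and counting the rest as hits when they are positives.
    kept = [name for name in search_file_names if name not in ignore_file_names]
    hits = sum(1 for name in kept if name in positive_file_names)
    if kept:
        print "Precision for %s: %.2f" % (query_file_name, float(hits) / len(kept))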
Example #7
File: blocks.py  Project: fox0/stuff
 def __init__(self, x, y):
     sprite.Sprite.__init__(self)
     self.image, w, h = get_image('platform.png')
     self.rect = Rect(x, y, w, h)
Example #9
import numpy as np
from sklearn.externals.joblib import Memory

from load import get_image
from descriptors import get_interest_points, compute_boundary_desc
from histograms import compute_visual_words
from ransac_homography import ransac
from retrieval import match_descriptors
from draw import show_matched_desc


mem = Memory(cachedir='.')
# FIXME Choose a couple of images, and load them properly
vocabulary = np.load('./data/vocabulary.npy')
im1, mask1 = get_image('Henry_Moore_Cut_0006.jpg', test=True)
im2, mask2 = get_image('Henry_Moore_Cut_0034.jpg', test=True)

interest_points = mem.cache(get_interest_points)(mask1)
desc1, coords1 = mem.cache(compute_boundary_desc)(im1, mask1, interest_points)
voc1 = vocabulary[compute_visual_words(desc1, vocabulary)]

interest_points = mem.cache(get_interest_points)(mask2)
desc2, coords2 = mem.cache(compute_boundary_desc)(im2, mask2, interest_points)
voc2 = vocabulary[compute_visual_words(desc2, vocabulary)]

# Use nearest-neighbour / second-nearest-neighbour (ratio test) matching,
# as in SIFT matching.
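
The snippet ends before the matching step the comment describes. Below is a minimal sketch of such a ratio-test matcher; it only assumes that desc1 and desc2 behave like (n, d) NumPy arrays, and it deliberately avoids the project's match_descriptors, ransac and show_matched_desc helpers, whose exact signatures are not shown here.

def ratio_test_matches(desc_a, desc_b, ratio=0.8):
    """Return (i, j) index pairs where desc_a[i] matches desc_b[j]."""
    d1 = np.asarray(desc_a, dtype=float)
    d2 = np.asarray(desc_b, dtype=float)
    # Pairwise squared Euclidean distances, shape (len(d1), len(d2)).
    dists = ((d1[:, None, :] - d2[None, :, :]) ** 2).sum(axis=2)
    matches = []
    for i, row in enumerate(dists):
        order = row.argsort()
        if len(order) < 2:
            continue
        nearest, second = order[0], order[1]
        # Keep the match only if the nearest neighbour is clearly better
        # than the second nearest one (the usual SIFT-style ratio test).
        if row[nearest] < (ratio ** 2) * row[second]:
            matches.append((i, nearest))
    return np.array(matches)


# Hypothetical usage on the descriptors computed above.
matches = ratio_test_matches(desc1, desc2)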