Example #1
tags = {
    'READY': 1,
    'DONE': 2,
    'EXIT': 3,
    'START': 4,
    'START_2': 5,
    'DONE_2': 6,
    'WAIT': 7
}

status = MPI.Status()   # get MPI status object

if rank == 0:

    timekeeper = TimeKeeper()
    timekeeper.time_now('start', True)

    session = DatabaseAccessor().session

    files = []
    tasks = []
    # Master process executes code below

    # Find files
    # For the moment, skip any clever filtering based on whether descriptors already exist
    # TODO come back and add this in

    if DO_TASKS['find_files']:

        if DO_FILTER_DESCRIPTORS:
            filtered = DESCRIPTORS
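
Example #1 stops before the dispatch loop that actually consumes these tags. As a rough sketch only (not the author's code), a minimal mpi4py master/worker exchange built around the same tags dict could look like the following; the communicator setup, the placeholder task list, and the worker's echoed result are all assumptions.

# Illustrative mpi4py master/worker loop using the tags dict above (sketch only).
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
status = MPI.Status()

if rank == 0:
    tasks = list(range(10))            # placeholder work items
    task_idx, closed_workers = 0, 0
    while closed_workers < size - 1:
        data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        source, tag = status.Get_source(), status.Get_tag()
        if tag == tags['READY']:
            if task_idx < len(tasks):
                comm.send(tasks[task_idx], dest=source, tag=tags['START'])
                task_idx += 1
            else:
                comm.send(None, dest=source, tag=tags['EXIT'])
        elif tag == tags['DONE']:
            pass                        # 'data' holds the worker's result here
        elif tag == tags['EXIT']:
            closed_workers += 1
else:
    while True:
        comm.send(None, dest=0, tag=tags['READY'])
        task = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
        if status.Get_tag() == tags['EXIT']:
            break
        comm.send(task, dest=0, tag=tags['DONE'])  # echo the task as a stand-in result
    comm.send(None, dest=0, tag=tags['EXIT'])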
Example #2
def iterall(files_list, match_thresh=1.5):

    timekeeper = TimeKeeper()
    timekeeper.time_now('start', True)

    detector = cv2.SIFT()  # OpenCV 2.x API; newer builds expose cv2.SIFT_create() instead
    all_descriptors = {}
    descriptor_distances = {}
    desc_strings = {}
    weighted_strings = {}

    desc_man = ImageDescriptorManager(match_threshold=match_thresh, detector=detector)

    wrote_sift = 0
    for img_idx in files_list:
        img_path = files_list[img_idx]
        # print str(img_idx) + ' : ' + img_path

        # if os.path.isfile(img_path + '.sift'):
        #     print 'Found sift file : ' + img_path + '.sift'
        #     # print '.',
        #     # with open(img_path + '.sift', 'rb') as f:
        #     #     keypoints, descriptors = unpickle_keypoints( cPickle.load(f) )
        # else:
        #     print 'Creating sift : ' + img_path + '.sift'
        #     img = cv2.imread(img_path)
        #     img_gray = cv2.cvtColor( img, cv2.COLOR_BGR2GRAY )
        #     keypoints, descriptors = detector.detectAndCompute(img_gray, None)
        #
        #     key_desc_temp = pickle_keypoints(keypoints, descriptors)
        #
        #     with open(img_path + '.sift', 'wb') as f:
        #         cPickle.dump(key_desc_temp, f, protocol=cPickle.HIGHEST_PROTOCOL)
        #     del keypoints
        #     del descriptors
        #     wrote_sift += 1
        already_existed = touch_descriptor(img_path, detector)
        if not already_existed:
            wrote_sift += 1

        # all_descriptors[idx] = descriptors
    print 'Had to create ' + str(wrote_sift) + ' .sift files.'

    timekeeper.time_now('After check SIFT')

    for filea in files_list:
        img_desc = ImageDescriptor(filea, files_list[filea])
        desc_man.add_descriptor(img_desc)

    time_delta = timekeeper.time_now('After create ImageDescriptors', True)
    print '\nAvg ' + str(float(time_delta) / len(files_list)) + ' secs per img load'

    for man_img_idx in desc_man.image_descriptors:
        man_img = desc_man.image_descriptors[man_img_idx]
        print '***\n'
        print 'img sig : ' + man_img.get_image_signature()
        print man_img.get_weighted_signature()
        print 'Image ' + man_img.img_path + ' is most similar to ' + files_list[list(man_img.get_sorted_weights())[0]]
        for i in files_list:
            if files_list[i] == man_img.img_path:
                print str(man_img.img_path) + ' : ' + str(i)

        for j in files_list:
            if files_list[j] == files_list[list(man_img.get_sorted_weights())[0]]:
                print 'Similar to image : ' + str(j)

        print '\n'

    print timekeeper.time_now('After similarities', True)



    # for file_a in files_list:
    #     print '\nComparing file ' + str(file_a) + ' to : '
    #
    #     descriptor_orders = {}
    #     weighted_orders = {}
    #     with open(files_list[file_a] + '.sift', 'rb') as fa:
    #         keypoints_a, descriptors_a = unpickle_keypoints( cPickle.load(fa) )
    #
    #     for file_b in files_list:
    #         print file_b,
    #         if file_a != file_b:
    #             with open(files_list[file_b] + '.sift', 'rb') as fb:
    #                 keypoints_b, descriptors_b = unpickle_keypoints( cPickle.load(fb) )
    #
    #             matches_size = find_matches(str(file_a) + ',' + str(file_b),
    #                                         descriptors_a,
    #                                         descriptors_b,
    #                                         match_thresh)
    #             descriptor_orders[str(file_b)] = matches_size
    #
    #             weighted_orders[str(file_b)] = (str(file_b) + '=' + str(matches_size)) + ':'
    #             # weighted_orders[str(file_b)] = (str(file_b) * matches_size)
    #
    #             descriptor_distances[str(file_a) + ':' + str(file_b)] = matches_size
    #
    #     print '\nImage ' + files_list[file_a] + ' is most similar to ' \
    #           + files_list[int(list(sorted(descriptor_orders, key=descriptor_orders.get, reverse=True))[0])] + '\n'
    #
    #     desc_arr = list(sorted(descriptor_orders, key=descriptor_orders.get, reverse=True))
    #
    #     item_string = ''
    #     for item in desc_arr:
    #         item_string += str(weighted_orders[item])
    #     weighted_strings[file_a] = item_string

        # desc_strings[file_a] = ' '.join(desc_arr)
        # print 'Array of img_idx for file ' + str(file_a) + ', closest match first : \n' + desc_strings[file_a]

    # print '\n****\nArray of distances between images : \n' + str(descriptor_distances)




    # for ws in weighted_strings:
    #     print str(ws) + ' : ' + weighted_strings[ws]

    # print '\n****\nAll images description strings : \n' + str(desc_strings)
    # for s in desc_strings:
    #     for st in desc_strings:
    #         dist = nltk.metrics.edit_distance(
    #             desc_strings[s],
    #             desc_strings[st])
    #         if (len(files_list) * 0.3) < dist < (len(files_list) * 0.8):
    #             print '\n****\nClosest image pair signatures within 0.8 of each other:'
    #             print str(s) + ' : ' + desc_strings[s]
    #             print str(st) + ' : ' + desc_strings[st]
    #             print 'Deviation of sigs (lower is better) : ' + str(dist)
    #             print files_list[s]
    #             print files_list[st]
    #             print '\n'

    # desc_strings is only filled by the commented-out block above, so print the
    # signatures collected by the descriptor manager instead.
    sigs = desc_man.get_all_image_signatures()
    print '\n****\nAll image signatures : \n' + str(sigs)
    for s in sigs:
        for st in sigs:
            dist = nltk.metrics.edit_distance(
                sigs[s],
                sigs[st])
            print sigs[s] + '\n' + sigs[st] + '\n' + str(dist)
            if (len(files_list) * 0.3) < dist < (len(files_list) * 0.8):
                print '\n****\nImage pair with signature edit distance between 0.3 and 0.8 of the image count:'
                print str(s) + ' : ' + sigs[s]
                print str(st) + ' : ' + sigs[st]
                print 'Deviation of sigs (lower is better) : ' + str(dist)
                print files_list[s]
                print files_list[st]
                print '\n'

    print timekeeper.time_now('After description strings', True)

    # new_desc = ImageDescriptor(len(files_list), './portrait/11040852736_83cc5c2155_z.jpg')
    # desc_man.quick_init(new_desc)
    # print '\n' + new_desc.get_image_signature()
    # print new_desc.get_weighted_signature()
    timekeeper.time_now('Finish', True)
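
A hypothetical call to iterall, with the files_list layout inferred from how the function indexes it inside the loops (integer key mapping to an image path); the './portrait' glob pattern is only an assumption borrowed from the commented-out example above.

# Sketch: build an index -> path dict and run the comparison pipeline.
import glob

files_list = {idx: path for idx, path in enumerate(sorted(glob.glob('./portrait/*.jpg')))}
iterall(files_list, match_thresh=1.5)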
Example #3
            res = 0
            for a in range(0, len(values)):
                res += weights[a] * values[a]

            transformed_img.itemset((x,y), res)

    return transformed_img
    # print x


def np_hist_to_cv(np_histogram_output):
    counts, bin_edges = np_histogram_output
    return counts.ravel().astype('float32')
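
np_hist_to_cv flattens the (counts, bin_edges) tuple returned by numpy.histogram into the contiguous float32 array that cv2.compareHist expects. A hedged usage sketch follows, with hypothetical image paths and the OpenCV 2.x correlation constant (cv2.HISTCMP_CORREL on OpenCV 3+):

# Sketch: compare the grayscale histograms of two images.
import cv2
import numpy as np

img_a = cv2.imread('a.jpg', 0)   # hypothetical paths
img_b = cv2.imread('b.jpg', 0)

hist_a = np_hist_to_cv(np.histogram(img_a, bins=256, range=(0, 256)))
hist_b = np_hist_to_cv(np.histogram(img_b, bins=256, range=(0, 256)))

print cv2.compareHist(hist_a, hist_b, cv2.cv.CV_COMP_CORREL)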

tk = TimeKeeper()
tk.time_now('start')

filename = 'animals/11175682543_4db70a7f6f_z.jpg'

img = cv2.imread(filename, 0)
transformed_img = img.copy()  # working copy used as the output buffer for the LBP transform

tk.time_now('read img')

transformed_img = calculate_lbp(img, transformed_img)

tk.time_now('lbp')

# cv2.imshow('image', img)
# cv2.imshow('thresholded image', transformed_img)
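
The body of calculate_lbp is not shown above, only its weighted-sum tail. As an assumption (not the author's implementation), a basic 3x3 local binary pattern transform consistent with that tail could look like this:

# Hedged sketch of a simple 3x3 LBP; mirrors the weights/values summation in Example #3.
import cv2

def simple_lbp(img, out):
    weights = [1, 2, 4, 8, 16, 32, 64, 128]   # one bit per neighbour
    for x in range(1, img.shape[0] - 1):
        for y in range(1, img.shape[1] - 1):
            center = img.item(x, y)
            # 8 neighbours, clockwise from the top-left corner; 1 if >= center else 0
            neighbours = [img.item(x - 1, y - 1), img.item(x - 1, y), img.item(x - 1, y + 1),
                          img.item(x, y + 1), img.item(x + 1, y + 1), img.item(x + 1, y),
                          img.item(x + 1, y - 1), img.item(x, y - 1)]
            values = [1 if n >= center else 0 for n in neighbours]
            res = 0
            for a in range(0, len(values)):
                res += weights[a] * values[a]
            out.itemset((x, y), res)
    return out

gray = cv2.imread('animals/11175682543_4db70a7f6f_z.jpg', 0)
lbp_img = simple_lbp(gray, gray.copy())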