Example no. 1
def _fetchAllResults(query, daysBack, maxRequests=None):
    """Fetch all paginated results for a query by following the search cursor.

    maxRequests caps the total number of page requests (defaults to all pages).
    """
    results = []
    searchData = search.search(query, daysBack)
    results += searchData["results"]
    if maxRequests is None:
        maxRequests = len(searchData["cursor"]["pages"])

    # The first page was already fetched above; request the remaining pages.
    for page in searchData["cursor"]["pages"][1:maxRequests]:
        start = page["start"]
        data = search.search(query, daysBack, start=start)
        results += data["results"]
    return results
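A hypothetical call site for the helper above, assuming the same `search` module and the field names (`results`, `cursor`, `pages`, `unescapedUrl`) that appear in these examples; the query string and limits are illustrative only:

# Illustrative usage: cap the crawl at two page requests in total.
all_results = _fetchAllResults("site reliability", daysBack=7, maxRequests=2)
for result in all_results:
    print(result["unescapedUrl"])  # field name as used in Example no. 7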
Example no. 2
def time_predictions(search_model, queries, rerank_n, map_n):
    total_time = 0

    for idx, query in enumerate(queries):
        print('Running query {}/{}: {}'.format(idx + 1, len(queries), query))
        image = convert_image(Image.open(query))

        # Time only the search call itself, not the image loading above.
        start_time = timer()
        search(search_model, image, top_n=map_n, localize_n=rerank_n)
        end_time = timer()
        total_time += end_time - start_time

    return total_time
Example no. 3
def main(args):
    import numpy as np

    args = parser.parse_args(args)

    if not exists(args.image_dir):
        print("Path {} does not exist".format(args.image_dir))
        return

    images_path = args.image_dir

    images = os.listdir(images_path)
    extensions = ['.png', '.jpg', '.jpeg']
    images = [
        img for img in images if os.path.splitext(img)[1].lower() in extensions
    ]

    search_model = SearchModel(args.features)

    result_data = {}

    for image_file in images:

        if not exists(os.path.join(images_path, image_file)):
            print("{} does not exists. Skipping...".format(image_file))
            continue

        query = loadImage(join(images_path, image_file))

        results, similarities, bboxes = search(search_model, query, top_n=1)

        print('Top result for query image {}'.format(image_file))

        result_path = search_model.get_metadata(results[0])['image']

        image_file_name = image_file.split('.')[0]

        # Group query hits by the result image's base name; each entry pairs
        # the query file name with the matched bounding box.
        res_path = result_path.split('/')[-1].split('.')[0]
        similar_images = [image_file_name, list(bboxes[0])]
        result_data.setdefault(res_path, []).append(similar_images)

        print('{}\t{:.4f}\t{}'.format(result_path, similarities[0], bboxes[0]))

    meta_file_name = args.output_file
    with open(meta_file_name, 'w') as f:
        json.dump(result_data, f)
Example no. 4
def search_image(image, bounding_box, top_n, search_mode):
    try:
        image = Image.open(image).convert('RGB')
    except (IOError, OSError):
        raise InvalidUsage('Error decoding image', 415)

    try:
        crop = crop_image(image, bounding_box)
    except ValueError:
        raise InvalidUsage('Bad bounding box', 400)

    try:
        indices, scores, bboxes = search(search_model,
                                         convert_image(crop),
                                         top_n=top_n,
                                         localize=search_mode.localize,
                                         rerank=search_mode.rerank,
                                         avg_qe=search_mode.avg_qe)
    except ValueError as e:
        print('Error while searching for roi: {}'.format(e))
        if app.debug:
            import traceback
            traceback.print_tb(e.__traceback__)
        raise InvalidUsage('Internal error while searching', 400)

    if bboxes is None:
        bboxes = [None] * len(indices)

    # Build response
    results = []
    for index, score, bbox in zip(indices, scores, bboxes):
        image_path = search_model.get_metadata(index)['image']
        image_info = search_model.query_database(image_path)
        if image_info is None:
            print(
                'Warning: result image {} not found in db'.format(image_path))
            continue
        image_dict = {
            'name': os.path.basename(image_path),
            'score': round(score, 4) if not isnan(score) else 'NaN',
            'url': 'static/data/' + image_path,
            'ext_url': image_info.get('external_url', '')
        }
        if bbox:
            image_dict['bbox'] = {
                'x1': bbox[0],
                'y1': bbox[1],
                'x2': bbox[2],
                'y2': bbox[3]
            }
        results.append(image_dict)
    return results
Example no. 5
def run_predictions(search_model, queries, rerank_n, map_n):
    predictions = OrderedDict()
    for idx, query in enumerate(queries):
        print('Running query {}/{}: {}'.format(idx + 1, len(queries), query))
        image = convert_image(Image.open(query))
        results, _, bboxes = search(search_model,
                                    image,
                                    top_n=map_n,
                                    localize_n=rerank_n)
        predictions[query] = []
        for result, bbox in zip(results, bboxes):
            result_path = search_model.get_metadata(result)['image']
            predictions[query].append((result_path, bbox))
    return predictions
Example no. 6
def main(args):
    args = parser.parse_args(args)

    if args.bbox:
        args.bbox = tuple(args.bbox)

    if args.output and not args.image_dir:
        print('Image directory required with option output')
        return

    query_images = []
    for image_path in args.images:
        if os.path.exists(image_path):
            query_images.append(image_path)
        else:
            print('Image {} does not exist. Skipping.'.format(image_path))

    search_model = SearchModel(args.model, args.features, args.database)

    for image_path in query_images:
        with Image.open(image_path) as image:
            image = image.convert('RGB')
            if args.bbox:
                image = crop_image(image, args.bbox)
            query = convert_image(image)

        start_time = timer()
        results, similarities, bboxes = search(search_model,
                                               query,
                                               top_n=args.top_n)
        end_time = timer()

        print('Search took {:.6f} seconds'.format(end_time - start_time))
        print('Top {} results for query image {}'.format(
            args.top_n, image_path))
        for result, similarity, bbox in zip(results, similarities, bboxes):
            result_path = search_model.get_metadata(result)['image']

            print('{}\t{:.4f}\t{}'.format(result_path, similarity, bbox))

            if args.output:
                result_path = join(args.image_dir, result_path)
                target_path = join(
                    args.output, '{}_{:.4f}_{}'.format(basename(image_path),
                                                       similarity,
                                                       basename(result_path)))
                draw_bbox_and_save(result_path, target_path, bbox)
Example no. 7
def searchBoth(query, daysBack=1):
    results = search.search(query, daysBack=daysBack)["results"]
    if len(results) == 0:
        return [], []

    relevant = []
    excluded = []
    competition = _searchCompetitionResults(query)
    competitionUrls = _extractUrls(competition)
    for result in results:
        if result["unescapedUrl"] in competitionUrls:
            relevant.append(result)
            #logging.info("Found at: %s", competitionUrls.index(result["unescapedUrl"]))
        else:
            excluded.append(result)
    logging.debug("Relevant rate: %s/%s", len(relevant), len(results))

    return relevant, excluded
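A hypothetical call site for searchBoth, with an illustrative query string and day window; it assumes the surrounding `search` module and logging setup from the example:

relevant, excluded = searchBoth("acme widgets", daysBack=3)
print(len(relevant), "relevant /", len(excluded), "excluded")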
Example no. 8
def main():
    """Main method."""
    parser = argparse.ArgumentParser(description='Look for an author in Web of Science.')
    parser.add_argument('author', help='Surname and name of the author')
    parser.add_argument('-v', '--verbose', action='store_true', help='Verbose')
    parser.add_argument('-r', '--results', type=int, default=100,
                        help='Number of results to be shown')
    parser.add_argument('-y', '--years', type=int, default=5,
                        help='Max age of shown papers')
    parser.add_argument('-A', '--affiliation', help='Affiliation of the author')
    parser.add_argument('--save', action='store_true', help='save results in a db')

    args = parser.parse_args()

    # Search the author
    results = search.search(args.author, args.years, args.results, args.affiliation)

    # Save in db
    if args.save and len(results) > 0:
        print_('Save records in db')
        dbconn.save(args.author.lower(), results)
        print_('Completed!')
Example no. 9
def test_if_correct_2():
    assert search(pattern='defabc', text='abcdef') == 3
Example no. 10
def test_if_correct():
    assert search('aaa', 'aaa') == 0
Example no. 11
def test_if_pattern_empty():
    assert search(pattern='', text='fld;fld') == -1
Example no. 12
def test_if_wrong():
    assert search(pattern='defabf', text='abcdef') == -1
Example no. 13
def test_valid_string(pattern, answer, text):
    # Presumably driven by a pytest parametrize decorator or fixture that
    # supplies (pattern, answer, text); not shown in this excerpt.
    assert str(answer) == str(search(pattern=pattern,
                                     text=text))
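Taken together, these assertions pin down the contract of `search`: an empty pattern returns -1, an exact match returns its start index, and `'defabc'` matching `'abcdef'` at index 3 implies a circular (rotation-aware) match. A minimal sketch consistent with those assertions, not the actual `src.search` implementation:

def search(pattern, text):
    # Sketch only: circular substring search inferred from the tests above.
    # An empty pattern, or one longer than the text, cannot match.
    if not pattern or len(pattern) > len(text):
        return -1
    # Doubling the text exposes every rotation; find() returns -1 if absent.
    return (text + text).find(pattern)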
Example no. 14
def warmup_jit(search_model, queries, rerank_n, map_n):
    image = convert_image(Image.open(queries[0]))
    search(search_model, image, top_n=map_n, localize_n=rerank_n)
Example no. 15
from src.search import search

print(search(pattern=input(), text=input()))
Example no. 16
    def get(self):
        msg = request.args.get('msg')
        ret = search(msg)
        return {'data': ret}
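The `get` method above reads like a Flask-RESTful resource handler. A minimal sketch of how such a fragment is typically wired up; the resource name, route, and `search` stub below are assumptions, not taken from the source:

from flask import Flask, request
from flask_restful import Api, Resource

def search(msg):
    # Stand-in for the project's real search helper (assumption).
    return [msg] if msg else []

class SearchResource(Resource):
    def get(self):
        msg = request.args.get('msg')
        ret = search(msg)
        return {'data': ret}

app = Flask(__name__)
api = Api(app)
api.add_resource(SearchResource, '/search')  # handles GET /search?msg=...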
Example no. 17
from src.map import maps
from src.search import search
from src.robot import edward_bot
from src.visualize import display
from src.Map import Map_plot as map
# import cv2

# Construct the map and robot (constructor arguments as given in the source).
M = map(150, 300, 5, 15, 4)

E = edward_bot(5, 5, 30, 0, M)
resolution = 10

# Set up a search between [200, 120] and [10, 10], then run A* for the path.
Path1 = search([200, 120], [10, 10], 10, M, E, 1)
path1 = Path1.A_star()

# D1 = display(Path1.start_position, Path1.goal_position, M.x_min,M.y_min,M.x_max,M.y_max)
# obstacle_list = []
# D1.display_map(path1,Path1.visited_node,obstacle_list)