def cmd_unlock(args):
    if not args.quiet:
        print("Unlocking database {0}".format(args.database))
    if not args.dry_run:
        database = Database.instance(args.database)
        mapper = DatabaseMapper(database)
        mapper.set_destroy_lock(False)
def main():
    training_file = open('training.txt', 'w')
    testing_file = open('testing.txt', 'w')
    training_images = {'blur': list(), 'noblur': list()}
    testing_images = {'blur': list(), 'noblur': list()}
    parser = argparse.ArgumentParser(description='Generates training/testing files for blur')
    parser.add_argument('-l', '--limit', type=int, metavar='COUNT', required=False, help='Maximum number of images to use')
    parser.add_argument('-r', '--random', action="store_true", default=False, required=False, help='Fetch images ordered randomly if limit is active')
    parser.add_argument('--tag_require', action='append', dest='tags_require', default=None, required=False, help='Tag that must be present on selected images')
    parser.add_argument('--tag_exclude', action='append', dest='tags_exclude', default=None, required=False, help='Tag that must not be present on selected images')
    parser.add_argument('-p', '--percent_training', dest='percent', default=0.25, required=False, help='Fraction of images to use for training')
    parser.add_argument('database', help='Name of database to use')
    args = parser.parse_args()
    db = Database.instance(args.database)
    db_mapper = DatabaseMapper(db)
    images = db_mapper.get_images_for_analysis(kDomain, limit=args.limit, random=args.random, tags_require=args.tags_require, tags_exclude=args.tags_exclude)
    blur_images = list()
    noblur_images = list()
    for image in images:
        if image['annotations'][0]['model'] == 'blur':
            blur_images.append(image)
        else:
            noblur_images.append(image)
    random.shuffle(blur_images)
    random.shuffle(noblur_images)
    blur_training_len = int(len(blur_images) * float(args.percent))
    noblur_training_len = int(len(noblur_images) * float(args.percent))
    training_images['blur'] = blur_images[:blur_training_len]
    testing_images['blur'] = blur_images[blur_training_len:]
    training_images['noblur'] = noblur_images[:noblur_training_len]
    testing_images['noblur'] = noblur_images[noblur_training_len:]
    for file, image_dict in ((training_file, training_images), (testing_file, testing_images)):
        for model in image_dict.keys():
            for image in image_dict[model]:
                file.write('{}\t{}\n'.format(rigor.imageops.find(image), model))
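# Sketch (not part of rigor): each line written above pairs an image path from
# rigor.imageops.find() with its model label ('blur' or 'noblur'), separated by a tab.
# The load_split helper below is hypothetical and only illustrates how a downstream
# trainer might read one of those files back into (path, label) pairs.
def load_split(path):
    samples = []
    with open(path) as split_file:
        for line in split_file:
            line = line.rstrip('\n')
            if not line:
                continue
            image_path, model = line.split('\t')
            samples.append((image_path, model))
    return samples

# Example: training_samples = load_split('training.txt')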
def main():
    parser = argparse.ArgumentParser(description='Runs text detector on relevant images')
    parser.add_argument('classifier_file', help='Path to classifier CLF')
    parser.add_argument('-l', '--limit', type=int, metavar='COUNT', required=False, help='Maximum number of images to use')
    parser.add_argument('-r', '--random', action="store_true", default=False, required=False, help='Fetch images ordered randomly if limit is active')
    parser.add_argument('database', help='Database to use')
    args = parser.parse_args()
    parameters["classifier_file"] = args.classifier_file
    i = rigor.runner.Runner('text', parameters, limit=args.limit, random=args.random)
    database_mapper = DatabaseMapper(Database.instance(args.database))
    for result in i.run():
        detected = result[1]
        expected = result[2]
        image = database_mapper.get_image_by_id(result[0])
        cv_image = rigor.imageops.fetch(image)
        # Draw expected (ground truth) polygons in green and detected polygons in
        # yellow; colors are BGR tuples (the cv2 module has no RGB helper).
        cv2.polylines(cv_image, expected, True, (0, 255, 0))
        cv2.polylines(cv_image, detected, True, (0, 255, 255))
        cv2.imwrite(".".join((str(image["id"]), image["format"])), cv_image)
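# Sketch (assumption about data shapes, not from rigor): cv2.polylines expects a
# sequence of integer point arrays. A minimal self-contained example of the drawing
# call used above, on a synthetic canvas:
#
# import numpy as np
# import cv2
#
# canvas = np.zeros((100, 100, 3), dtype=np.uint8)
# box = np.array([[10, 10], [90, 10], [90, 90], [10, 90]], dtype=np.int32)
# cv2.polylines(canvas, [box], True, (0, 255, 0))  # closed polygon, green in BGR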
def cmd_lock(args):
    if not args.quiet:
        print("Locking database {0}".format(args.database))
    if not args.dry_run:
        database = Database.instance(args.database)
        mapper = DatabaseMapper(database)
        mapper.set_destroy_lock(True)
def cmd_patch(args):
    database = Database.instance(args.database)
    mapper = DatabaseMapper(database)
    start_level = mapper.get_patch_level() + 1
    stop_level = None
    if args.level:
        stop_level = args.level
    patch(mapper, args.patch_dir, start_level, stop_level, args.dry_run, args.quiet)
def cmd_clone(args):
    if not args.quiet:
        print("Cloning database {0} to {1}".format(args.source, args.destination))
    if not args.dry_run:
        Database.cls().clone(args.source, args.destination)
        database = Database.instance(args.destination)
        mapper = DatabaseMapper(database)
        mapper.set_destroy_lock(False)  # new databases are always unlocked
def cmd_destroy(args):
    if not args.quiet:
        print("Destroying database {0}".format(args.database))
    database = Database.instance(args.database)
    mapper = DatabaseMapper(database)
    if mapper.get_destroy_lock():
        sys.stderr.write("Error: database is locked\n")
        sys.exit(2)
    mapper = None
    database = None
    if not args.dry_run:
        Database.cls().drop(args.database)
def main():
    rigor.domain.money.init(parameters)
    logger = rigor.logger.getLogger(__file__)
    database_mapper = DatabaseMapper(Database.instance(kDatabase))
    logger.debug('Fetching image IDs from database')
    images = database_mapper.get_images_for_analysis(kDomain, kLimit, False)
    for parameter_set in get_parameters():
        timestamp = datetime.utcnow().strftime("{0}-%Y%m%d_%H%M%S%f".format(kDomain))
        with open("{0}.params".format(timestamp), "w") as parameter_file:
            json.dump(parameter_set, parameter_file)
            parameter_file.write("\n")
        with open("{0}.results".format(timestamp), "w") as result_file:
            image_config = partial(rigor.domain.money.run, parameters=parameter_set)
            logger.debug('Processing {0} images'.format(len(images)))
            for result in map(image_config, images):
                result_file.write("\t".join([str(x) for x in result]))
                result_file.write("\n")
def cmd_create(args):
    if not args.quiet:
        print("Creating database {0}".format(args.database))
    if not args.dry_run:
        Database.cls().create(args.database)
        stop_level = None
        if args.level:
            stop_level = args.level
        try:
            database = Database.instance(args.database)
            mapper = DatabaseMapper(database)
            patch(mapper, args.patch_dir, 0, stop_level, args.dry_run, True)
        except:
            # Save exception for later, drop the half-created database, then re-raise
            exc_info = sys.exc_info()
            try:
                Database.cls().drop(args.database)
            except:
                pass
            raise exc_info[0], exc_info[1], exc_info[2]
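# Sketch (assumptions, not the actual admin script): the cmd_* handlers above read
# quiet, dry_run, database, and, for create/patch, level and patch_dir from the
# argparse namespace. One possible wiring into argparse subcommands; the flag
# spellings and defaults below are guesses based only on those attribute names.
def build_parser():
    parser = argparse.ArgumentParser(description='Database administration')
    parser.add_argument('-q', '--quiet', action='store_true', default=False, help='Suppress progress output')
    parser.add_argument('-n', '--dry-run', dest='dry_run', action='store_true', default=False, help='Report actions without performing them')
    subparsers = parser.add_subparsers()
    create = subparsers.add_parser('create', help='Create and patch a new database')
    create.add_argument('database')
    create.add_argument('--level', type=int, default=None, help='Stop applying patches at this level')
    create.add_argument('--patch_dir', default='.', help='Directory containing patch files')
    create.set_defaults(func=cmd_create)
    lock = subparsers.add_parser('lock', help='Prevent a database from being destroyed')
    lock.add_argument('database')
    lock.set_defaults(func=cmd_lock)
    # The remaining handlers (unlock, clone, destroy, patch) would be wired the same way.
    return parser

# Example: args = build_parser().parse_args(); args.func(args)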
def __init__(self, database):
    self._dbmapper = DatabaseMapper(Database.instance(database))
"""" Script to delete ground truth (image, thumbnail, and all!) """ import argparse import rigor.imageops from rigor.dbmapper import DatabaseMapper from rigor.database import Database parser = argparse.ArgumentParser( description='Deletes ground truth (image, thumbnail, and all!)') parser.add_argument('database', help='Name of database to use') parser.add_argument('delete_ids', metavar='delete_id', nargs='+', type=int, help='ID(s) of images to delete') args = parser.parse_args() db = Database.instance(args.database) db_mapper = DatabaseMapper(db) for image_id in args.delete_ids: image = db_mapper.get_image_by_id(image_id) print("OBLITERATING {}".format(image['id'])) rigor.imageops.destroy_image(db, image)
"""" Script to delete ground truth (image, thumbnail, and all!) """ import argparse import rigor.imageops from rigor.dbmapper import DatabaseMapper from rigor.database import Database parser = argparse.ArgumentParser(description='Deletes ground truth (image, thumbnail, and all!)') parser.add_argument('database', help='Name of database to use') parser.add_argument('delete_ids', metavar='delete_id', nargs='+', type=int, help='ID(s) of images to delete') args = parser.parse_args() db = Database.instance(args.database) db_mapper = DatabaseMapper(db) for image_id in args.delete_ids: image = db_mapper.get_image_by_id(image_id) print("OBLITERATING {}".format(image['id'])) rigor.imageops.destroy_image(db, image)