def __init__(
    self,
    preprocessor=lambda x: (x - 127.5) / 128.0,
    memory_demanding=False,
    device=None,
    **kwargs,
):
    urls = [
        "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/iresnet100-elastic.tar.gz",
        "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/iresnet100-elastic.tar.gz",
    ]

    filename = get_file(
        "iresnet100-elastic.tar.gz",
        urls,
        cache_subdir="data/pytorch/iresnet100-elastic/",
        file_hash="0ac36db3f0f94930993afdb27faa4f02",
        extract=True,
    )
    path = os.path.dirname(filename)
    config = os.path.join(path, "iresnet.py")
    checkpoint_path = os.path.join(path, "iresnet100-elastic.pt")

    super(IResnet100Elastic, self).__init__(
        checkpoint_path,
        config,
        memory_demanding=memory_demanding,
        preprocessor=preprocessor,
        device=device,
        **kwargs,
    )
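# Usage sketch (not part of the original source): bob.bio.face embedding
# extractors follow the scikit-learn transformer API, so extraction would
# look roughly like the following. The `transform` call and the 112x112 RGB
# crop size are assumptions here, not guaranteed by this snippet alone.
#
#   import numpy
#   extractor = IResnet100Elastic()
#   faces = numpy.random.rand(2, 3, 112, 112) * 255.0  # hypothetical crops
#   embeddings = extractor.transform(faces)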
def __init__( self, protocol, annotation_type="eyes-center", fixed_positions=None, dataset_original_directory=rc.get("bob.db.meds.directory", ""), dataset_original_extension=".jpg", ): # Downloading model if not exists urls = MEDSDatabase.urls() filename = get_file("meds.tar.gz", urls, file_hash="3b01354d4c170672ac14120b80dace75") super().__init__( name="meds", dataset_protocol_path=filename, protocol=protocol, csv_to_sample_loader=make_pipeline( CSVToSampleLoaderBiometrics( data_loader=bob.io.base.load, dataset_original_directory=dataset_original_directory if dataset_original_directory else "", extension=dataset_original_extension, ), EyesAnnotations(), ), annotation_type=annotation_type, fixed_positions=fixed_positions, )
def __init__( self, database_path, database_extension=".png", idiap_path=True, include_unknow_demographics=False, load_bucket_from_cache=True, transform=None, ): self.idiap_path = idiap_path self.database_path = database_path self.database_extension = database_extension self.include_unknow_demographics = include_unknow_demographics self.load_bucket_from_cache = load_bucket_from_cache self.transform = transform # Private keys self._possible_genders = ["male", "female", "other"] # filename = "/idiap/user/tpereira/gitlab/bob/database-purgatory/wikidata/msceleb_race_wikidata.csv" urls = MSCelebTorchDataset.urls() filename = (get_file( "msceleb_race_wikidata.tar.gz", urls, file_hash="76339d73f352faa00c155f7040e772bb", extract=True, )[:-7] + ".csv") self.load_bucket(filename)
def test_meds():
    from bob.bio.face.database import MEDSDatabase

    # Getting the absolute path
    urls = MEDSDatabase.urls()
    filename = get_file("meds.tar.gz", urls)

    # Removing the file before the test
    try:
        os.remove(filename)
    except Exception:
        pass

    database = MEDSDatabase("verification_fold1")

    assert len(database.background_model_samples()) == 234
    assert len(database.references()) == 111
    assert len(database.probes()) == 313

    assert len(database.zprobes()) == 80
    assert len(database.treferences()) == 80

    assert len(database.references(group="eval")) == 112
    assert len(database.probes(group="eval")) == 309
def test_multipie():
    from bob.bio.face.database import MultipieDatabase

    # Getting the absolute path
    urls = MultipieDatabase.urls()
    filename = get_file("multipie.tar.gz", urls)

    # Removing the file before the test
    try:
        os.remove(filename)
    except Exception:
        pass

    protocols = MultipieDatabase.protocols()

    for p in protocols:
        database = MultipieDatabase(protocol=p)
        assert len(database.background_model_samples()) > 0
        assert len(database.references(group="dev")) > 0
        assert len(database.probes(group="dev")) > 0
        assert len(database.references(group="eval")) > 0
        assert len(database.probes(group="eval")) > 0

    database = MultipieDatabase(protocol="P")
    assert len(database.background_model_samples()) == 7725
    assert len(database.references(group="dev")) == 64
    assert len(database.probes(group="dev")) == 3328
    assert len(database.references(group="eval")) == 65
    assert len(database.probes(group="eval")) == 3380
def __init__(self, memory_demanding=False, device=None, **kwargs):
    urls = [
        "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/AFFFE-42a53f19.tar.gz",
        "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/AFFFE-42a53f19.tar.gz",
    ]

    filename = get_file(
        "AFFFE-42a53f19.tar.gz",
        urls,
        cache_subdir="data/pytorch/AFFFE-42a53f19.tar.gz",
        file_hash="1358bbcda62cb59b85b2418ef1f81e9b",
        extract=True,
    )
    path = os.path.dirname(filename)
    config = os.path.join(path, "AFFFE.py")
    checkpoint_path = os.path.join(path, "AFFFE.pth")

    super(AFFFE_2021, self).__init__(
        checkpoint_path,
        config,
        memory_demanding=memory_demanding,
        device=device,
        **kwargs,
    )
def __init__( self, protocol, dataset_original_directory=rc.get("bob.bio.face.vgg2.directory", ""), dataset_original_extension=rc.get("bob.bio.face.vgg2.extension", ".jpg"), annotation_type="eyes-center", fixed_positions=None, ): # Downloading model if not exists urls = VGG2Database.urls() filename = get_file("vgg2.tar.gz", urls, file_hash="4a05d797a326374a6b52bcd8d5a89d48") super().__init__( name="vgg2", dataset_protocol_path=filename, protocol=protocol, csv_to_sample_loader=make_pipeline( CSVToSampleLoaderBiometrics( data_loader=bob.io.base.load, dataset_original_directory=dataset_original_directory, extension=dataset_original_extension, ), VGG2Annotations(), ), annotation_type=annotation_type, fixed_positions=fixed_positions, )
def __init__( self, protocol, annotation_type="eyes-center", fixed_positions=None, dataset_original_directory=rc.get("bob.db.morph.directory", ""), dataset_original_extension=".JPG", ): # Downloading model if not exists urls = MorphDatabase.urls() filename = get_file("morph.tar.gz", urls, file_hash="9efa1ff13ef6984ebfcf86f1b1f58873") super().__init__( name="morph", dataset_protocol_path=filename, protocol=protocol, csv_to_sample_loader=make_pipeline( CSVToSampleLoaderBiometrics( data_loader=bob.io.base.load, dataset_original_directory=dataset_original_directory if dataset_original_directory else "", extension=dataset_original_extension, ), EyesAnnotations(), ), annotation_type=annotation_type, fixed_positions=fixed_positions, )
def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None): # Downloading model if not exists urls = FRGCDatabase.urls() filename = get_file( "frgc.tar.gz", urls, file_hash="242168e993fe0f6f29bd59fccf3c79a0", ) super().__init__( name="frgc", dataset_protocol_path=filename, protocol=protocol, csv_to_sample_loader=make_pipeline( CSVToSampleLoaderBiometrics( data_loader=bob.io.base.load, dataset_original_directory=rc.get( "bob.bio.face.frgc.directory", ""), extension="", reference_id_equal_subject_id=False, ), EyesAnnotations(), ), annotation_type=annotation_type, fixed_positions=fixed_positions, score_all_vs_all=True, group_probes_by_reference_id=True, memory_demanding=True, ) self.hash_fn = hash_string
def __init__( self, protocol, annotation_type="eyes-center", fixed_positions=None, dataset_original_directory=rc.get("bob.db.mobio.directory", ""), dataset_original_extension=rc.get("bob.db.mobio.extension", ".png"), ): # Downloading model if not exists urls = MobioDatabase.urls() filename = get_file("mobio.tar.gz", urls, file_hash="4a7f99b33a54b2dd337ddcaecb09edb8") super().__init__( name="mobio", dataset_protocol_path=filename, protocol=protocol, csv_to_sample_loader=make_pipeline( CSVToSampleLoaderBiometrics( data_loader=bob.io.base.load, dataset_original_directory=dataset_original_directory, extension=dataset_original_extension, ), EyesAnnotations(), ), annotation_type=annotation_type, fixed_positions=fixed_positions, )
def test_morph():
    from bob.bio.face.database import MorphDatabase

    # Getting the absolute path
    urls = MorphDatabase.urls()
    filename = get_file("morph.tar.gz", urls)

    # Removing the file before the test
    try:
        os.remove(filename)
    except Exception:
        pass

    database = MorphDatabase("verification_fold1")

    assert len(database.background_model_samples()) == 226
    assert len(database.references()) == 6738
    assert len(database.probes()) == 6557

    assert len(database.zprobes()) == 66
    assert len(database.treferences()) == 69

    assert len(database.references(group="eval")) == 6742
    assert len(database.probes(group="eval")) == 6553
def get_protocol_file(database_name: str):
    """Returns the protocol definition archive, downloading it if necessary.

    Looks for the file in the ``datasets`` subfolder of ``bob_data_folder``
    and downloads it from
    https://www.idiap.ch/software/bob/data/bob/bob.bio.spear/ if needed.
    """
    if database_name not in known_databases:
        raise ValueError(
            f"The provided database name '{database_name}' is unknown. Use one of "
            f"{known_databases.keys()} or specify a dataset_protocol_path to "
            "'SpearBioDatabase'."
        )

    proto_def_hash = known_databases[database_name]["crc"]
    proto_def_name = known_databases[database_name]["definition_file"]
    proto_def_urls = [
        f"https://www.idiap.ch/software/bob/data/bob/bob.bio.spear/{proto_def_name}",
        f"http://www.idiap.ch/software/bob/data/bob/bob.bio.spear/{proto_def_name}",
    ]

    logger.info(f"Retrieving protocol definition file '{proto_def_name}'.")
    return get_file(
        filename=proto_def_name,
        urls=proto_def_urls,
        file_hash=proto_def_hash,
        cache_subdir="datasets",
    )
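# Usage sketch (hedged): the returned archive path is meant to be passed to
# `SpearBioDatabase` through its `dataset_protocol_path` argument (the name
# given in the error message above). "mobio" is a hypothetical key of
# `known_databases`, shown only for illustration.
#
#   archive = get_protocol_file("mobio")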
def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None): # Downloading model if not exists urls = MultipieDatabase.urls() filename = get_file( "multipie.tar.gz", urls, file_hash="6c27c9616c2d0373c5f052b061d80178", ) super().__init__( name="multipie", dataset_protocol_path=filename, protocol=protocol, csv_to_sample_loader=make_pipeline( CSVToSampleLoaderBiometrics( data_loader=bob.io.base.load, dataset_original_directory=rc["bob.db.multipie.directory"] if rc["bob.db.multipie.directory"] else "", extension=".png", ), MultiposeAnnotations(), ), annotation_type=["eyes-center", "left-profile", "right-profile"], fixed_positions=fixed_positions, )
def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None): # Downloading model if not exists urls = CaspealDatabase.urls() filename = get_file( "caspeal.tar.gz", urls, file_hash="1c77f660ef85fa263a2312fd8263d0d9", ) super().__init__( name="caspeal", dataset_protocol_path=filename, protocol=protocol, csv_to_sample_loader=make_pipeline( CSVToSampleLoaderBiometrics( data_loader=bob.io.base.load, dataset_original_directory=rc[ "bob.bio.face.caspeal.directory"] if rc["bob.bio.face.caspeal.directory"] else "", extension=".png", ), EyesAnnotations(), ), annotation_type=annotation_type, fixed_positions=fixed_positions, )
def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None): # Downloading model if not exists urls = ARFaceDatabase.urls() filename = get_file( "arface.tar.gz", urls, file_hash="66cf05fe03adb8d73a76fd75641dd468", ) super().__init__( name="arface", dataset_protocol_path=filename, protocol=protocol, csv_to_sample_loader=make_pipeline( CSVToSampleLoaderBiometrics( data_loader=bob.io.base.load, dataset_original_directory=rc[ "bob.bio.face.arface.directory"] if rc["bob.bio.face.arface.directory"] else "", extension=rc["bob.bio.face.arface.extension"] if rc["bob.bio.face.arface.extension"] else ".ppm", ), EyesAnnotations(), ), annotation_type=annotation_type, fixed_positions=fixed_positions, )
def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None): # Downloading model if not exists urls = CasiaAfricaDatabase.urls() filename = get_file( "casia-africa.tar.gz", urls, file_hash="080d4bfffec95a6445507065054757eb", ) directory = (rc["bob.db.casia-africa.directory"] if rc["bob.db.casia-africa.directory "] else "") super().__init__( name="casia-africa", dataset_protocol_path=filename, protocol=protocol, csv_to_sample_loader=make_pipeline( CSVToSampleLoaderBiometrics( data_loader=bob.io.base.load, dataset_original_directory=directory, extension=".jpg", reference_id_equal_subject_id=False, ), EyesAnnotations(), ), annotation_type=annotation_type, fixed_positions=fixed_positions, )
def test_scface():
    from bob.bio.face.database import SCFaceDatabase

    # Getting the absolute path
    urls = SCFaceDatabase.urls()
    filename = get_file("scface.tar.gz", urls)

    # Removing the file before the test
    try:
        os.remove(filename)
    except Exception:
        pass

    N_WORLD, N_DEV, N_EVAL = 43, 44, 43
    N_WORLD_DISTANCES = 3
    N_RGB_CAMS, N_IR_CAMS = 5, 2
    N_RGB_MUGSHOTS, N_IR_MUGSHOTS = 1, 1

    def _check_protocol(p, n_mugshots, n_cams, n_distances):
        database = SCFaceDatabase(protocol=p)

        assert len(database.background_model_samples()) == N_WORLD * (
            n_mugshots + N_WORLD_DISTANCES * n_cams
        )

        assert len(database.references(group="dev")) == N_DEV * n_mugshots
        assert len(database.probes(group="dev")) == N_DEV * n_distances * n_cams

        assert len(database.references(group="eval")) == N_EVAL * n_mugshots
        assert len(database.probes(group="eval")) == N_EVAL * n_distances * n_cams

        return p

    checked_protocols = []
    checked_protocols.append(
        _check_protocol("combined", N_RGB_MUGSHOTS, N_RGB_CAMS, n_distances=3)
    )
    checked_protocols.append(
        _check_protocol("close", N_RGB_MUGSHOTS, N_RGB_CAMS, n_distances=1)
    )
    checked_protocols.append(
        _check_protocol("medium", N_RGB_MUGSHOTS, N_RGB_CAMS, n_distances=1)
    )
    checked_protocols.append(
        _check_protocol("far", N_RGB_MUGSHOTS, N_RGB_CAMS, n_distances=1)
    )
    checked_protocols.append(
        _check_protocol("IR", N_IR_MUGSHOTS, N_IR_CAMS, n_distances=1)
    )

    for p in SCFaceDatabase.protocols():
        assert p in checked_protocols, "Protocol {} untested".format(p)
def __init__( self, protocol, annotation_type="bounding-box", fixed_positions=None, original_directory=rc.get("bob.bio.video.youtube.directory", ""), extension=".jpg", annotation_extension=".labeled_faces.txt", frame_selector=None, ): self._check_protocol(protocol) original_directory = original_directory or "" if not os.path.exists(original_directory): logger.warning( "Invalid or non existent `original_directory`: f{original_directory}." "Please, do `bob config set bob.bio.video.youtube.directory PATH` to set the Youtube data directory." ) urls = YoutubeDatabase.urls() cache_subdir = os.path.join("datasets", "youtube_protocols") self.filename = get_file( "youtube_protocols-6962cd2e.tar.gz", urls, file_hash="8a4792872ff30b37eab7f25790b0b10d", extract=True, cache_subdir=cache_subdir, ) self.protocol_path = os.path.dirname(self.filename) self.references_dict = {} self.probes_dict = {} # Dict that holds a `subject_id` as a key and has # filenames as values self.subject_id_files = {} self.reference_id_to_subject_id = None self.reference_id_to_sample = None self.load_file_client_id() self.original_directory = original_directory self.extension = extension self.annotation_extension = annotation_extension self.frame_selector = frame_selector super().__init__( name="youtube", protocol=protocol, score_all_vs_all=False, annotation_type=annotation_type, fixed_positions=None, memory_demanding=True, )
def _get_iresnet_file():
    urls = [
        "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/iresnet-91a5de61.tar.gz",
        "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/iresnet-91a5de61.tar.gz",
    ]

    return get_file(
        "iresnet-91a5de61.tar.gz",
        urls,
        cache_subdir="data/pytorch/iresnet-91a5de61/",
        file_hash="3976c0a539811d888ef5b6217e5de425",
        extract=True,
    )
def __init__(self, prob_thresh=0.5, **kwargs):
    super().__init__(**kwargs)
    import mxnet as mx

    urls = [
        "https://www.idiap.ch/software/bob/data/bob/bob.ip.facedetect/master/tinyface_detector.tar.gz"
    ]

    filename = get_file(
        "tinyface_detector.tar.gz",
        urls,
        cache_subdir="data/tinyface_detector",
        file_hash="f24e820b47a7440d7cdd7e0c43d4d455",
        extract=True,
    )
    self.checkpoint_path = os.path.dirname(filename)

    self.MAX_INPUT_DIM = 5000.0
    self.prob_thresh = prob_thresh
    self.nms_thresh = 0.1
    self.model_root = pkg_resources.resource_filename(
        __name__, self.checkpoint_path
    )

    sym, arg_params, aux_params = mx.model.load_checkpoint(
        os.path.join(self.checkpoint_path, "hr101"), 0
    )
    all_layers = sym.get_internals()

    with open(os.path.join(self.checkpoint_path, "meta.pkl"), "rb") as meta_file:
        self.clusters = pickle.load(meta_file)
        self.averageImage = pickle.load(meta_file)

    self.clusters_h = self.clusters[:, 3] - self.clusters[:, 1] + 1
    self.clusters_w = self.clusters[:, 2] - self.clusters[:, 0] + 1
    self.normal_idx = np.where(self.clusters[:, 4] == 1)

    self.mod = mx.mod.Module(
        symbol=all_layers["fusex_output"],
        data_names=["data"],
        label_names=None,
    )
    self.mod.bind(
        for_training=False,
        data_shapes=[("data", (1, 3, 224, 224))],
        label_shapes=None,
        force_rebind=False,
    )
    self.mod.set_params(
        arg_params=arg_params, aux_params=aux_params, force_init=False
    )
def download_faceX_model():
    urls = [
        "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/faceX_models.tar.gz",
        "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/faceX_models.tar.gz",
    ]

    filename = get_file(
        "faceX_models.tar.gz",
        urls,
        cache_subdir="data/pytorch/",
        file_hash="eb7ec871f434d2f44e5408627d656297",
        extract=True,
    )
    return filename
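# Usage sketch: as in the loaders above, `get_file(..., extract=True)` returns
# the path of the cached archive, so the extracted content sits next to it.
# The `faceX_models` folder name is an assumption based on the archive name,
# not confirmed by this snippet.
#
#   archive = download_faceX_model()
#   models_dir = os.path.join(os.path.dirname(archive), "faceX_models")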
def get_facexzoo_file(self):
    urls = [
        "https://www.idiap.ch/software/bob/data/bob/bob.learn.pytorch/facexzoomodels/{}".format(
            self.info[self.arch][0]
        ),
        "http://www.idiap.ch/software/bob/data/bob/bob.learn.pytorch/facexzoomodels/{}".format(
            self.info[self.arch][0]
        ),
    ]

    return get_file(
        self.info[self.arch][0],
        urls,
        cache_subdir="data/pytorch/{}/".format(self.info[self.arch][0]),
        file_hash=self.info[self.arch][1],
        extract=True,
    )
def __init__(self, embedding_layer="fc7"): urls = [ "https://www.robots.ox.ac.uk/~vgg/software/vgg_face/src/vgg_face_caffe.tar.gz", "http://bobconda.lab.idiap.ch/public-upload/data/bob/bob.bio.face/master/caffe/vgg_face_caffe.tar.gz", ] filename = get_file( "vgg_face_caffe.tar.gz", urls, cache_subdir="data/caffe/vgg_face_caffe", file_hash="ee707ac6e890bc148cb155adeaad12be", extract=True, ) path = os.path.dirname(filename) config = os.path.join( path, "vgg_face_caffe", "VGG_FACE_deploy.prototxt" ) checkpoint_path = os.path.join( path, "vgg_face_caffe", "VGG_FACE.caffemodel" ) caffe_average_img = [129.1863, 104.7624, 93.5940] self.embedding_layer = embedding_layer def preprocessor(X): """ Normalize using data from caffe Caffe has the shape `C x H x W` and the chanel is BGR and """ # Subtracting X[:, 0, :, :] -= caffe_average_img[0] X[:, 1, :, :] -= caffe_average_img[1] X[:, 2, :, :] -= caffe_average_img[2] # To BGR X = X[:, ::-1, :, :].astype("float32") return X super(VGG16_Oxford, self).__init__( checkpoint_path, config, preprocessor )
def __init__(
    self,
    model_name,
    memory_demanding=False,
    device=None,
    **kwargs,
):
    urls = [
        "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/oxford_resnet50_vgg2.tar.gz",
        "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/oxford_resnet50_vgg2.tar.gz",
    ]

    filename = get_file(
        "oxford_resnet50_vgg2.tar.gz",
        urls,
        cache_subdir="data/pytorch/oxford_resnet50_vgg2/",
        file_hash="c8e1ed3715d83647b4a02e455213aaf0",
        extract=True,
    )

    models_available = [
        "resnet50_scratch_dag",
        "resnet50_ft_dag",
        "senet50_ft_dag",
        "senet50_scratch_dag",
    ]
    if model_name not in models_available:
        raise ValueError(
            f"Invalid model {model_name}. The models available are {models_available}"
        )

    self.model_name = model_name
    path = os.path.dirname(filename)
    config = os.path.join(path, model_name, f"{model_name}.py")
    checkpoint_path = os.path.join(path, model_name, f"{model_name}.pth")

    super(OxfordVGG2Resnets, self).__init__(
        checkpoint_path,
        config,
        memory_demanding=memory_demanding,
        preprocessor=self.dag_preprocessor,
        device=device,
        **kwargs,
    )
def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None): # Downloading model if not exists urls = CBSRNirVis2Database.urls() filename = get_file( "cbsr-nir-vis2.tar.gz", urls, file_hash="e4bda52ab6754556783d6730eccc2ae2", ) directory = (rc["bob.db.cbsr-nir-vis-2.directory"] if rc["bob.db.cbsr-nir-vis-2.directory"] else "") def load(filename): extensions = [".jpg", ".bmp"] for e in extensions: f = os.path.splitext(filename)[0] new_filename = f + e if os.path.exists(new_filename): return bob.io.base.load(new_filename) else: raise ValueError("File `{0}` not found".format( str(new_filename))) super().__init__( name="cbsr-nir-vis2", dataset_protocol_path=filename, protocol=protocol, csv_to_sample_loader=make_pipeline( CSVToSampleLoaderBiometrics( data_loader=load, dataset_original_directory=directory, extension=".jpg", ), EyesAnnotations(), ), annotation_type=annotation_type, fixed_positions=fixed_positions, )
def __init__(self, memory_demanding=False, **kwargs): urls = [ "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/facenet_sanderberg_20170512_110547.tar.gz" ] filename = get_file( "facenet_sanderberg_20170512_110547.tar.gz", urls, cache_subdir="data/tensorflow/facenet_sanderberg_20170512_110547", file_hash="734d1c997c10acdcdffc79fb51a2e715", extract=True, ) checkpoint_path = os.path.dirname(filename) super(FaceNetSanderberg_20170512_110547, self).__init__( checkpoint_path, tf.image.per_image_standardization, memory_demanding=memory_demanding, **kwargs, )
def __init__( self, protocol, annotation_type="eyes-center", fixed_positions=None, original_directory=rc.get("bob.bio.face.gbu.directory"), extension=".jpg", ): # self.filename = "/idiap/user/tpereira/gitlab/bob/bob.nightlies/temp/gbu.tar.gz" # Downloading model if not exists urls = GBUDatabase.urls() self.filename = get_file( "gbu-xmls.tar.gz", urls, file_hash="827de43434ee84020c6a949ece5e4a4d", ) self.references_dict = {} self.probes_dict = {} self.annotations = None self.original_directory = original_directory self.extension = extension self.background_samples = None self._background_files = [ "GBU_Training_Uncontrolledx1.xml", "GBU_Training_Uncontrolledx2.xml", "GBU_Training_Uncontrolledx4.xml", "GBU_Training_Uncontrolledx8.xml", ] super().__init__( name="gbu", protocol=protocol, score_all_vs_all=True, annotation_type="eyes-center", fixed_positions=fixed_positions, memory_demanding=True, )
def test_vgg2():
    from bob.bio.face.database import VGG2Database

    # Getting the absolute path
    urls = VGG2Database.urls()
    filename = get_file("vgg2.tar.gz", urls)

    # Removing the file before the test
    try:
        os.remove(filename)
    except Exception:
        pass

    p = "vgg2-short"
    database = VGG2Database(protocol=p)

    # Sanity check on vgg2-short
    assert len(database.treferences()) == 194
    assert len(database.zprobes()) == 200

    # vgg2-full has 3,141,890 samples
    assert len(database.background_model_samples()) == 86310

    assert len(database.references()) == 500
    assert len(database.probes()) == 2500

    p = "vgg2-short-with-eval"
    database = VGG2Database(protocol=p)

    # Sanity check on vgg2-short-with-eval
    assert len(database.treferences()) == 194
    assert len(database.zprobes()) == 200
    assert len(database.background_model_samples()) == 86310

    assert len(database.references(group="dev")) == 250
    assert len(database.probes(group="dev")) == 1250
    assert len(database.references(group="eval")) == 250
    assert len(database.probes(group="eval")) == 1250
def __init__(self, memory_demanding=False, **kwargs): urls = [ "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50_vgg2_arcface_2021.tar.gz", "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50_vgg2_arcface_2021.tar.gz", ] filename = get_file( "resnet50_vgg2_arcface_2021.tar.gz", urls, cache_subdir="data/tensorflow/resnet50_vgg2_arcface_2021", file_hash="64f89c8cb55e7a0d9c7e13ff412b6a13", extract=True, ) checkpoint_path = os.path.dirname(filename) super(Resnet50_VGG2_ArcFace_2021, self).__init__( checkpoint_path, preprocessor=lambda X: X / 255.0, memory_demanding=memory_demanding, **kwargs, )
def __init__( self, database_path=rc.get("bob.bio.face.webface42M.directory", ""), transform=None, ): self.database_path = database_path if database_path == "": raise ValueError( "`database_path` is empty; please do `bob config set bob.bio.face.webface42M.directory` to set the absolute path of the data" ) urls = WebFace42M.urls() filename = get_file( "webface42M.tar.gz", urls, file_hash="50c32cbe61de261466e1ea3af2721cea", ) self.file = search_file(filename, "webface42M.csv") self._line_offset = 51 self.transform = transform