def __init__(self, targets, batch_size, rank, local_rank=0, num_workers=1, logger=None, nfolds=10, norm=True):
    """Set up per-process validation state.

    Only the process whose ``rank`` indexes into ``targets`` performs
    validation; every higher rank marks itself as skipped.

    NOTE(review): this appears to duplicate ParallelValidation.__init__
    found elsewhere in this file — confirm whether it is still needed.
    """
    self.metric = FaceVerification(nfolds)
    self.norm = norm
    self.targets = targets
    self.ctx = mx.gpu(local_rank)
    self.logger = logger
    # Each process validates at most one validation set.
    self.skip_validate = rank >= len(targets)
    if not self.skip_validate:
        self.name = targets[rank]
        dataset = get_recognition_dataset(self.name, transform=transform_test)
        self.loader = gluon.data.DataLoader(dataset, batch_size, num_workers=num_workers)
class ParallelValidation:
    """Distributed face-verification validation.

    Each worker process evaluates exactly one target validation set,
    chosen by its ``rank``; ranks beyond ``len(targets)`` skip
    validation entirely.
    """

    def __init__(self, targets, batch_size, rank, local_rank=0, num_workers=1, logger=None, nfolds=10, norm=True):
        """Build the verification metric and, when this rank owns a
        target, the data loader for that single validation set."""
        self.metric = FaceVerification(nfolds)
        self.norm = norm
        self.targets = targets
        self.ctx = mx.gpu(local_rank)
        self.logger = logger
        # In each process we only do validation for one val set.
        self.skip_validate = rank >= len(targets)
        if not self.skip_validate:
            self.name = targets[rank]
            dataset = get_recognition_dataset(self.name, transform=transform_test)
            self.loader = gluon.data.DataLoader(dataset, batch_size, num_workers=num_workers)

    def __call__(self, net, *args, **kwargs):
        """Run verification with ``net`` over this rank's loader and
        report accuracy via the logger (or stdout when none is set)."""
        if self.skip_validate:
            return
        self.metric.reset()
        for batch in self.loader:
            left = batch[0][0].as_in_context(self.ctx)
            right = batch[0][1].as_in_context(self.ctx)
            labels = batch[1].as_in_context(self.ctx)
            emb_left = net(left)[0]
            emb_right = net(right)[0]
            if self.norm:
                emb_left = nd.L2Normalization(emb_left)
                emb_right = nd.L2Normalization(emb_right)
            self.metric.update(labels, emb_left, emb_right)
        tpr, fpr, accuracy, val, val_std, far, accuracy_std = self.metric.get()
        text = "{}: {:.6f}+-{:.6f}".format(self.name, accuracy, accuracy_std)
        if self.logger is None:
            print(text)
        else:
            self.logger.info(text)
def validate(net, ctx, val_datas, targets, nfolds=10, norm=True):
    """Evaluate ``net`` on each verification dataset.

    Parameters
    ----------
    net : callable
        Model whose first output is the face embedding.
    ctx : list of Context
        Devices the batches are split across.
    val_datas : iterable of DataLoader
        One loader per entry in ``targets``.
    targets : iterable of str
        Names of the validation sets (paired with ``val_datas``).
    nfolds : int
        Number of cross-validation folds for FaceVerification.
    norm : bool
        L2-normalize embeddings before comparison.

    Returns
    -------
    list of str
        One "name: accuracy+-std" summary per target.
    """
    metric = FaceVerification(nfolds)
    results = []
    for loader, name in zip(val_datas, targets):
        metric.reset()
        # Fix: the loop index from enumerate() was never used — iterate directly.
        for batch in loader:
            # batch[0] holds the two images of each pair; batch[1] the same/different label.
            data0s = gluon.utils.split_and_load(batch[0][0], ctx, even_split=False)
            data1s = gluon.utils.split_and_load(batch[0][1], ctx, even_split=False)
            issame_list = gluon.utils.split_and_load(batch[1], ctx, even_split=False)
            embedding0s = [net(X)[0] for X in data0s]
            embedding1s = [net(X)[0] for X in data1s]
            if norm:
                embedding0s = [nd.L2Normalization(e) for e in embedding0s]
                embedding1s = [nd.L2Normalization(e) for e in embedding1s]
            for embedding0, embedding1, issame in zip(embedding0s, embedding1s, issame_list):
                metric.update(issame, embedding0, embedding1)
        tpr, fpr, accuracy, val, val_std, far, accuracy_std = metric.get()
        results.append("{}: {:.6f}+-{:.6f}".format(name, accuracy, accuracy_std))
    return results
def validate(nfolds=10, norm=True):
    """Evaluate the module-level ``train_net`` on every verification set.

    Reads ``val_datas``, ``targets`` (comma-separated string), ``ctx``
    and ``dtype`` from the enclosing scope.

    Parameters
    ----------
    nfolds : int
        Number of cross-validation folds for FaceVerification.
    norm : bool
        L2-normalize embeddings (via sklearn, on CPU) before comparison.

    Returns
    -------
    list of str
        One "name: accuracy+-std" summary per target.
    """
    metric = FaceVerification(nfolds)
    results = []
    for loader, name in zip(val_datas, targets.split(",")):
        metric.reset()
        # Fix: the loop index from enumerate() was never used — iterate directly.
        for batch in loader:
            data0s = gluon.utils.split_and_load(batch[0][0], ctx, even_split=False)
            data1s = gluon.utils.split_and_load(batch[0][1], ctx, even_split=False)
            issame_list = gluon.utils.split_and_load(batch[1], ctx, even_split=False)
            # Cast to the training dtype (e.g. float16) before the forward pass.
            embedding0s = [train_net(X.astype(dtype, copy=False))[0] for X in data0s]
            embedding1s = [train_net(X.astype(dtype, copy=False))[0] for X in data1s]
            if norm:
                # NOTE(review): normalization converts embeddings to numpy while
                # `issame` stays an nd.NDArray — assumes FaceVerification.update
                # accepts both; confirm against its implementation.
                embedding0s = [sklearn.preprocessing.normalize(e.asnumpy()) for e in embedding0s]
                embedding1s = [sklearn.preprocessing.normalize(e.asnumpy()) for e in embedding1s]
            for embedding0, embedding1, issame in zip(embedding0s, embedding1s, issame_list):
                metric.update(issame, embedding0, embedding1)
        tpr, fpr, accuracy, val, val_std, far, accuracy_std = metric.get()
        results.append("{}: {:.6f}+-{:.6f}".format(name, accuracy, accuracy_std))
    return results
def validate(nfolds=10):
    """Evaluate ``test_net`` on every verification set, reporting both the
    plain accuracy and the flip-augmented accuracy (embedding of the image
    concatenated with the embedding of its horizontal flip).

    Reads ``val_datas``, ``targets`` (comma-separated string), ``ctx`` and
    ``test_net`` from the enclosing scope; prints one line per metric.

    Parameters
    ----------
    nfolds : int
        Number of cross-validation folds for FaceVerification.
    """
    metric = FaceVerification(nfolds)
    metric_flip = FaceVerification(nfolds)
    for loader, name in zip(val_datas, targets.split(",")):
        metric.reset()
        # Fix: metric_flip was never reset, so pairs from earlier targets
        # leaked into every subsequent target's flip accuracy.
        metric_flip.reset()
        # Fix: the loop index from enumerate() was never used — iterate directly.
        for batch in loader:
            # batch[0][i][0] is the original image, batch[0][i][1] its flipped copy.
            data0s = gluon.utils.split_and_load(batch[0][0][0], ctx, even_split=False)
            data1s = gluon.utils.split_and_load(batch[0][1][0], ctx, even_split=False)
            data0s_flip = gluon.utils.split_and_load(batch[0][0][1], ctx, even_split=False)
            data1s_flip = gluon.utils.split_and_load(batch[0][1][1], ctx, even_split=False)
            issame_list = gluon.utils.split_and_load(batch[1], ctx, even_split=False)
            embedding0s = [test_net(X) for X in data0s]
            embedding1s = [test_net(X) for X in data1s]
            embedding0s_flip = [test_net(X) for X in data0s_flip]
            embedding1s_flip = [test_net(X) for X in data1s_flip]
            # Plain accuracy: per-row L2-normalized embeddings of the originals.
            emb0s = [nd.L2Normalization(e, mode='instance') for e in embedding0s]
            emb1s = [nd.L2Normalization(e, mode='instance') for e in embedding1s]
            for embedding0, embedding1, issame in zip(emb0s, emb1s, issame_list):
                metric.update(issame, embedding0, embedding1)
            # Flip accuracy: concatenate original+flip embeddings, then normalize.
            emb0s_flip = [
                nd.L2Normalization(nd.concatenate([e, ef], 1), mode='instance')
                for e, ef in zip(embedding0s, embedding0s_flip)
            ]
            emb1s_flip = [
                nd.L2Normalization(nd.concatenate([e, ef], 1), mode='instance')
                for e, ef in zip(embedding1s, embedding1s_flip)
            ]
            for embedding0, embedding1, issame in zip(emb0s_flip, emb1s_flip, issame_list):
                metric_flip.update(issame, embedding0, embedding1)
        tpr, fpr, accuracy, val, val_std, far, accuracy_std = metric.get()
        print("{}: \t{:.6f}+-{:.6f}".format(name, accuracy, accuracy_std))
        _, _, accuracy, _, _, _, accuracy_std = metric_flip.get()
        print("{}-flip: {:.6f}+-{:.6f}".format(name, accuracy, accuracy_std))