def predict(image):
    """Classify a face image with a pre-trained IR-50 backbone and ArcFace head."""
    # preprocess the input; image_loader is expected to return a 5-D tensor of
    # shape [batch, ncrops, C, H, W] (e.g. from a TenCrop-style transform)
    image = image_loader(image=image)

    # rebuild the model and reload the trained weights on every call
    BACKBONE = IR_50(INPUT_SIZE)
    HEAD = ArcFace(in_features=EMBEDDING_SIZE,
                   out_features=1000,  # number of identity classes in the training set
                   device_id=GPU_ID)

    BACKBONE = BACKBONE.to(DEVICE)
    HEAD = HEAD.to(DEVICE)

    BACKBONE.load_state_dict(
        torch.load('./trained_model/Backbone_IR_50_ArcFace_30.pth'))
    HEAD.load_state_dict(
        torch.load('./trained_model/Head_IR_50_ArcFace_30.pth'))

    # evaluation mode: freeze batch-norm statistics and disable dropout
    BACKBONE.eval()
    HEAD.eval()

    image = image.to(DEVICE)
    bs, ncrops, c, h, w = image.size()
    inputs = image.view(-1, c, h, w)        # fold the crops into the batch dimension
    with torch.no_grad():                   # inference only, no gradients needed
        features = BACKBONE(inputs)
        outputs = HEAD(features, None)      # the head is queried without labels here
    outputs = outputs.view(bs, ncrops, -1).mean(1)   # average the scores over the crops
    top_probs, top_labs = outputs.data.topk(1)       # raw scores, not softmax probabilities
    top_labs = top_labs.cpu().numpy()
    top_probs = top_probs.cpu().numpy()
    return int(top_labs), float(top_probs)
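
For context, here is a minimal sketch of how this predict() helper could be wired up. Everything below — the constant values, the TenCrop-style image_loader, and the file name passed in at the end — is an assumption chosen to match the shapes the function expects (a [batch, ncrops, C, H, W] tensor); it is not taken from the original project.

# Hypothetical module-level setup assumed by predict() above.
import torch
import torchvision.transforms as transforms
from PIL import Image

INPUT_SIZE = [112, 112]      # assumed IR-50 input resolution
EMBEDDING_SIZE = 512         # assumed embedding dimension
GPU_ID = [0]
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def image_loader(image):
    """Hypothetical loader: returns a [1, ncrops, C, H, W] tensor via TenCrop."""
    to_tensor = transforms.functional.to_tensor
    normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    preprocess = transforms.Compose([
        transforms.Resize(128),
        transforms.TenCrop(112),
        transforms.Lambda(lambda crops: torch.stack([normalize(to_tensor(c)) for c in crops])),
    ])
    img = Image.open(image).convert("RGB")
    return preprocess(img).unsqueeze(0)  # add the batch dimension

label, score = predict("face.jpg")
print(label, score)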
Example #2
	# separate the batch-norm parameters from the others; no weight decay is applied
	# to batch-norm parameters, which improves generalization
	backbone_paras_only_bn, backbone_paras_wo_bn = separate_resnet_bn_paras(BACKBONE)
	_, head_paras_wo_bn = separate_resnet_bn_paras(HEAD)
	OPTIMIZER = optim.SGD([
		{'params': backbone_paras_wo_bn + head_paras_wo_bn, 'weight_decay': WEIGHT_DECAY},
		{'params': backbone_paras_only_bn}
	], lr = LR, momentum = MOMENTUM)
	print("=" * 60)
	print(OPTIMIZER)
	print("Optimizer Generated")
	print("=" * 60)

	# optionally resume from a checkpoint
	if BACKBONE_RESUME_ROOT and HEAD_RESUME_ROOT:
		print("=" * 60)
		if os.path.isfile(BACKBONE_RESUME_ROOT) and os.path.isfile(HEAD_RESUME_ROOT):
			print("Loading Backbone Checkpoint '{}'".format(BACKBONE_RESUME_ROOT))
			BACKBONE.load_state_dict(torch.load(BACKBONE_RESUME_ROOT))
			print("Loading Head Checkpoint '{}'".format(HEAD_RESUME_ROOT))
			HEAD.load_state_dict(torch.load(HEAD_RESUME_ROOT))
		else:
			print("No Checkpoint Found at '{}' and '{}'. Please Have a Check or Continue to Train from Scratch".format(BACKBONE_RESUME_ROOT, HEAD_RESUME_ROOT))
		print("=" * 60)

	if MULTI_GPU:
		# multi-GPU setting
		BACKBONE = nn.DataParallel(BACKBONE, device_ids = GPU_ID)
		BACKBONE = BACKBONE.to(DEVICE)
		HEAD = nn.DataParallel(HEAD, device_ids = GPU_ID)
		HEAD = HEAD.to(DEVICE)
	else:
		# single-GPU setting
		BACKBONE = BACKBONE.to(DEVICE)
		HEAD = HEAD.to(DEVICE)
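
	# ------------------------------------------------------------------
	# Variant: the same parameter-group / optimizer / resume logic, but for
	# distributed training. Checkpoints are mapped onto the local GPU and the
	# backbone and head are wrapped in DistributedDataParallel instead of
	# DataParallel (one process per GPU, driven by args.local_rank).
	# ------------------------------------------------------------------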
	# separate the batch-norm parameters again (no weight decay for batch-norm)
	backbone_paras_only_bn, backbone_paras_wo_bn = separate_resnet_bn_paras(BACKBONE)
	_, head_paras_wo_bn = separate_resnet_bn_paras(HEAD)
	OPTIMIZER = optim.SGD([
		{'params': backbone_paras_wo_bn + head_paras_wo_bn, 'weight_decay': WEIGHT_DECAY},
		{'params': backbone_paras_only_bn}
	], lr = LR, momentum = MOMENTUM)
	print("=" * 60)
	print(OPTIMIZER)
	print("Optimizer Generated")
	print("=" * 60)

	# optionally resume from a checkpoint
	if BACKBONE_RESUME_ROOT and HEAD_RESUME_ROOT:
		print("=" * 60)
		if os.path.isfile(BACKBONE_RESUME_ROOT) and os.path.isfile(HEAD_RESUME_ROOT):
			print("Loading Backbone Checkpoint '{}'".format(BACKBONE_RESUME_ROOT))
			BACKBONE.load_state_dict(torch.load(BACKBONE_RESUME_ROOT, map_location=lambda storage, loc: storage.cuda()))
			print("Loading Head Checkpoint '{}'".format(HEAD_RESUME_ROOT))
			HEAD.load_state_dict(torch.load(HEAD_RESUME_ROOT, map_location=lambda storage, loc: storage.cuda()))
		else:
			print("No Checkpoint Found at '{}' and '{}'. Please Have a Check or Continue to Train from Scratch".format(BACKBONE_RESUME_ROOT, HEAD_RESUME_ROOT))
		print("=" * 60)

	# multi-GPU setting: DistributedDataParallel, one process per GPU
	BACKBONE = BACKBONE.cuda()
	BACKBONE = nn.parallel.DistributedDataParallel(BACKBONE, device_ids = [args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
	HEAD = HEAD.cuda()
	HEAD = nn.parallel.DistributedDataParallel(HEAD, device_ids = [args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
#	BACKBONE = nn.parallel.DistributedDataParallel(BACKBONE)
#	LOSS = nn.parallel.DistributedDataParallel(LOSS, device_ids = [args.local_rank], output_device=args.local_rank)
#	HEAD = nn.parallel.DistributedDataParallel(HEAD)
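
The DistributedDataParallel calls above assume that each process has already joined a process group and selected its GPU, typically via the torch.distributed launcher that supplies --local_rank. A minimal sketch of that boilerplate follows; the argument name matches args.local_rank used above, while the backend and launch command are standard PyTorch usage, not taken from the original script.

# Hypothetical distributed setup expected before the DDP wrapping above.
import argparse
import torch
import torch.distributed as dist

parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", type=int, default=0)   # filled in by the launcher
args = parser.parse_args()

torch.cuda.set_device(args.local_rank)                      # one GPU per process
dist.init_process_group(backend="nccl", init_method="env://")

# Launched with, e.g.:
#   python -m torch.distributed.launch --nproc_per_node=<num_gpus> train.py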