Example 1
    HEAD_DICT = {
        'ArcFace':
        ArcFace(in_features=EMBEDDING_SIZE, out_features=NUM_CLASS),
        'CosFace':
        CosFace(in_features=EMBEDDING_SIZE, out_features=NUM_CLASS),
        'SphereFace':
        SphereFace(in_features=EMBEDDING_SIZE, out_features=NUM_CLASS),
        'Am_softmax':
        Am_softmax(in_features=EMBEDDING_SIZE, out_features=NUM_CLASS)
    }
    HEAD = HEAD_DICT[HEAD_NAME]
    print("=" * 60)
    print(HEAD)
    print("{} Head Generated".format(HEAD_NAME))
    print("=" * 60)

    LOSS_DICT = {'Focal': FocalLoss(), 'Softmax': nn.CrossEntropyLoss()}
    LOSS = LOSS_DICT[LOSS_NAME]
    print("=" * 60)
    print(LOSS)
    print("{} Loss Generated".format(LOSS_NAME))
    print("=" * 60)

    if BACKBONE_NAME.find("IR") >= 0:
        backbone_paras_only_bn, backbone_paras_wo_bn = separate_irse_bn_paras(
            BACKBONE
        )  # separate batch_norm parameters from others; do not do weight decay for batch_norm parameters to improve the generalizability
        _, head_paras_wo_bn = separate_irse_bn_paras(HEAD)
    else:
        backbone_paras_only_bn, backbone_paras_wo_bn = separate_resnet_bn_paras(
            BACKBONE
        )  # separate batch_norm parameters from others; do not do weight decay for batch_norm parameters to improve the generalizability
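
Downstream, the two parameter groups are normally handed to the optimizer so that weight decay is applied only to the non-BN parameters. A minimal sketch continuing from the variables above, assuming SGD and placeholder LR, WEIGHT_DECAY and MOMENTUM values (not taken from the snippet):

import torch.optim as optim

# Illustrative hyperparameters, not taken from the examples above
LR, WEIGHT_DECAY, MOMENTUM = 0.1, 5e-4, 0.9

OPTIMIZER = optim.SGD([
    # backbone and head weights: regular weight decay
    {'params': backbone_paras_wo_bn + head_paras_wo_bn, 'weight_decay': WEIGHT_DECAY},
    # batch-norm parameters: no weight decay
    {'params': backbone_paras_only_bn}
], lr=LR, momentum=MOMENTUM)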
Example 2
	if HEAD_NAME == 'ArcFace':
		HEAD = ArcFace(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS, device_id = GPU_ID)
	elif HEAD_NAME == 'CosFace':
		HEAD = CosFace(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS, device_id = GPU_ID)
	elif HEAD_NAME == 'SphereFace':
		HEAD = SphereFace(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS, device_id = GPU_ID)
	elif HEAD_NAME == 'Am_softmax':
		HEAD = Am_softmax(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS, device_id = GPU_ID)
	elif HEAD_NAME == 'Softmax':
		HEAD = Softmax(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS, device_id = GPU_ID)
	else:
		raise NotImplementedError
	print("=" * 60)
	print(HEAD)
	print("{} Head Generated".format(HEAD_NAME))
	print("=" * 60)

	if LOSS_NAME == 'Focal':
		LOSS = FocalLoss()
	elif LOSS_NAME == 'Softmax':
		LOSS = nn.CrossEntropyLoss()
	else:
		raise NotImplementedError
	print("=" * 60)
	print(LOSS)
	print("{} Loss Generated".format(LOSS_NAME))
	print("=" * 60)

	if BACKBONE_NAME.find("IR") >= 0:
		backbone_paras_only_bn, backbone_paras_wo_bn = separate_irse_bn_paras(BACKBONE) # separate batch_norm parameters from others; do not do weight decay for batch_norm parameters to improve the generalizability
		_, head_paras_wo_bn = separate_irse_bn_paras(HEAD)
	else:
		backbone_paras_only_bn, backbone_paras_wo_bn = separate_resnet_bn_paras(BACKBONE) # separate batch_norm parameters from others; do not do weight decay for batch_norm parameters to improve the generalizability
		_, head_paras_wo_bn = separate_resnet_bn_paras(HEAD)
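
These margin-based heads take the label together with the embedding, so a training step built on the objects above typically looks like the sketch below (inputs, labels and OPTIMIZER are assumed to come from the surrounding training loop):

# Minimal sketch of one training step using BACKBONE, HEAD and LOSS from above
inputs, labels = inputs.cuda(), labels.cuda().long()
features = BACKBONE(inputs)           # embeddings of dimension EMBEDDING_SIZE
outputs = HEAD(features, labels)      # margin-adjusted logits over NUM_CLASS classes
loss = LOSS(outputs, labels)

OPTIMIZER.zero_grad()
loss.backward()
OPTIMIZER.step()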
Example 3
    print("=" * 60)
    print(BACKBONE)
    print("{} Backbone Generated".format(BACKBONE_NAME))
    print("=" * 60)

    HEAD_DICT = {'ArcFace': ArcFace(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS, device_id = GPU_ID),
                 'CosFace': CosFace(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS, device_id = GPU_ID),
                 'SphereFace': SphereFace(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS, device_id = GPU_ID),
                 'Am_softmax': Am_softmax(in_features = EMBEDDING_SIZE, out_features = NUM_CLASS, device_id = GPU_ID)}
    HEAD = HEAD_DICT[HEAD_NAME]
    print("=" * 60)
    print(HEAD)
    print("{} Head Generated".format(HEAD_NAME))
    print("=" * 60)

    LOSS_DICT = {'Focal': FocalLoss(),
                 'Softmax': nn.CrossEntropyLoss(),
                 'AdaCos': AdaCos(),
                 'AdaM_Softmax': AdaM_Softmax(),
                 'ArcFace': ArcFace(),
                 'ArcNegFace': ArcNegFace(),
                 'CircleLoss': CircleLoss(),
                 'CurricularFace': CurricularFace(),
                 'MagFace': MagFace(),
                 'NPCFace': MV_Softmax(),
                 'SST_Prototype': SST_Prototype()}
    LOSS = LOSS_DICT[LOSS_NAME]
    print("=" * 60)
    print(LOSS)
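
FocalLoss itself comes from the repository's loss module and is not shown in these examples; for reference, here is a minimal sketch of a typical focal loss built on top of cross-entropy (the class name and gamma value are illustrative, not the repo's):

import torch
import torch.nn as nn

class FocalLossSketch(nn.Module):
    """Illustrative focal loss: down-weights easy, well-classified samples."""
    def __init__(self, gamma=2.0):
        super().__init__()
        self.gamma = gamma
        self.ce = nn.CrossEntropyLoss()

    def forward(self, logits, labels):
        logp = -self.ce(logits, labels)              # mean log-probability of the target class
        p = torch.exp(logp)
        return -((1.0 - p) ** self.gamma) * logp     # focal modulation of the cross-entropy term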