Ejemplo n.º 1
0
    #    test_input = None
    #    test_label = None

    # Per-sample feature width, taken from the first training sample's second
    # axis (assumes train_data[0] is at least 2-D — TODO confirm upstream shape).
    feature_dimension = train_data[0].shape[1]

    if args.type == "conv":
        # Conv variant expects an explicit channel axis: insert axis 1,
        # e.g. (N, F) -> (N, 1, F).
        train_data = np.expand_dims(train_data, axis=1)
        label_data = np.expand_dims(label_data, axis=1)
        net = conv_transform_net(f_dimension=feature_dimension, \
                   mid_dimension=args.mid_dimen, mid_num=args.mid_num, \
                   seq_length=args.seq_length)
        # Test tensors need the same channel axis as the training tensors.
        # NOTE(review): test_input/test_label are presumably loaded earlier
        # from args.test_input_mat — not visible in this fragment; confirm.
        if (args.test_input_mat is not None):
            test_input = np.expand_dims(test_input, axis=1)
            test_label = np.expand_dims(test_label, axis=1)
    else:
        # Fully-connected variant: no channel axis, no seq_length.
        net = transform_net(f_dimension=feature_dimension, \
                   mid_dimension=args.mid_dimen, mid_num=args.mid_num)

    # Checkpoint name prefix passed to train() for intermediate saves
    # (no .pth/epoch suffix here; the final save below adds them).
    save_name = args.saving_title + args.type + 'transform_{}-midNum_{}-midDimen'.format(
        args.mid_num, args.mid_dimen)

    net = train(train_data, label_data, net, args.batch_size, args.epoch_num,
                args.learning_rate, test_input_mat_list, test_label_mat,
                args.whether_save, save_name)
    # net = train(train_data, label_data, net, args.batch_size, args.epoch_num, args.learning_rate)

    # Final save after training completes, with the epoch count baked into
    # the filename so it cannot clobber the intermediate checkpoints above.
    if (args.whether_save is True):
        save_name = args.saving_title + args.type + 'transform_{}-midNum_{}-midDimen-epoch_{}.pth'.format(
            args.mid_num, args.mid_dimen, args.epoch_num)
        save_model(net, save_name)
                    type=str,
                    default="transform_lfw_feature.mat")

args = parser.parse_args()

# Pin the visible GPU(s) before any CUDA context is created.
os.environ["CUDA_VISIBLE_DEVICES"] = args.use_gpu
predicts = []
# Instantiate the backbone by name (e.g. a sphereface variant from
# net_sphere) and load its pretrained weights.
net = getattr(net_sphere, args.net)()
net.load_state_dict(torch.load(args.model))
net.cuda()
net.eval()
# Flag consumed by net_sphere models to return embeddings instead of logits
# — presumably; semantics live in net_sphere, not visible here.
net.feature = True
# NOTE(review): this sets a plain attribute on the Module, not the
# requires_grad flag of its parameters — likely a no-op for autograd;
# confirm intent (parameters would need p.requires_grad_(True)).
net.requires_grad = True


# NOTE(review): rebinding shadows the transform_net class/factory with its
# instance — harmless only if it is never constructed again in this script.
transform_net = transform_net(f_dimension=args.feature_dimen, \
                    mid_dimension=args.mid_dimen, mid_num=args.mid_num)

# Images are read from a zip archive; the BLUFR list file supplies one
# landmark/annotation line per image.
zfile = zipfile.ZipFile(args.dataset)
landmark = {}
with open(args.BLUFR) as f:
    landmark_lines = f.readlines()
num_person = len(landmark_lines)  # number of the total images

img = []
feature_list = []

# Truncating division: any remainder images beyond the last full batch
# are silently dropped.
batch_num = int(num_person / args.batch_size)
for i in range(batch_num):
    img_batch = []
    for j in range(args.batch_size):
        line = landmark_lines[i * args.batch_size + j].strip('\n')
Ejemplo n.º 3
0
                # Scatter one resized point per row: column 0 is x, column 1
                # is y; one color per class, drawn as individual dots.
                plt.plot(tensor_dict_resize[i][j, 0],
                         tensor_dict_resize[i][j, 1],
                         color=color_param_mark[i],
                         marker='.')

    # Top-left panel: raw (untransformed) points, one plt.plot call per
    # point — slow for large a, but keeps per-point color control simple.
    plt.subplot(221)
    for i in range(class_num):
        a, b = np.shape(tensor_dict[i])
        for j in range(a):
            plt.plot(tensor_dict[i][j, 0],
                     tensor_dict[i][j, 1],
                     color=color_param[i],
                     marker='.')


    # 2-D toy setting: f_dimension is hard-coded to 2 to match the plotted
    # points. NOTE(review): rebinding shadows the transform_net
    # class/factory with its instance.
    transform_net = transform_net(f_dimension=2, \
            mid_dimension=args.mid_dimen, mid_num=args.mid_num)

    # Bottom-left panel: same raw points re-drawn (presumably as the "before"
    # view next to a transformed "after" panel — rest of figure not visible).
    plt.subplot(223)
    for i in range(class_num):
        a, b = np.shape(tensor_dict[i])
        for j in range(a):
            plt.plot(tensor_dict[i][j, 0],
                     tensor_dict[i][j, 1],
                     color=color_param[i],
                     marker='.')

    # Push each class's points through the transform net on CPU
    # (.cuda() deliberately commented out) to collect transformed features.
    transform_dict = {}
    for i in range(class_num):
        tensor_i = torch.from_numpy(tensor_dict[i]).type(
            torch.FloatTensor)  # .cuda()
        feature = transform_net(tensor_i)