Code Example #1
def main():
    # View the training images and display the anchor-negative and anchor-positive distances
    test_display_triplet_distance = True

    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))
    print('\nNumber of Classes:\n{}\n'.format(len(train_dir.classes)))

    # instantiate model and initialize weights
    model = FaceModel(embedding_size=args.embedding_size,
                      num_classes=len(train_dir.classes),
                      pretrained=False)

    if args.cuda:
        model.cuda()

    optimizer = create_optimizer(model, args.lr)

    start = args.start_epoch
    end = start + args.epochs

    for epoch in range(start, end):
        train(train_loader, model, optimizer, epoch)
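Several of these examples call a create_optimizer helper that the excerpts do not show. Below is a minimal sketch of what such a helper could look like, assuming plain SGD; the function name matches the calls above, but the body is an assumption, and the real projects may switch on an optimizer flag:

import torch.optim as optim

def create_optimizer(model, new_lr):
    # hypothetical body: SGD with momentum and light weight decay
    return optim.SGD(model.parameters(),
                     lr=new_lr,
                     momentum=0.9,
                     weight_decay=1e-4)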
Code Example #2
File: train_triplet.py Project: Coga8/own_facenet
def main():
    # View the training images and display the anchor-negative and anchor-positive distances
    test_display_triplet_distance = True

    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))
    print('\nNumber of Classes:\n{}\n'.format(len(train_dir.classes)))

    # instantiate model and initialize weights
    model = FaceModel(embedding_size=args.embedding_size,
                      num_classes=len(train_dir.classes),
                      pretrained=False)

    model.to(device)
    triplet_loss = TripletMarginLoss(args.margin)
    optimizer = create_optimizer(model, args.lr)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print('=> no checkpoint found at {}'.format(args.resume))

    start = args.start_epoch
    end = start + args.epochs

    for epoch in range(start, end):
        print(80 * '=')
        print('Epoch [{}/{}]'.format(epoch, end - 1))
        time0 = time.time()
        own_train(train_loader, model, triplet_loss, optimizer, epoch,
                  data_size)
        print(f' Execution time    = {time.time() - time0}')
        print(80 * '=')

        if test_display_triplet_distance:
            display_triplet_distance(model, train_loader,
                                     LOG_DIR + "/train_{}".format(epoch))
    print(80 * '=')
    time0 = time.time()
    own_test(test_loader, model, epoch)
    print(f' Execution time    = {time.time() - time0}')
    print(80 * '=')
    if test_display_triplet_distance:
        display_triplet_distance_test(model, test_loader,
                                      LOG_DIR + "/test_{}".format(epoch))
Code Example #3
def main():
    # View the training images and display the anchor-negative and anchor-positive distances
    test_display_triplet_distance = True

    # print the experiment configuration
    print('\nparsed options:\n{}\n'.format(vars(args)))
    print('\nNumber of Classes:\n{}\n'.format(len(train_dir.classes)))

    # instantiate model and initialize weights
    model = FaceModel(inceptionresnet_v1,
                      embedding_size=args.embedding_size,
                      num_classes=len(train_dir.classes),
                      pretrained=False)

    if args.cuda:
        model.cuda()

    optimizer = create_optimizer(model, args.lr)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
        else:
            print('=> no checkpoint found at {}'.format(args.resume))

    start = args.start_epoch
    end = start + args.epochs

    # DataParallel wraps the model; data_parallel() is the functional form and expects inputs
    para_model = torch.nn.DataParallel(model)
    for epoch in range(start, end):
        train(train_loader, para_model, optimizer, epoch)
        # test(test_loader, model, epoch)
        # do checkpointing
        torch.save({
            'epoch': epoch + 1,
            'state_dict': model.state_dict()
        }, '{}/checkpoint_{}.pth'.format(LOG_DIR, epoch))

        if test_display_triplet_distance:
            display_triplet_distance(model, train_loader,
                                     LOG_DIR + "/train_{}".format(epoch))
Code Example #4
def extraction(cfg):
    # cpu or gpu?
    if torch.cuda.is_available() and cfg.device is not None:
        device = torch.device(cfg.device)
    else:
        if not torch.cuda.is_available():
            print("hey man, buy a GPU!")
        device = torch.device("cpu")

    dataset = BaseDataset(path=cfg.dataset_path,
                          dataset=cfg.dataset,
                          mode=cfg.mode)
    data_loader = DataLoader(dataset,
                             cfg.batch_size,
                             shuffle=False,
                             num_workers=cfg.num_workers)

    if cfg.model_type == 'UniNet':
        featnet = FeatNet()
        featnet.load_state_dict(
            torch.load(cfg.featnet_path, map_location=device))
        featnet.to(device)
        masknet = MaskNet()
        masknet.load_state_dict(
            torch.load(cfg.masknet_path, map_location=device))
        masknet.to(device)

        with torch.no_grad():
            featnet.eval()
            masknet.eval()
            labels = []
            img_names = []
            labels_vec = np.zeros((len(dataset), dataset.class_num))
            features = np.zeros((len(dataset), 64, 512))
            masks = np.zeros((len(dataset), 3, 64, 512))
            # didx is the running sample offset into the preallocated arrays
            didx = 0
            for img_batch, label_batch, label_vec_batch, img_name_batch in tqdm(
                    data_loader, ncols=80, ascii=True):
                img_batch = img_batch.to(device)
                feature_batch = featnet(img_batch)
                mask_batch = masknet(img_batch)
                for idx in range(feature_batch.shape[0]):
                    labels_vec[didx + idx, :] = label_vec_batch[idx, :].numpy()
                    labels.append(label_batch[idx])
                    img_names.append(img_name_batch[idx])
                    features[didx + idx, :, :] = feature_batch[idx].cpu().numpy()
                    masks[didx + idx, :2, :, :] = mask_batch[idx].cpu().numpy()
                    # channel 2 holds a binary mask from the two-class softmax
                    mask = F.softmax(mask_batch[idx], dim=0).cpu().numpy()
                    masks[didx + idx, 2, :, :] = mask[0] < mask[1]
                # advance the offset by the actual batch size
                didx += feature_batch.shape[0]

    else:
        if cfg.model_type == 'maxout-feature':
            model = Maxout_feature()
        elif cfg.model_type == 'facenet':
            model = FaceModel(256)
        model.load_state_dict(torch.load(cfg.model_path, map_location=device))
        model.to(device)

        with torch.no_grad():
            model.eval()
            labels = []
            img_names = []
            labels_vec = np.zeros((len(dataset), dataset.class_num))
            features = np.zeros((len(dataset), 64, 512))
            masks = np.ones((len(dataset), 3, 64, 512))
            # didx is the running sample offset into the preallocated arrays
            didx = 0
            for img_batch, label_batch, label_vec_batch, img_name_batch in tqdm(
                    data_loader, ncols=80, ascii=True):
                img_batch = img_batch.to(device)
                feature_batch = model(img_batch)
                for idx in range(feature_batch.shape[0]):
                    labels_vec[didx + idx, :] = label_vec_batch[idx, :].numpy()
                    labels.append(label_batch[idx])
                    img_names.append(img_name_batch[idx])
                    features[didx + idx, :, :] = feature_batch[idx].cpu().numpy()
                # advance the offset by the actual batch size
                didx += feature_batch.shape[0]

    if cfg.save == 'mat':
        ft_path = 'feature/{}__{}.mat'.format(cfg.model, cfg.dataset)
        ft_load = {
            'features': features,
            'masks': masks,
            'labels_vec': labels_vec,
            'labels': labels
        }
        savemat(ft_path, ft_load)
    elif cfg.save == 'pic':
        if not os.path.exists('feature/{}__{}'.format(cfg.model, cfg.dataset)):
            os.makedirs('feature/{}__{}'.format(cfg.model, cfg.dataset))
        for idx in range(len(dataset)):
            feature_img = features[idx, :, :]
            feature_img = (feature_img - feature_img.min()) / (
                feature_img.max() - feature_img.min())
            Image.fromarray(feature_img * 255).convert('L').save(
                'feature/{}__{}/{}_feature.png'.format(cfg.model, cfg.dataset,
                                                       img_names[idx]))
            Image.fromarray(masks[idx, 2, :, :] * 255).convert('L').save(
                'feature/{}__{}/{}_mask.png'.format(cfg.model, cfg.dataset,
                                                    img_names[idx]))

    return features, masks, labels, labels_vec
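extraction only reads a handful of fields from cfg, so it can be driven without the project's config machinery. A hypothetical invocation; every path and name below is a placeholder:

from types import SimpleNamespace

cfg = SimpleNamespace(
    device='cuda:0',                  # or None to fall back to CPU
    dataset_path='data/iris',         # placeholder
    dataset='some-dataset',           # placeholder
    mode='test',
    batch_size=32,
    num_workers=4,
    model_type='UniNet',
    featnet_path='ckpt/featnet.pth',  # placeholder checkpoints
    masknet_path='ckpt/masknet.pth',
    model_path=None,                  # only used by the non-UniNet branch
    model='UniNet',                   # used to build the output paths
    save='mat',
)
features, masks, labels, labels_vec = extraction(cfg)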
Code Example #5
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = facec.detectMultiScale(gray, 1.3, 5)
        # Loop through all the faces detected
        identities = []
        for (x, y, w, h) in faces:
            x1, y1 = x, y
            x2, y2 = x + w, y + h
            # crop the detection from the grayscale frame, clamped to the image bounds
            face_image = gray[max(0, y1):min(height, y2),
                              max(0, x1):min(width, x2)]
            roi = cv2.resize(face_image, (150, 150))
            pred = cnn.predict_face(roi[np.newaxis, :, :, np.newaxis])

            cv2.putText(frame, pred, (x, y), font, 1, (255, 255, 0), 2)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
            fps.update()

        cv2.imshow('Filter', frame)
        if cv2.waitKey(1) == 27:  # Esc exits the capture loop
            break

    # stop the timer and report throughput once the loop exits
    fps.stop()
    cv2.destroyAllWindows()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))


if __name__ == '__main__':
    model = FaceModel("model.json", "model_weights.h5")
    start_app(model)
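The excerpt assumes facec, font, fps, height, and width were set up earlier in start_app. A plausible preamble using the standard OpenCV and imutils APIs; the variable names match the snippet, but the exact setup in the original project is not shown:

import cv2
import numpy as np
from imutils.video import FPS

facec = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
font = cv2.FONT_HERSHEY_SIMPLEX

cap = cv2.VideoCapture(0)
fps = FPS().start()
ret, frame = cap.read()
height, width = frame.shape[:2]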
Code Example #6
from model import Stage2Model, FaceModel, SelectNet_resnet
from tensorboardX import SummaryWriter
from dataset import HelenDataset
from torchvision import transforms
from preprocess import ToPILImage, ToTensor, OrigPad, Resize
from torch.utils.data import DataLoader
from helper_funcs import F1Score, calc_centroid, affine_crop, affine_mapback
import torch.nn.functional as F
import torchvision
import torch
import os

writer = SummaryWriter('log')
device = torch.device("cuda:6" if torch.cuda.is_available() else "cpu")
model1 = FaceModel().to(device)
model2 = Stage2Model().to(device)
select_model = SelectNet_resnet().to(device)
# load state
#save model at checkpoints_ABC/00ca488c/25.pth.tar
# epoch 25        error 0.0605    best_error 0.0589

# pathABC = os.path.join("/home/yinzi/data4/new_train/checkpoints_ABC/00ca488c", "best.pth.tar")
# pathABC = os.path.join("/home/yinzi/data4/new_train/checkpoints_ABC/c8c68e16", "best.pth.tar")
# pathABC = os.path.join("/home/yinzi/data4/new_train/checkpoints_ABC/ea3c3972", "best.pth.tar")
pathABC = os.path.join("/home/yinzi/data4/new_train/checkpoints_ABC/09d01660",
                       "best.pth.tar")

pathAB = os.path.join("/home/yinzi/data4/new_train/checkpoints_AB/6b4324c6",
                      "best.pth.tar")
pathB = os.path.join("/home/yinzi/data4/new_train/checkpoints_AB/6b4324c6",
                     'best.pth.tar')
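The "# load state" comment suggests these checkpoints are then restored into the models. A hedged sketch; the key layout inside the .pth.tar files is not shown, so the commented load line is an assumption:

ckpt = torch.load(pathABC, map_location=device)
print(ckpt.keys())  # inspect which state dicts the checkpoint actually holds
# if the checkpoint stores a single 'state_dict' (an assumption):
# model1.load_state_dict(ckpt['state_dict'])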
Code Example #7
File: facial_expression.py Project: Djack1028/face
#**********************Need replace the data source*********
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Input image dimensions.
input_shape = x_train.shape[1:]

# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
#**********************Need replace the data source*********

# prepare the training model
faceModel = FaceModel(net_type=net_type,
                      input_shape=input_shape,
                      drop_out=opm_drop_out,
                      drop_out_ratio=drop_out_ratio,
                      batch_normalization=opm_batch_normalization,
                      activiation=activiation,
                      num_class=num_facial_expression)

model = faceModel.get_model_instance()
model.compile(loss='categorical_crossentropy',
              optimizer=opm,
              metrics=['accuracy', metric.top_k_categorical_accuracy])
model.summary()
print(model_type)

# Prepare model saving directory.
model_saved_path = utils.get_saved_model_path(net_type)
print(model_saved_path)
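model_saved_path would typically be wired into a Keras checkpoint callback so the best weights survive training. A minimal sketch with the standard Keras API; the fit hyperparameters below are placeholders:

from tensorflow.keras.callbacks import ModelCheckpoint

checkpoint = ModelCheckpoint(filepath=model_saved_path,
                             monitor='val_accuracy',
                             save_best_only=True,
                             verbose=1)
model.fit(x_train, y_train,
          batch_size=32,    # placeholder hyperparameters
          epochs=10,
          validation_data=(x_test, y_test),
          callbacks=[checkpoint])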
Code Example #8
File: train.py Project: Veligura/CourseAI-1
            k = k % imgCount
            if not k:
                random.shuffle(imgNames)
            rgb1Batch[b, :, :, :], labelWVBatch[b, :, :, 0] = prepareImage(k)
            k += 1
        yield rgb1Batch, labelWVBatch


batch_size = 4

traingen = generator(imgList[:trainCount], batch_size=batch_size)
testgen = generator(imgList[trainCount:], batch_size=batch_size)

testCount = len(imgList[trainCount:])

model = FaceModel()
model.summary()
model.load_weights('./weights/faceModel.hdf5')
if not testMode:
    model.fit_generator(generator=traingen,
                        validation_data=testgen,
                        steps_per_epoch=int(trainCount / batch_size),
                        validation_steps=int(testCount / batch_size),
                        epochs=30000,
                        verbose=1,
                        callbacks=[
                            ModelCheckpoint('./weights/faceModel.hdf5',
                                            verbose=1,
                                            monitor='val_loss',
                                            save_best_only=False)
                        ])
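fit_generator was deprecated in TensorFlow 2.1; Model.fit accepts generators directly with the same arguments. On a recent TF the call above can be written as:

model.fit(traingen,
          validation_data=testgen,
          steps_per_epoch=int(trainCount / batch_size),
          validation_steps=int(testCount / batch_size),
          epochs=30000,
          verbose=1,
          callbacks=[
              ModelCheckpoint('./weights/faceModel.hdf5',
                              verbose=1,
                              monitor='val_loss',
                              save_best_only=False)
          ])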
Code Example #9
from itertools import zip_longest

import tensorflow as tf
from tqdm import tqdm

from model import FaceModel
from configurations import DATASET_IMAGES
from helpers import load_tf_image


def grouper(iterable, n, fillvalue=None):
    "Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx"
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)


if __name__ == '__main__':
    # Create path:descriptor lookup

    model = FaceModel()
    model.load_weights('./checkpoints/my_checkpoint')

    image_paths = []
    descs = []
    for person_folder in DATASET_IMAGES.iterdir():
        if not person_folder.is_dir():
            continue

        for img_path in person_folder.glob('*.jpg'):
            image_paths.append(img_path)

    # Process 200 images at once
    batches = list(grouper(image_paths, 200))
    for batch in tqdm(batches):
        images = tf.stack([
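The listing is truncated mid-statement. One plausible completion of the loop body, assuming load_tf_image maps a path to a decoded float tensor and that calling the model returns the descriptors; note the filter for the None fill values grouper pads the last batch with:

        # hypothetical completion of the truncated loop body
        images = tf.stack([load_tf_image(str(p))
                           for p in batch if p is not None])
        descs.extend(model(images).numpy())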
Code Example #10
    # optionally resume from a checkpoint
    '''
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint {}'.format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
        else:
            checkpoint = None
            print('=> no checkpoint found at {}'.format(args.resume))
    '''

    # instantiate model and initialize weights
    model = FaceModel(embedding_size=args.embedding_size,
                      num_classes=len(train_dir.classes))

    if args.cuda:
        model.cuda()

    optimizer = create_optimizer(model, args.lr)

    start = args.start_epoch
    end = start + args.epochs

    for epoch in range(start, end):
        train(train_loader, model, optimizer, epoch)
        test(test_loader, model, epoch)
        testaccuracy(testaccuracy_loader, model, epoch)
        if test_display_triplet_distance:
            display_triplet_distance_test(model, test_loader,
                                          LOG_DIR + "/test_{}".format(epoch))
Code Example #11
def main():
    args = parser.parse_args()

    cuda = torch.cuda.is_available()
    torch.manual_seed(args.seed)
    if cuda:
        torch.cuda.manual_seed(args.seed)

    # 1. dataset
    root = args.root
    kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
    test_transforms = transforms.Compose([
        transforms.Resize(96),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    test_dataset = ImageFolder(root, transform=test_transforms)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.test_batch_size,
                                              shuffle=False,
                                              **kwargs)

    val_iterator = validation_iterator(test_loader)

    # 2. model
    #train_dir = FaceDataset(dir='/media/lior/LinuxHDD/datasets/MSCeleb-cleaned',n_triplets=10)

    print('construct model')
    model = FaceModel(embedding_size=128, num_classes=3367, pretrained=False)

    model = model.cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)

            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}'".format(args.resume))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # extract feature
    print('extracting feature')
    embeds = []
    labels = []
    for data, target in val_iterator:
        if cuda:
            data, target = data.cuda(), target.cuda(non_blocking=True)
        # compute output; no gradients are needed at inference time
        with torch.no_grad():
            output = model(data)

        embeds.append(output.cpu().numpy())
        labels.append(target.cpu().numpy())

    embeds = np.vstack(embeds)
    labels = np.hstack(labels)

    print('embeds shape is ', embeds.shape)
    print('labels shape is ', labels.shape)

    # prepare dict for display
    namedict = dict()
    for i in range(10):
        namedict[i] = str(i)

    visual_feature_space(embeds, labels, len(test_dataset.classes), namedict)
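visual_feature_space is project code that is not shown here. One plausible implementation, reducing the embeddings to 2-D with t-SNE and coloring points by label; this is purely illustrative and the real helper may differ:

import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

def visual_feature_space(embeds, labels, num_classes, namedict):
    # project the high-dimensional embeddings down to 2-D for plotting
    points = TSNE(n_components=2).fit_transform(embeds)
    for c in range(num_classes):
        pts = points[labels == c]
        plt.scatter(pts[:, 0], pts[:, 1], s=4, label=namedict.get(c, str(c)))
    plt.legend(markerscale=3)
    plt.show()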
Code Example #12
def main():
    parser = argparse.ArgumentParser(
        description='Classifiar using triplet loss.')
    parser.add_argument('--CVDs',
                        type=str,
                        default='0,1,2,3',
                        metavar='CUDA_VISIBLE_DEVICES',
                        help='CUDA_VISIBLE_DEVICES')
    parser.add_argument(
        '--train-set',
        type=str,
        default='/home/zili/memory/FaceRecognition-master/data/mnist/train',
        metavar='dir',
        help='path of train set.')
    parser.add_argument(
        '--test-set',
        type=str,
        default='/home/zili/memory/FaceRecognition-master/data/mnist/test',
        metavar='dir',
        help='path of test set.')
    parser.add_argument(
        '--train-set-csv',
        type=str,
        default='/home/zili/memory/FaceRecognition-master/data/mnist/train.csv',
        metavar='file',
        help='path of train set.csv.')
    parser.add_argument(
        '--test-set-csv',
        type=str,
        default='/home/zili/memory/FaceRecognition-master/data/mnist/test.csv',
        metavar='file',
        help='path of test set.csv.')
    parser.add_argument('--num-triplet',
                        type=int,
                        default=10000,
                        metavar='N',
                        help='number of triplets in the dataset (default: 10000)')
    parser.add_argument('--train-batch-size',
                        type=int,
                        default=256,
                        metavar='N',
                        help='input batch size for training (default: 256)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=512,
                        metavar='N',
                        help='input batch size for testing (default: 512)')
    parser.add_argument('--epochs',
                        type=int,
                        default=100,
                        metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--embedding-size',
                        type=int,
                        default=256,
                        metavar='N',
                        help='embedding size of model (default: 256)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.05,
                        metavar='LR',
                        help='learning rate (default: 0.05)')
    parser.add_argument('--margin',
                        type=float,
                        default=1.0,
                        metavar='margin',
                        help='loss margin (default: 1.0)')
    parser.add_argument('--kneighbor',
                        type=int,
                        default=20,
                        metavar='N',
                        help='how many neighbor in testing')
    parser.add_argument('--num-classes',
                        type=int,
                        default=10,
                        metavar='N',
                        help='classes number of dataset')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.8,
                        metavar='M',
                        help='SGD momentum (default: 0.8)')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=4,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--model-name',
                        type=str,
                        default='resnet34',
                        metavar='M',
                        help='model name (default: resnet34)')
    parser.add_argument('--dropout-p',
                        type=float,
                        default=0.2,
                        metavar='D',
                        help='Dropout probability (default: 0.2)')
    parser.add_argument('--check-path',
                        type=str,
                        default='checkpoints3',
                        metavar='C',
                        help='Checkpoint path')
    parser.add_argument(
        '--is-semihard',
        type=bool,
        default=True,
        metavar='R',
        help='whether triplets are selected in a semi-hard way.')
    parser.add_argument('--is-pretrained',
                        type=bool,
                        default=False,
                        metavar='R',
                        help='whether model is pretrained.')

    args = parser.parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.CVDs

    output1 = 'main' + str(datetime.datetime.now())
    f = open(args.check_path + os.path.sep + output1 + '.txt', 'w+')

    l2_dist = PairwiseDistance(2)
    writer = SummaryWriter()

    print('Loading model...')

    model = FaceModel(embedding_size=args.embedding_size,
                      num_classes=args.num_classes,
                      pretrained=args.is_pretrained)
    f.write("            model: {}".format(model.model) + '\r\n')

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()
        cudnn.benchmark = True

    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=1e-5)

    # optimizer = optim.Adam(model.parameters(), lr=args.lr)

    print('start training...')
    features, labels, clf = feature(model, args)

    for epoch in range(args.epochs):
        if epoch % 5 == 0:
            file_operation(f, args, optimizer)

        if (epoch + 1) % 2 == 0:
            args.lr = args.lr / 3
            update_lr(optimizer, args.lr)

        generate_csv(args)
        train(epoch, model, optimizer, args, f, writer, features)
        features, labels, clf = feature(model, args)
        validate(epoch, model, clf, args, f, writer)

        f.write('\r\n')
    torch.save(model, args.check_path + os.path.sep + output1 + '.pkl')
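One caveat in this parser: type=bool does not parse flags the way the defaults suggest, because bool() of any non-empty string (including 'False') is True. A common fix is an explicit converter:

def str2bool(v):
    # argparse-friendly boolean conversion
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected')

parser.add_argument('--is-semihard', type=str2bool, default=True,
                    help='whether triplets are selected in a semi-hard way.')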
Code Example #13
    train_dataset: tf.data.Dataset = tf.data.Dataset.from_generator(
        lambda: generate_triplet(people_train_multiple_images,
                                 people_train_names),
        (tf.float32, tf.float32, tf.float32))
    train_dataset = train_dataset.batch(BATCH_SIZE)
    # Make sure cpu prepares data in background https://www.tensorflow.org/beta/guide/data_performance
    train_dataset = train_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)
    test_dataset: tf.data.Dataset = tf.data.Dataset.from_generator(
        lambda: generate_triplet(people_test_multiple_images,
                                 people_test_names),
        (tf.float32, tf.float32, tf.float32))
    test_dataset = test_dataset.batch(BATCH_SIZE)
    test_dataset = test_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)

    model = FaceModel()
    # Optionally load weights here
    if os.path.exists('./checkpoints'):
        print('Loading weights')
        model.load_weights('./checkpoints/my_checkpoint')
    model.build(input_shape=(None, IMG_SIZE, IMG_SIZE, 3))
    model.summary()

    train_loss = tf.keras.metrics.Mean(name='train_loss')
    test_loss = tf.keras.metrics.Mean(name='test_loss')
    optimizer = tf.keras.optimizers.Adam(0.0001)
    epochs = 300
    batch_count = 5

    # Just iterate over the dataset (TensorFlow 2.0)
    for epoch in range(epochs):
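The loop body is cut off here. A minimal sketch of what a TF 2.x triplet training step over train_dataset could look like, assuming calling the model returns embeddings and using a plain squared-distance triplet loss; the margin value and the loss formulation are assumptions:

    MARGIN = 0.2  # hypothetical triplet margin

    @tf.function
    def train_step(anchor, positive, negative):
        with tf.GradientTape() as tape:
            a, p, n = model(anchor), model(positive), model(negative)
            pos_dist = tf.reduce_sum(tf.square(a - p), axis=-1)
            neg_dist = tf.reduce_sum(tf.square(a - n), axis=-1)
            loss = tf.reduce_mean(tf.maximum(pos_dist - neg_dist + MARGIN, 0.0))
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        return loss

    for epoch in range(epochs):
        for anchor, positive, negative in train_dataset:
            train_loss(train_step(anchor, positive, negative))
        print('Epoch {}: loss {:.4f}'.format(epoch, float(train_loss.result())))
        train_loss.reset_states()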
Code Example #14
from model import Stage2Model, FaceModel, SelectNet
from tensorboardX import SummaryWriter
from dataset import HelenDataset
from torchvision import transforms
from preprocess import ToPILImage, ToTensor, OrigPad, Resize
from torch.utils.data import DataLoader
from helper_funcs import F1Score, calc_centroid, affine_crop, affine_mapback
import torch.nn.functional as F
import torchvision
import torch
import os

writer = SummaryWriter('log')
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
model1 = FaceModel().to(device)
model2 = Stage2Model().to(device)
select_model = SelectNet().to(device)
# load state
# path_model2 = os.path.join("/home/yinzi/data4/new_train/checkpoints_C/02a38440", "best.pth.tar")
# path_model2 = os.path.join("/home/yinzi/data4/new_train/checkpoints_C/ca8f5c52", "best.pth.tar")
# path_model2 = os.path.join("/home/yinzi/data4/new_train/checkpoints_C/b9d37dbc", "best.pth.tar")
# path_model2 = os.path.join("/home/yinzi/data4/new_train/checkpoints_C/49997f1e", "best.pth.tar")
# path_model2 = os.path.join("/home/yinzi/data4/new_train/checkpoints_C/396e4702", "best.pth.tar")
path_model2 = os.path.join(
    "/home/yinzi/data4/new_train/checkpoints_C/396e4702", "best.pth.tar")
# path_model2 = os.path.join("/home/yinzi/data4/new_train/checkpoints_C/1daed2c2", "best.pth.tar")
# path_model2 = os.path.join("/home/yinzi/data4/new_train/checkpoints_ABC/ea3c3972", "best.pth.tar")
path_select = os.path.join(
    "/home/yinzi/data4/new_train/checkpoints_AB/6b4324c6", "best.pth.tar")
# 396e4702 mouth score: 0.9166, overall 0.865 (0.869)
# 396e4702 best standalone score: 0.8714