Example #1
0
# Configure TensorFlow (1.x API) to allocate GPU memory on demand
# (allow_growth) instead of reserving all of it up front, then register
# the session as the Keras backend session so Keras models share it.
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
keras.backend.set_session(sess)

if __name__ == '__main__':
    # Siamese ResNet wrapped in the project's Model helper. The optimizer
    # and loss are passed to compile() even though this script only runs
    # inference (predict=True, eval() below).
    model = Model(ResNet(predict=True))
    model.compile(torch.optim.SGD(model.model.parameters(),
                                  lr=0.001,
                                  momentum=0.9,
                                  weight_decay=1e-4),
                  ContrastiveLoss(),
                  metric=None,
                  device='cuda')
    model.load_weights(
        '/home/palm/PycharmProjects/seven2/snapshots/pairs/4/epoch_0_0.016697616640688282.pth'
    )
    model.model.eval()
    # Standard ImageNet mean/std normalisation for the torch branch.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose(
        [transforms.Resize((224, 224)),
         transforms.ToTensor(), normalize])

    labels_to_names = {0: 'obj'}
    # Keras model loaded alongside the torch embedder — presumably the
    # detection/classification stage; verify against the unseen remainder.
    prediction_model = models.load_model(
        '/home/palm/PycharmProjects/seven2/snapshots/infer_model_temp.h5')

    query_path = '/home/palm/PycharmProjects/seven/images/cropped5/train'
    cache_path = '/home/palm/PycharmProjects/seven/caches'
    # Per-image embedding cache (populated later; this chunk is truncated).
    cache_dict = {}
Example #2
0
def predict():
    """Evaluate the Siamese ResNet by nearest-neighbour classification.

    For every image under ``target_path`` whose class folder also exists
    under ``query_path``, embed the image with the model and find the
    closest query embedding by Euclidean distance; the predicted class is
    that query image's folder name. Prints one line per image, prints the
    final accuracy, and pickles the prediction mapping to ``cls_eval.pk``.
    """
    model = Model(ResNet(predict=True))
    # Optimizer/loss are required by compile() but unused for inference.
    model.compile(torch.optim.SGD(model.model.parameters(),
                                  lr=0.001,
                                  momentum=0.9,
                                  weight_decay=1e-4),
                  ContrastiveLoss(),
                  metric=None,
                  device='cuda')
    model.load_weights(
        '/home/palm/PycharmProjects/seven2/snapshots/pairs/5/epoch_1_0.012463876953125.pth'
    )
    model.model.eval()

    # Standard ImageNet mean/std normalisation, matching the backbone.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose(
        [transforms.Resize((224, 224)),
         transforms.ToTensor(), normalize])

    target_path = '/home/palm/PycharmProjects/seven/images/test6/train'
    query_path = '/home/palm/PycharmProjects/seven/images/cropped6/train'
    cache_path = '/home/palm/PycharmProjects/seven/caches'
    cache_dict = {}
    predicted_dict = {}
    correct = 0
    count = 0
    # Hoisted out of the loops: the original re-ran os.listdir(query_path)
    # once per target image (and per membership test).
    query_folders = os.listdir(query_path)
    query_folder_set = set(query_folders)
    with torch.no_grad():
        for target_image_folder in os.listdir(target_path):
            # Skip target classes that have no query examples to match.
            if target_image_folder not in query_folder_set:
                continue
            predicted_dict[target_image_folder] = {}
            for target_image_path in os.listdir(
                    os.path.join(target_path, target_image_folder)):
                count += 1
                target = os.path.join(target_path, target_image_folder,
                                      target_image_path)
                target_image_ori = Image.open(target)
                target_image = transform(target_image_ori)
                x = torch.zeros((1, 3, 224, 224))
                x[0] = target_image
                target_features = model.model._forward_impl(x.cuda())
                # (best distance so far, predicted folder name).
                minimum = (float('inf'), 0)
                for query_folder in query_folders:
                    for query_image_path in os.listdir(
                            os.path.join(query_path, query_folder)):
                        query = os.path.join(query_path, query_folder,
                                             query_image_path)
                        # memory_cache returns a cached embedding when this
                        # query image was embedded before, else computes and
                        # stores it at the given .pth path.
                        cache_dict, query_features = memory_cache(
                            cache_dict, model.model, query,
                            os.path.join(cache_path, query_folder,
                                         query_image_path + '.pth'), transform)
                        y = LSHash.euclidean_dist(
                            target_features.cpu().numpy()[0],
                            query_features.cpu().numpy()[0])
                        if y < minimum[0]:
                            minimum = (y, query_folder)
                print(*minimum, target_image_folder)
                predicted_dict[target_image_folder][
                    target_image_path] = minimum[1]
                if minimum[1] == target_image_folder:
                    correct += 1
    # Bug fix: the original printed count / correct — the inverse of
    # accuracy — and raised ZeroDivisionError when correct == 0.
    print(correct / count if count else 0.0)
    # Bug fix: the original leaked the pickle file handle.
    with open('cls_eval.pk', 'wb') as f:
        pk.dump(predicted_dict, f)
Example #3
0
        # Crop is wider than tall: pad equally on top and bottom with the
        # constant (black) border to make it square. `p` is computed in the
        # part of this function above this chunk — not visible here.
        cropped_image = cv2.copyMakeBorder(cropped_image, p, p, 0, 0, cv2.BORDER_CONSTANT)
    else:
        # Crop is taller than wide: pad left/right by half the difference
        # between height (y2 - y1) and width (x2 - x1).
        p = ((y2 - y1) - (x2 - x1)) // 2
        cropped_image = cv2.copyMakeBorder(cropped_image, 0, 0, p, p, cv2.BORDER_CONSTANT)
    return cropped_image

if __name__ == '__main__':
    # Siamese ResNet in inference mode; optimizer/loss are passed to
    # compile() even though this script only predicts.
    model = Model(ResNet(predict=True))
    model.compile(torch.optim.SGD(model.model.parameters(),
                                  lr=0.001,
                                  momentum=0.9,
                                  weight_decay=1e-4),
                  ContrastiveLoss(),
                  metric=None,
                  device='cuda')
    model.load_weights('/home/palm/PycharmProjects/seven2/snapshots/pairs/5/epoch_1_0.012463876953125.pth')
    model.model.eval()
    # Standard ImageNet mean/std normalisation for the torch branch.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose([transforms.Resize((224, 224)),
                                    transforms.ToTensor(),
                                    normalize])

    # classes.csv rows look like "<name>,<label>"; build both directions
    # of the mapping (list index -> name, and name -> integer label).
    labels_to_names = [x.split(',')[0] for x in open('/home/palm/PycharmProjects/seven2/anns/classes.csv').read().split('\n')[:-1]]
    prediction_model = models.load_model('/home/palm/PycharmProjects/seven2/snapshots/infer_model_temp.h5')
    names_to_labels = {}
    for x in open('/home/palm/PycharmProjects/seven2/anns/classes.csv').read().split('\n')[:-1]:
        names_to_labels[x.split(',')[0]] = int(x.split(',')[1])
    query_path = '/home/palm/PycharmProjects/seven/images/cropped7/train'
    cache_path = '/home/palm/PycharmProjects/seven/caches'
    # Per-image embedding cache (populated later; this chunk is truncated).
    cache_dict = {}
Example #4
0
    """ This is a hot function, hence some optimizations are made. """
    diff = np.array(x) - y
    return np.sqrt(np.dot(diff, diff))


if __name__ == '__main__':
    # Siamese ResNet in inference mode; optimizer/loss are passed to
    # compile() even though this script only embeds images.
    model = Model(ResNet(predict=True))
    model.compile(torch.optim.SGD(model.model.parameters(),
                                  lr=0.001,
                                  momentum=0.9,
                                  weight_decay=1e-4),
                  ContrastiveLoss(),
                  metric=None,
                  device='cuda')
    model.load_weights(
        '/home/palm/PycharmProjects/seven2/snapshots/pairs/3/epoch_0_0.03454810580774366.pth'
    )
    model.model.eval()
    # Standard ImageNet mean/std normalisation, matching the backbone.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose(
        [transforms.Resize((224, 224)),
         transforms.ToTensor(), normalize])

    # Locality-sensitive-hash index over 1024-dim embeddings.
    lsh = LSHash(hash_size=16, input_dim=1024, num_hashtables=5)

    cache_folder = '/home/palm/PycharmProjects/seven/caches'
    with torch.no_grad():
        # Chunk is truncated here — the opened image is presumably embedded
        # and queried against the LSH index below.
        target_image_ori = Image.open(
            '/home/palm/PycharmProjects/seven/images/cropped2/unknown/obj/0_036.jpg'
        )
Example #5
0
    def __str__(self):
        return 'acc()'


if __name__ == '__main__':
    # Number of existing runs; used below as the new snapshot sub-folder.
    save_no = len(os.listdir('./snapshots/pairs'))
    impath = '/home/palm/PycharmProjects/seven/images/cropped3/train'
    # Training setup: SGD with momentum and weight decay, contrastive loss
    # for the Siamese pairs.
    model = Model(ResNet(zero_init_residual=False))
    model.compile(torch.optim.SGD(model.model.parameters(),
                                  lr=0.001,
                                  momentum=0.9,
                                  weight_decay=1e-4),
                  ContrastiveLoss(),
                  metric=None,
                  device='cuda')
    # Warm-start from a base checkpoint; optimizer state is not restored.
    model.load_weights('./snapshots/base.pth', load_opt=False)

    # Standard ImageNet mean/std normalisation.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # Training augmentation: resize, random resized crop to 224, and
    # horizontal/vertical flips before tensor conversion.
    train_datagen = DirectorySiameseLoader(
        impath,
        transforms.Compose([
            transforms.Resize(256),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.ToTensor(), normalize
        ]))
    # NOTE(review): get_dset(8, 1) arguments look like batch size and
    # workers — confirm against DirectorySiameseLoader.
    train_generator = train_datagen.get_dset(8, 1)
    os.makedirs(f'./snapshots/pairs/{save_no}', exist_ok=True)