def predict_image_class(query_folder, target_features, cache_dict, class_minimum):
    # Find the query image in `query_folder` whose features are closest (in
    # Euclidean distance) to `target_features`, and record the best
    # (distance, folder) pair in `class_minimum`. Relies on module-level
    # `query_path`, `cache_path`, `model` and `transform`.
    minimum = (float('inf'), 0)
    for query_image_path in os.listdir(os.path.join(query_path, query_folder)):
        t = time.time()
        query = os.path.join(query_path, query_folder, query_image_path)
        t1 = time.time() - t
        cache_dict, query_features = memory_cache(
            cache_dict, model.model, query,
            os.path.join(cache_path, query_folder, query_image_path + '.pth'),
            transform)
        t2 = time.time() - t
        y = LSHash.euclidean_dist(target_features.cpu().numpy()[0],
                                  query_features.cpu().numpy()[0])
        t3 = time.time() - t
        # Timings for path building, cached feature lookup, and distance computation.
        print(t1, t2, t3)
        if y < minimum[0]:
            minimum = (y, query_folder)
    class_minimum[query_folder] = minimum
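# Hypothetical driver for predict_image_class, not part of the original code:
# it assumes `target_features` has already been computed for a single target
# crop and that `query_path`, `cache_path`, `model` and `transform` exist at
# module level, as the function above expects. Because every call writes to a
# different key of `class_minimum`, the per-folder calls could also be
# dispatched with concurrent.futures threads.
def nearest_class_sketch(target_features, cache_dict):
    class_minimum = {}
    for query_folder in os.listdir(query_path):
        predict_image_class(query_folder, target_features, cache_dict, class_minimum)
    # Return the (folder, (distance, folder)) entry with the smallest distance.
    return min(class_minimum.items(), key=lambda kv: kv[1][0])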
# Fragment of the detection loop: encode the crop `target_image_ori` for box
# `b`, find its nearest query class, and append it to the VOC annotation `root`.
target_image = transform(target_image_ori)
x = torch.zeros((1, 3, 224, 224))
x[0] = target_image
target_features = model.model._forward_impl(x.cuda())
minimum = (float('inf'), 0)
for query_folder in os.listdir(query_path):
    for query_image_path in os.listdir(
            os.path.join(query_path, query_folder)):
        query = os.path.join(query_path, query_folder, query_image_path)
        cache_dict, query_features = memory_cache(
            cache_dict, model.model, query,
            os.path.join(cache_path, query_folder, query_image_path + '.pth'),
            transform)
        y = LSHash.euclidean_dist(
            target_features.cpu().numpy()[0],
            query_features.cpu().numpy()[0])
        if y < minimum[0]:
            minimum = (y, query_folder)
if minimum[0] > 1:
    # No query image is close enough; fall back to the generic 'obj' label.
    minimum = (minimum[0], 'obj')
# print(minimum)
obj = ET.SubElement(root, 'object')
ET.SubElement(obj, 'name').text = minimum[1]
bndbx = ET.SubElement(obj, 'bndbox')
ET.SubElement(bndbx, 'xmin').text = str(b[0])
ET.SubElement(bndbx, 'ymin').text = str(b[1])
ET.SubElement(bndbx, 'xmax').text = str(b[2])
ET.SubElement(bndbx, 'ymax').text = str(b[3])
print(time.time() - start_time)
# cv2.imshow(f'im_{i}', draw)
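# Hypothetical follow-up, not in the original code: once every box for the
# current image has been appended to `root` as above, the annotation can be
# written out with the standard ElementTree API. `annotation_dir` and
# `image_name` are made-up names used only for illustration.
tree = ET.ElementTree(root)
tree.write(os.path.join(annotation_dir, f'{image_name}.xml'))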
def predict():
    # Evaluate classification accuracy: for every target image, find the
    # nearest query image across all query folders and use that folder as the
    # predicted class.
    model = Model(ResNet(predict=True))
    model.compile(torch.optim.SGD(model.model.parameters(),
                                  lr=0.001,
                                  momentum=0.9,
                                  weight_decay=1e-4),
                  ContrastiveLoss(),
                  metric=None,
                  device='cuda')
    model.load_weights(
        '/home/palm/PycharmProjects/seven2/snapshots/pairs/5/epoch_1_0.012463876953125.pth'
    )
    model.model.eval()
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform = transforms.Compose(
        [transforms.Resize((224, 224)),
         transforms.ToTensor(),
         normalize])
    target_path = '/home/palm/PycharmProjects/seven/images/test6/train'
    query_path = '/home/palm/PycharmProjects/seven/images/cropped6/train'
    cache_path = '/home/palm/PycharmProjects/seven/caches'
    cache_dict = {}
    predicted_dict = {}
    correct = 0
    count = 0
    with torch.no_grad():
        for target_image_folder in os.listdir(target_path):
            # Skip target classes that have no query (reference) images.
            if target_image_folder not in os.listdir(query_path):
                continue
            predicted_dict[target_image_folder] = {}
            for target_image_path in os.listdir(
                    os.path.join(target_path, target_image_folder)):
                count += 1
                target = os.path.join(target_path, target_image_folder,
                                      target_image_path)
                target_image_ori = Image.open(target)
                target_image = transform(target_image_ori)
                x = torch.zeros((1, 3, 224, 224))
                x[0] = target_image
                target_features = model.model._forward_impl(x.cuda())
                minimum = (float('inf'), 0)
                for query_folder in os.listdir(query_path):
                    for query_image_path in os.listdir(
                            os.path.join(query_path, query_folder)):
                        query = os.path.join(query_path, query_folder,
                                             query_image_path)
                        cache_dict, query_features = memory_cache(
                            cache_dict, model.model, query,
                            os.path.join(cache_path, query_folder,
                                         query_image_path + '.pth'),
                            transform)
                        y = LSHash.euclidean_dist(
                            target_features.cpu().numpy()[0],
                            query_features.cpu().numpy()[0])
                        if y < minimum[0]:
                            minimum = (y, query_folder)
                print(*minimum, target_image_folder)
                predicted_dict[target_image_folder][target_image_path] = minimum[1]
                if minimum[1] == target_image_folder:
                    correct += 1
                print(correct / count)  # running accuracy
    with open('cls_eval.pk', 'wb') as f:
        pk.dump(predicted_dict, f)
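# `memory_cache` is called above but its body is not shown here. This is a
# minimal sketch of the behaviour it is assumed to have: keep features in an
# in-memory dict keyed by image path, fall back to a .pth file on disk, and
# only run the network on a complete miss. The name `memory_cache_sketch` and
# the exact signature are assumptions for illustration.
def memory_cache_sketch(cache_dict, net, image_path, cache_file, transform):
    if image_path in cache_dict:
        return cache_dict, cache_dict[image_path]
    if os.path.exists(cache_file):
        # Disk hit: load the previously saved feature tensor.
        features = torch.load(cache_file)
    else:
        # Complete miss: run the network once and persist the result.
        image = Image.open(image_path)
        x = torch.zeros((1, 3, 224, 224))
        x[0] = transform(image)
        features = net._forward_impl(x.cuda())
        os.makedirs(os.path.dirname(cache_file), exist_ok=True)
        torch.save(features, cache_file)
    cache_dict[image_path] = features
    return cache_dict, features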