Example #1
def visualize_similarity_smallset(in_dir, out_dir, model_path):
    
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    
    # load model
    model_state_dict = torch.load(model_path)
    model = network.resnet_50_embedding()
    model.load_state_dict(model_state_dict['model_state_dict'])
    model = model.cuda()
    model.eval()  # switch to inference mode (freeze batch-norm stats, disable dropout)
    
    list_dir = os.listdir(in_dir)
    
    for d in list_dir:
        in_path = os.path.join(in_dir, d)
        if not os.path.isdir(in_path):
            continue
        
        out_path = os.path.join(out_dir, d)
        if not os.path.isdir(out_path):
            os.mkdir(out_path)
    
        dataset = OPENFusionStereoDatasetPath(in_path, transform=image_transform)
    
        # Run the images through the network, get last conv features
        fvecs, maps, labels, image_path_list = embed(dataset,model)
        
        maps = np.moveaxis(maps,1,3)
        #fvecs = np.array(fvecs)
        #labels = np.array(labels)
    
        # Compute the spatial similarity maps (returns a heatmap that's the size of the last conv layer)
        for i in range(len(image_path_list)):
            for j in range(len(image_path_list)):
                if abs(i-j) > 2:
                    continue
                if i == j:
                    continue
                conv1 = maps[i]
                conv2 = maps[j]
                conv1 = conv1.reshape(-1,conv1.shape[-1])
                conv2 = conv2.reshape(-1,conv2.shape[-1])
                heatmap1, heatmap2 = compute_spatial_similarity(conv1, conv2)
                
                im1_path = image_path_list[i][0]
                im2_path = image_path_list[j][0]
                
                # Combine the images with the (interpolated) similarity heatmaps.
                im1_with_similarity = combine_image_and_heatmap_noninter(load_and_resize(im1_path),heatmap1)
                im2_with_similarity = combine_image_and_heatmap_noninter(load_and_resize_grayscale(im2_path),heatmap2)
                
                save_img = combine_horz([im1_with_similarity,im2_with_similarity])
                
                save_img.save(os.path.join(out_path,'b{}-{}_t{}-{}.png'.format(os.path.basename(im1_path)[:-4], i, os.path.basename(im2_path)[:-4], j)))
                #Image.fromarray(im2_with_similarity).save(os.path.join(out_path,'b{}_t{}.png'.format(labels[j],labels[i])))
    
    return
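
The helper `compute_spatial_similarity` is defined elsewhere in this project. As a rough orientation, here is a minimal sketch of the usual formulation: each input is an (H*W, D) array of last-conv features, and each heatmap cell aggregates that location's similarity against every location of the other image. The aggregation rule and the square-grid reshape are assumptions, not the project's actual implementation:

import numpy as np

def compute_spatial_similarity_sketch(conv1, conv2):
    """Sketch: per-location similarity between two (H*W, D) feature maps.
    heatmap1[p] sums the similarity of location p in image 1 against every
    location of image 2, and vice versa; assumes a square conv grid."""
    # L2-normalize each spatial feature vector
    conv1 = conv1 / (np.linalg.norm(conv1, axis=1, keepdims=True) + 1e-8)
    conv2 = conv2 / (np.linalg.norm(conv2, axis=1, keepdims=True) + 1e-8)
    sim = conv1 @ conv2.T                      # (H*W, H*W) pairwise similarities
    side1 = int(np.sqrt(conv1.shape[0]))
    side2 = int(np.sqrt(conv2.shape[0]))
    heatmap1 = sim.sum(axis=1).reshape(side1, side1)
    heatmap2 = sim.sum(axis=0).reshape(side2, side2)
    return heatmap1, heatmap2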
Example #2
def grid_match_jointdata_scan_whole_image(rgb_img_path, thermal_img_path, out_dir, model_path):
    
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    
    imgSize = 400
    gridSize = 256
    stepSize = 32
    # init grid pool, return: vec of box coordinates
    grid_pool = init_grid_pool_largeGrid(imgSize, gridSize, stepSize)
    
        
    # init image sets based on the grid pool, one for each sensor
    rgb_grid_data = GridMatchDatasetPath(rgb_img_path, grid_pool, imgSize, image_transform)
    thermal_grid_data = GridMatchDatasetPath(thermal_img_path, grid_pool, imgSize, image_transform)
    
    # load model
    model_state_dict = torch.load(model_path)
    model = network.resnet_50_embedding()
    # remove the `module.` prefix that nn.DataParallel adds to state-dict keys
    new_state_dict = OrderedDict()
    for k, v in model_state_dict['model_state_dict'].items():
        name = k[7:] # remove `module.`
        new_state_dict[name] = v
    # load params
    model.load_state_dict(new_state_dict)
    model.cuda()
    model.eval()
    
    output_number = 0
    
    for base_grid in grid_pool:
        # select base roi from RGB
        base_roi_image = init_base_rgb_roi_image(rgb_img_path, base_grid, out_dir)
    
        # get base embedding
        base_embedding = embed_single_image(base_roi_image, model)
    
        # loop over the thermal data set and get pair embeddings
        # (these do not depend on base_grid, so they could be computed once outside the loop)
        pair_embeddings, labels = embed_pair_grid(thermal_grid_data, model)
    
        # get a score for the grid
        scores = get_scores_for_grids(base_embedding, pair_embeddings, labels, base_grid)
    
        # output grid images sorted by score
        output_grid_images_by_scores_topN(thermal_img_path, scores, labels, out_dir, output_number, base_grid)
    
    # draw a map based on all the grid scores
    #generate_score_map(imgSize, scores, labels)
    
    # output a scored heat map
            
    return
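
`init_grid_pool_largeGrid` is also external. Judging by how its result is used (a vector of box coordinates scanned across the image), a plausible sliding-window sketch looks like this; the (x1, y1, x2, y2) coordinate layout is an assumption:

def init_grid_pool_largeGrid_sketch(img_size, grid_size, step_size):
    """Sketch: slide a grid_size x grid_size window over an img_size x img_size
    image in steps of step_size, returning (x1, y1, x2, y2) boxes."""
    grid_pool = []
    for y in range(0, img_size - grid_size + 1, step_size):
        for x in range(0, img_size - grid_size + 1, step_size):
            grid_pool.append((x, y, x + grid_size, y + grid_size))
    return grid_pool

With imgSize=400, gridSize=256, stepSize=32 as above, this yields a 5 x 5 pool of 25 candidate boxes.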
Example #3
def loadModelParallel(model_path):
    
    # load model
    model_state_dict = torch.load(model_path)
    model = network.resnet_50_embedding()
    # remove the `module.` prefix that nn.DataParallel adds to state-dict keys
    new_state_dict = OrderedDict()
    for k, v in model_state_dict['model_state_dict'].items():
        name = k[7:] # remove `module.`
        new_state_dict[name] = v
    # load params
    model.load_state_dict(new_state_dict)
    model.cuda()
    model.eval()
    
    return model
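
A minimal usage sketch for this helper (the checkpoint path and the `batch` tensor are hypothetical):

model = loadModelParallel('/path/to/checkpoints/epoch_11.pth')  # hypothetical path
with torch.no_grad():
    embedding = model(batch.cuda())  # batch: a preprocessed image tensor

Note that `k[7:]` assumes every key carries the `module.` prefix; a defensive variant is `name = k[7:] if k.startswith('module.') else k`, which also tolerates checkpoints saved without DataParallel.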
Example #4
def grid_match_jointdata_single_image(rgb_img_path, thermal_img_path, out_dir, model_path):
    
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    
    imgSize = 224
    gridSize = 128
    stepSize = 8
    # init grid pool, return: vec of box coordinates
    grid_pool = init_grid_pool_largeGrid(imgSize, gridSize, stepSize)
    
        
    # init image sets based on the grid pool, one for each sensor
    rgb_grid_data = GridMatchDatasetPath(rgb_img_path, grid_pool, imgSize, image_transform)
    thermal_grid_data = GridMatchDatasetPath(thermal_img_path, grid_pool, imgSize, image_transform)
    
    # load model
    model_state_dict = torch.load(model_path)
    model = network.resnet_50_embedding()
    model.load_state_dict(model_state_dict['model_state_dict'])
    model = model.cuda()
    model.eval()
    
    #base_grid = (64,40,128,104)
    base_grid = (64,40,64+128,40+128)
    
    # select the base ROI from the RGB image (manually chosen for now)
    base_roi_image = init_base_rgb_roi_image(rgb_img_path, base_grid, out_dir)
    
    # get base embedding
    base_embedding = embed_single_image(base_roi_image, model)
    
    # loop over the thermal data set and get pair embeddings
    pair_embeddings, labels = embed_pair_grid(thermal_grid_data, model)
    
    # get a score for the grid
    scores = get_scores_for_grids(base_embedding, pair_embeddings, labels, base_grid)
    
    # output grid images sorted by score
    output_grid_images_by_scores(thermal_img_path, scores, labels, out_dir)
    
    # draw a map based on all the grid scores
    #generate_score_map(imgSize, scores, labels)
    
    # output a scored heat map
            
    return
Example #5
def loss_plot():

    dataset = OPENFusionJointDataset(
        '/media/zli/Seagate Backup Plus Drive/OPEN/ua-mac/Level_3/joint_training_data_test',
        transform=image_transform)

    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=1)

    model_state_dict = torch.load(
        '/media/zli/Seagate Backup Plus Drive/trained_models/pytorch-py3/checkpoints/NearDateJointTripletMarginLoss/20200915_221501874/epoch_11.pth'
    )
    model = network.resnet_50_embedding()
    model.load_state_dict(model_state_dict['model_state_dict'])

    loss_func = MyNCALoss()

    # inference
    #model.load_state_dict(model_state_dict['model_state_dict'])
    model.cuda()
    model.eval()
    vector_list = []
    label_list = []
    loss_list = []
    with torch.no_grad():
        for data, target in tqdm(dataloader, total=len(dataloader)):
            data = data.cuda()
            model_output = model(data)
            loss = loss_func(model_output, target)
            loss = loss.item()
            vector_list.append(model_output)
            loss_list.append(loss)
            label_list.append(target)

    vectors = torch.cat(vector_list, 0)

    labels = {}
    for l in label_list:
        for key, val in l.items():
            if key not in labels:
                labels[key] = []
            labels[key].extend(val)

    return
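
Despite its name, `loss_plot` collects `loss_list` (and the unused `vectors` and `labels`) but never plots anything. A minimal sketch of the plotting step that could precede the `return`, assuming matplotlib is available:

import matplotlib.pyplot as plt

plt.plot(loss_list)
plt.xlabel('batch index')
plt.ylabel('loss (MyNCALoss)')
plt.title('Per-batch loss on the test set')
plt.savefig('loss_plot.png')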
Example #6
def visualize_sim_pairs_in_folder(input_folder, model_path):
    
    # load model
    model_state_dict = torch.load(model_path)
    model = network.resnet_50_embedding()
    model.load_state_dict(model_state_dict['model_state_dict'])
    model = model.cuda()
    model.eval()  # switch to inference mode (freeze batch-norm stats, disable dropout)
    
    list_dir = os.listdir(input_folder)
    
    for d in list_dir:
        sub_dir = os.path.join(input_folder, d)
        if not os.path.isdir(sub_dir):
            continue
        png_suffix = os.path.join(sub_dir, '*.png')
        pngs = sorted(glob(png_suffix))  # glob order is not guaranteed; sort for a deterministic pair
        im1_path = pngs[0]
        im2_path = pngs[1]
        
        image1 = Image.open(im1_path)
        image2 = Image.open(im2_path)
        image1 = image1.crop((40,40,360,360))
        image2 = image2.crop((40,40,360,360))
        
        #image1.show()
        
        fv1, fm1 = embed_single_image(image1, model)
        fv2, fm2 = embed_single_image(image2, model)
        
        fm1 = np.moveaxis(fm1,1,3)
        fm2 = np.moveaxis(fm2,1,3)
        
        conv1 = fm1.reshape(-1,fm1.shape[-1])
        conv2 = fm2.reshape(-1,fm2.shape[-1])
        #heatmap1, heatmap2 = compute_spatial_similarity(conv1, conv2)
        heatmap1, heatmap2 = combine_SVD(conv1,conv2)
        
        # Combine the images with the (interpolated) similarity heatmaps.
        im1_with_similarity = combine_image_and_heatmap_noninter(load_and_resize_grayscale(im1_path),heatmap1)
        im2_with_similarity = combine_image_and_heatmap_noninter(load_and_resize_grayscale(im2_path),heatmap2)
        
        save_img = combine_horz([im1_with_similarity,im2_with_similarity])
        
        save_img.save(os.path.join(sub_dir,'{}_simVis.png'.format(os.path.basename(im1_path)[:-4])))
    
    return
Example #7
def output_image_and_activations(in_dir, out_dir, model_path):
    
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    
    # load model
    model_state_dict = torch.load(model_path)
    model = network.resnet_50_embedding()
    model.load_state_dict(model_state_dict['model_state_dict'])
    model = model.cuda()
    model.eval()  # switch to inference mode (freeze batch-norm stats, disable dropout)
    
    list_dir = os.listdir(in_dir)
    
    for d in list_dir:
        in_path = os.path.join(in_dir, d)
        if not os.path.isdir(in_path):
            continue
        
        out_path = os.path.join(out_dir, d)
        if not os.path.isdir(out_path):
            os.mkdir(out_path)
    
        dataset = OPENFusionStereoDatasetPath(in_path, transform=image_transform)
    
        # Run the images through the network, get last conv features
        fvecs, maps, labels, image_path_list = embed(dataset,model)
        
        maps = np.moveaxis(maps,1,3)
        
        for i in range(len(image_path_list)):
            conv1 = maps[i]
            im1_path = image_path_list[i][0]
            basename = os.path.basename(im1_path)[:-4]
            conv_file_path = os.path.join(out_path, '{}.pkl'.format(basename))
            mat_file_path = os.path.join(out_path, '{}.mat'.format(basename))
            img_path = os.path.join(out_path, '{}.png'.format(basename))
            
            sio.savemat(mat_file_path, {"conv": conv1})
            conv_df = pd.DataFrame({"conv": [conv1]})
            shutil.copyfile(im1_path, img_path)
            conv_df.to_pickle(conv_file_path)
            
    return
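
The saved activations can be read back with the same libraries; a short sketch (paths hypothetical):

import scipy.io as sio
import pandas as pd

mat = sio.loadmat('out/plot_0001/2020-06-01.mat')  # hypothetical path
conv_from_mat = mat['conv']                        # (H, W, C) activation map

conv_df = pd.read_pickle('out/plot_0001/2020-06-01.pkl')
conv_from_pkl = conv_df['conv'][0]                 # the same array, via pandas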
Example #8
        '/media/zli/Seagate Backup Plus Drive/OPEN/ua-mac/Level_3/joint_training_data_validation_double/',
        transform=val_image_transform)
    val_fusion_dateset = TripletNearDateJointDataset(
        val_dataset,
        class_by='plot',
        date_by='scan_date',
        neighbor_distance=neighbor_distance,
        collate_fn=None,
        transform=val_image_transform)
    val_dataloader = torch.utils.data.DataLoader(val_fusion_dateset,
                                                 batch_size=val_batch_size,
                                                 shuffle=True,
                                                 num_workers=0)

    print('Init model')
    model = resnet_50_embedding()
    #loss_func = TripletMarginLoss()
    loss_func = MyNCALoss()
    model.cuda()
    optimizer = optim.SGD(model.parameters(), lr=lr)
    scheduler = lr_scheduler.StepLR(optimizer, 15, gamma=0.1, last_epoch=-1)
    print('Train 1 epoch')
    train(model,
          dataloader,
          loss_func,
          optimizer,
          scheduler,
          n_epochs,
          resume_dict=resume_dict,
          tb_log_dir=tb_log_dir,
          ckp_dir=ckp_dir,
Example #9
def get_output():

    dataset = OPENFusionStereoDatasetPath(
        '/media/zli/Seagate Backup Plus Drive/OPEN/ua-mac/Level_3/joint_training_data_test_double',
        transform=image_transform)

    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=4)

    #model_state_dict = torch.load('/media/zli/Seagate Backup Plus Drive/trained_models/pytorch-py3/checkpoints/NearDateJointTripletMarginLoss_RotateSameWay30WithFLIP_320_temp3.5/20210106_180416201/epoch_22.pth')
    #model_state_dict = torch.load('/media/zli/Seagate Backup Plus Drive/trained_models/pytorch-py3/checkpoints/NearDateJointTripletMarginLoss_RotateDifferentWay30WithFLIP_320_temp3.5/20210110_161216957/epoch_21.pth')
    model_state_dict = torch.load(
        '/media/zli/Seagate Backup Plus Drive/trained_models/pytorch-py3/checkpoints/NearDateJointTripletMarginLoss_RotateDiffWay33_temp3.5_linear/20210122_192953695/epoch_33.pth'
    )
    model = network.resnet_50_embedding()

    # remove the `module.` prefix that nn.DataParallel adds to state-dict keys
    new_state_dict = OrderedDict()
    for k, v in model_state_dict['model_state_dict'].items():
        name = k[7:]  # remove `module.`
        new_state_dict[name] = v
    # load params
    model.load_state_dict(new_state_dict)
    #model.load_state_dict(model_state_dict['model_state_dict'])

    # inference
    #model.load_state_dict(model_state_dict['model_state_dict'])
    model.cuda()
    model.eval()
    vector_list = []
    label_list = []
    image_path_list = []
    with torch.no_grad():
        for data, target, image_path in tqdm(dataloader,
                                             total=len(dataloader)):
            data = data.cuda()
            vector_list.append(model(data))
            label_list.append(target)
            image_path_list.append(image_path)
    #vectors = torch.cat(vector_list, 0)
    labels = {}
    for l in label_list:
        for key, val in l.items():
            if key not in labels:
                labels[key] = []
            labels[key].extend(val)
    image_paths = np.concatenate(image_path_list)
    vectors_norm = [
        torch.nn.functional.normalize(d, p=2, dim=1) for d in vector_list
    ]
    vectors = torch.cat(vectors_norm, 0).cpu().numpy()

    torch.save(
        {
            'vectors': vectors,
            'labels': labels,
            'image_paths': image_paths
        }, './results/{}_ep_{}.pth'.format('RotateDiffWay33_temp3.5_linear',
                                           33))

    return
Example #10
def pca_vis_by_score(out_dir, model_path):
    
    
    # load model
    model_state_dict = torch.load(model_path)
    model = network.resnet_50_embedding()
    '''
    model.load_state_dict(model_state_dict['model_state_dict'])
    model = model.cuda()
    '''
    # remove the `module.` prefix that nn.DataParallel adds to state-dict keys
    new_state_dict = OrderedDict()
    for k, v in model_state_dict['model_state_dict'].items():
        name = k[7:] # remove `module.`
        new_state_dict[name] = v
    # load params
    model.load_state_dict(new_state_dict)
    model.cuda()
    model.eval()
    
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    
    task_name = 'RotateDiffWay30WithFLIP_320_temp3.5'
    load_result_ep = 21
    result_dict = torch.load('/home/zli/WorkSpace/PyWork/pytorch-py3/reverse-pheno-master/results/{}_ep_{}.pth'.format(task_name, load_result_ep))
    vectors = result_dict['vectors']
    labels = (result_dict['labels'])
    image_paths = result_dict['image_paths']
    
    total_nums = len(vectors)
    imsize = (320,320)
    
    for i in range(15,total_nums):
        
        print(i)
        
        base_vec = vectors[i]
        base_label = labels['plot'][i]
        base_sensor = labels['sensor'][i]
        dist_rec = np.zeros(total_nums)
        for j in range(total_nums):
            pair_sensor = labels['sensor'][j]
            loop_vec = vectors[j]
            if base_sensor == pair_sensor:  # skip pairs from the same sensor
                dist_rec[j] = float('inf')
            else:
                dist_rec[j] = np.linalg.norm(loop_vec-base_vec)   
                # dot product
                #dist_rec[j] = abs(np.matmul(loop_vec, base_vec))
            
        min_index = dist_rec.argsort()[:4]
        mid_ind = int(total_nums/2)
        start_ind = mid_ind - 5
        max_index = dist_rec.argsort()[start_ind:mid_ind]
        
        out_path = os.path.join(out_dir, str(i))
        if not os.path.isdir(out_path):
            os.mkdir(out_path)
        
        src_path_base = image_paths[i]
        
        image1 = Image.open(src_path_base)
        image1 = image1.crop((40,40,360,360))
        fv1, fm1 = embed_single_image(image1, model)
        fm1 = np.moveaxis(fm1,1,3)
        conv1 = fm1.reshape(-1,fm1.shape[-1])
        
        for ind in range(len(min_index)):
            val = min_index[ind]
            src_path_pair = image_paths[val]
            if image_paths[val] == image_paths[i]:
                continue
        
            image2 = Image.open(src_path_pair)
            image2 = image2.crop((40,40,360,360))
            #image1.show()
            fv2, fm2 = embed_single_image(image2, model)
            fm2 = np.moveaxis(fm2,1,3)
            conv2 = fm2.reshape(-1,fm2.shape[-1])
            heatmap1, heatmap2 = combine_SVD(conv1,conv2)
            
            featPCAIm1 = cv2.resize(heatmap1, (imsize[0], imsize[1]), interpolation=cv2.INTER_NEAREST)
            featPCAIm1 = featPCAIm1.astype('uint8')
            
            featPCAIm2 = cv2.resize(heatmap2, (imsize[0], imsize[1]), interpolation=cv2.INTER_NEAREST)
            featPCAIm2 = featPCAIm2.astype('uint8')
            
            # Combine the images with the (interpolated) similarity heatmaps.
            im1 = cv2.imread(src_path_base, 1)
            im1 = im1[40:360, 40:360]
            im1_with_similarity = cv2.addWeighted(im1, 0.5, featPCAIm1, 0.5, 0.0)
            
            im2 = cv2.imread(src_path_pair, 0)
            im2 = im2[40:360, 40:360]
            im2 = cv2.merge((im2,im2,im2))
            im2_with_similarity = cv2.addWeighted(im2, 0.5, featPCAIm2, 0.5, 0.0)
            
            save_img = combine_horz([im1_with_similarity,im2_with_similarity])
            save_path = os.path.join(out_path, 'close_{}.png'.format(ind))
            save_img.save(save_path)
        
        
        for c in range(len(max_index)):
            val = max_index[c]
            src_path_pair = image_paths[val]
            if image_paths[val] == image_paths[i]:
                continue
        
            image2 = Image.open(src_path_pair)
            image2 = image2.crop((40,40,360,360))
            #image1.show()
            fv2, fm2 = embed_single_image(image2, model)
            fm2 = np.moveaxis(fm2,1,3)
            conv2 = fm2.reshape(-1,fm2.shape[-1])
            heatmap1, heatmap2 = combine_SVD(conv1,conv2)
            
            featPCAIm1 = cv2.resize(heatmap1, (imsize[0], imsize[1]), interpolation=cv2.INTER_NEAREST)
            featPCAIm1 = featPCAIm1.astype('uint8')
            
            featPCAIm2 = cv2.resize(heatmap2, (imsize[0], imsize[1]), interpolation=cv2.INTER_NEAREST)
            featPCAIm2 = featPCAIm2.astype('uint8')
            
            # Combine the images with the (interpolated) similarity heatmaps.
            im1 = cv2.imread(src_path_base, 1)
            im1 = im1[40:360, 40:360]
            im1_with_similarity = cv2.addWeighted(im1, 0.7, featPCAIm1, 0.5, 0.0)
            
            im2 = cv2.imread(src_path_pair, 0)
            im2 = im2[40:360, 40:360]
            im2 = cv2.merge((im2,im2,im2))
            im2_with_similarity = cv2.addWeighted(im2, 0.7, featPCAIm2, 0.5, 0.0)
            
            save_img = combine_horz([im1_with_similarity,im2_with_similarity])
            save_path = os.path.join(out_path, 'far_{}.png'.format(c))
            save_img.save(save_path)
            

    return
Example #11
def visualize_pca_jointdata(in_dir, out_dir, model_path):
    
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    
    # load model
    model_state_dict = torch.load(model_path)
    model = network.resnet_50_embedding()
    # remove the `module.` prefix that nn.DataParallel adds to state-dict keys
    new_state_dict = OrderedDict()
    for k, v in model_state_dict['model_state_dict'].items():
        name = k[7:] # remove `module.`
        new_state_dict[name] = v
    # load params
    model.load_state_dict(new_state_dict)
    model.cuda()
    model.eval()
    
    rgb_dir = os.path.join(in_dir, 'rgb')
    thermal_dir = os.path.join(in_dir, 'laser')
    list_dir = os.listdir(rgb_dir)
    
    len_dir = len(list_dir)
    
    for i in range(0,len_dir):
        print(i)
        d = list_dir[i]
        print(d)
        rgb_path = os.path.join(rgb_dir, d)
        thermal_path = os.path.join(thermal_dir, d)
        if not os.path.isdir(rgb_path):
            continue
        if not os.path.isdir(thermal_path):
            continue
        
        out_path = os.path.join(out_dir, d)
        if not os.path.isdir(out_path):
            os.mkdir(out_path)
    
        dataset_rgb = OPENFusionStereoDatasetPath(rgb_path, transform=image_transform)
        dataset_thermal = OPENFusionStereoDatasetPath(thermal_path, transform=image_transform)
        
        # Run the images through the network, get last conv features
        fvecs, maps_rgb, labels, image_path_list_rgb = embed(dataset_rgb,model)
        fvecs, maps_thermal, labels, image_path_list_thermal = embed(dataset_thermal,model)
        
        maps_rgb = np.moveaxis(maps_rgb,1,3)
        maps_thermal = np.moveaxis(maps_thermal,1,3)
        #fvecs = np.array(fvecs)
        #labels = np.array(labels)
        
        imsize = (320,320)
    
        # Compute the spatial similarity maps (returns a heatmap that's the size of the last conv layer)
        for i in range(len(image_path_list_rgb)):
            im1_path = image_path_list_rgb[i][0]
            rgb_time = os.path.basename(im1_path)[:-4]
            rgb_time_int = datetime.strptime(rgb_time, '%Y-%m-%d').date().toordinal()
            for j in range(len(image_path_list_thermal)):
                im2_path = image_path_list_thermal[j][0]
                thermal_time = os.path.basename(im2_path)[:-4]
                thermal_time_int = datetime.strptime(thermal_time, '%Y-%m-%d').date().toordinal()
                if abs(rgb_time_int-thermal_time_int) > 2:
                    continue
                conv1 = maps_rgb[i]
                conv2 = maps_thermal[j]
                # flatten the (H, W, C) maps to (H*W, C), as in the other examples
                conv1 = conv1.reshape(-1, conv1.shape[-1])
                conv2 = conv2.reshape(-1, conv2.shape[-1])
                heatmap1, heatmap2 = combine_SVD(conv1, conv2)
                
                featPCAIm1 = cv2.resize(heatmap1, (imsize[0], imsize[1]), interpolation=cv2.INTER_NEAREST)
                featPCAIm1 = featPCAIm1.astype('uint8')
                
                featPCAIm2 = cv2.resize(heatmap2, (imsize[0], imsize[1]), interpolation=cv2.INTER_NEAREST)
                featPCAIm2 = featPCAIm2.astype('uint8')
                
                # Combine the images with the (interpolated) similarity heatmaps.
                im1 = cv2.imread(im1_path, 1)
                #im1 = cv2.flip(im1, 0)
                #im1 = cv2.rotate(im1, cv2.ROTATE_90_CLOCKWISE)
                im1 = im1[40:360, 40:360]
                im1 = cv2.resize(im1, (imsize[0], imsize[1]))
                im1_with_similarity = cv2.addWeighted(im1, 0.5, featPCAIm1, 0.5, 0.0)
                
                im2 = cv2.imread(im2_path, 0)
                im2 = im2[40:360, 40:360]
                im2 = cv2.merge((im2,im2,im2))
                im2 = cv2.resize(im2, (imsize[0], imsize[1]))
                im2_with_similarity = cv2.addWeighted(im2, 0.5, featPCAIm2, 0.5, 0.0)
                
                save_img = combine_horz([im1_with_similarity,im2_with_similarity])
                
                save_img.save(os.path.join(out_path,'b{}-{}_t{}-{}.png'.format(os.path.basename(im1_path)[:-4], i, os.path.basename(im2_path)[:-4], j)))
                #Image.fromarray(im2_with_similarity).save(os.path.join(out_path,'b{}_t{}.png'.format(labels[j],labels[i])))
    
    return
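
`combine_SVD` is another external helper. Given that its outputs are resized and alpha-blended as 3-channel images, a plausible sketch projects both feature maps onto a shared top-3 SVD basis and rescales to the 0-255 range; the whole formulation is an assumption, not the project's code:

import numpy as np

def combine_SVD_sketch(conv1, conv2):
    """Sketch: joint top-3 SVD projection of two (H*W, D) feature maps,
    returning two (H, W, 3) arrays scaled to 0..255 for visualization.
    Assumes both maps share the same square conv grid."""
    n = conv1.shape[0]
    stacked = np.concatenate([conv1, conv2], axis=0)         # (2*H*W, D)
    stacked = stacked - stacked.mean(axis=0, keepdims=True)  # center the features
    # Right singular vectors give a projection basis shared by both images
    _, _, vt = np.linalg.svd(stacked, full_matrices=False)
    proj = stacked @ vt[:3].T                                # (2*H*W, 3)
    # Rescale each projected channel to 0..255
    proj -= proj.min(axis=0, keepdims=True)
    proj = proj / (proj.max(axis=0, keepdims=True) + 1e-8) * 255.0
    side = int(np.sqrt(n))
    heatmap1 = proj[:n].reshape(side, side, 3)
    heatmap2 = proj[n:].reshape(side, side, 3)
    return heatmap1, heatmap2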
Example #12
def pca_optimization(in_dir, out_dir, model_path):
    
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    
    # load model
    model_state_dict = torch.load(model_path)
    model = network.resnet_50_embedding()
    # remove the `module.` prefix that nn.DataParallel adds to state-dict keys
    new_state_dict = OrderedDict()
    for k, v in model_state_dict['model_state_dict'].items():
        name = k[7:] # remove `module.`
        new_state_dict[name] = v
    # load params
    model.load_state_dict(new_state_dict)
    
    model.cuda()
    model.eval()
    imsize = (320,320)
    
    rgb_dir = os.path.join(in_dir, 'rgb')
    thermal_dir = os.path.join(in_dir, 'laser')
    list_dir = os.listdir(rgb_dir)
    
    len_dir = len(list_dir)
    for i in range(0,len_dir):
        print(i)
        d = list_dir[i]
        rgb_path = os.path.join(rgb_dir, d)
        thermal_path = os.path.join(thermal_dir, d)
        if not os.path.isdir(rgb_path):
            continue
        if not os.path.isdir(thermal_path):
            continue
            
        list_rgb_files = os.walk(rgb_path)
        for root, dirs, files in list_rgb_files:
            for f in files:
                base_file = os.path.join(rgb_path, f)
                if os.path.exists(base_file):
                    pair_file = os.path.join(thermal_path, f)
                    if os.path.exists(pair_file):
                        out_path = os.path.join(out_dir, d)
                        if not os.path.isdir(out_path):
                            os.mkdir(out_path)
                        image1 = Image.open(base_file)
                        image1 = image1.crop((40,40,360,360))
                        fv1, fm1 = embed_single_image(image1, model)
                        fm1 = np.moveaxis(fm1,1,3)
                        conv1 = fm1.reshape(-1,fm1.shape[-1])
                        
                        image2 = Image.open(pair_file)
                        image2 = image2.crop((40,40,360,360))
                        fv2, fm2 = embed_single_image(image2, model)
                        fm2 = np.moveaxis(fm2,1,3)
                        conv2 = fm2.reshape(-1,fm2.shape[-1])
                        
                        heatmap1, heatmap2 = combine_SVD(conv1,conv2)
                        
                        featPCAIm1 = cv2.resize(heatmap1, (imsize[0], imsize[1]), interpolation=cv2.INTER_LINEAR)
                        featPCAIm2 = cv2.resize(heatmap2, (imsize[0], imsize[1]), interpolation=cv2.INTER_LINEAR)
                        
                        # Combine the images with the (interpolated) similarity heatmaps.
                        im1 = cv2.imread(base_file, 1)
                        im1 = im1[40:360, 40:360]
                        im1 = cv2.resize(im1, (imsize[0], imsize[1]))
                        
                        
                        im2 = cv2.imread(pair_file, 1)
                        im2 = im2[40:360, 40:360]
                        #im2 = cv2.merge((im2,im2,im2))
                        im2 = cv2.resize(im2, (imsize[0], imsize[1]))
                        
                        #featPCAIm1 = featPCAIm1.astype('uint8')
                        #featPCAIm2 = featPCAIm2.astype('uint8')
                        #im1_with_similarity = cv2.addWeighted(im1, 0.5, featPCAIm1, 0.5, 0.0)
                        #im2_with_similarity = cv2.addWeighted(im2, 0.5, featPCAIm2, 0.5, 0.0)
                        #save_img = combine_horz([im1_with_similarity,im2_with_similarity])
                        #save_img.save(os.path.join(out_path,'{}.png'.format(os.path.basename(base_file)[:-4])))
                        
                        save_path = os.path.join(out_path,'{}_fusion.png'.format(os.path.basename(base_file)[:-4]))
                        
                        '''
                        PCA = torch.from_numpy(featPCAIm1)
                        PCAT = torch.from_numpy(featPCAIm2)
                        Im1 = torch.from_numpy(im1)
                        Im2 = torch.from_numpy(im2)
                        pytorch_optimization_frame(PCA, PCAT, Im1, Im2)
                        '''
                        a, b, _ = pca_template_matching(featPCAIm1, featPCAIm2)
                        save_fusion_img(im1, im2, a, b, save_path)
                        '''
                        im_t = cv2.imread(pair_file, 0)
                        im_t = im2[40:360, 40:360]
                        im_t = cv2.merge((im_t,im_t,im_t))
                        im_t = cv2.resize(im_t, (imsize[0], imsize[1]))
                        x0 = [a,b,1.0]
                        res = minimize(scale_translation_optimization_fun, x0, args=(featPCAIm1, featPCAIm2, im1, im_t), method='trust-constr', constraints=cons, options={'gtol': 1e-10, 'disp': True})
                        print(res)
                        '''
                        
                        
    
    return
Example #13
def visualize_similarity_jointdata(in_dir, out_dir, model_path):
    
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    
    # load model
    model_state_dict = torch.load(model_path)
    model = network.resnet_50_embedding()
    model.load_state_dict(model_state_dict['model_state_dict'])
    model = model.cuda()
    model.eval()  # switch to inference mode (freeze batch-norm stats, disable dropout)
    
    rgb_dir = os.path.join(in_dir, 'rgb')
    thermal_dir = os.path.join(in_dir, 'thermal')
    list_dir = os.listdir(rgb_dir)
    
    for d in list_dir:
        rgb_path = os.path.join(rgb_dir, d)
        thermal_path = os.path.join(thermal_dir, d)
        if not os.path.isdir(rgb_path):
            continue
        if not os.path.isdir(thermal_path):
            continue
        
        out_path = os.path.join(out_dir, d)
        if not os.path.isdir(out_path):
            os.mkdir(out_path)
    
        dataset_rgb = OPENFusionStereoDatasetPath(rgb_path, transform=image_transform)
        dataset_thermal = OPENFusionStereoDatasetPath(thermal_path, transform=image_transform)
        
        # Run the images through the network, get last conv features
        fvecs, maps_rgb, labels, image_path_list_rgb = embed(dataset_rgb,model)
        fvecs, maps_thermal, labels, image_path_list_thermal = embed(dataset_thermal,model)
        
        maps_rgb = np.moveaxis(maps_rgb,1,3)
        maps_thermal = np.moveaxis(maps_thermal,1,3)
        #fvecs = np.array(fvecs)
        #labels = np.array(labels)
    
        # Compute the spatial similarity maps (returns a heatmap that's the size of the last conv layer)
        for i in range(len(image_path_list_rgb)):
            im1_path = image_path_list_rgb[i][0]
            rgb_time = os.path.basename(im1_path)[:-4]
            rgb_time_int = datetime.strptime(rgb_time, '%Y-%m-%d').date().toordinal()
            for j in range(len(image_path_list_thermal)):
                im2_path = image_path_list_thermal[j][0]
                thermal_time = os.path.basename(im2_path)[:-4]
                thermal_time_int = datetime.strptime(thermal_time, '%Y-%m-%d').date().toordinal()
                if abs(rgb_time_int-thermal_time_int) > 2:
                    continue
                conv1 = maps_rgb[i]
                conv2 = maps_thermal[j]
                conv1 = conv1.reshape(-1,conv1.shape[-1])
                conv2 = conv2.reshape(-1,conv2.shape[-1])
                heatmap1, heatmap2 = compute_spatial_similarity(conv1, conv2)
                
                # Combine the images with the (interpolated) similarity heatmaps.
                im1_with_similarity = combine_image_and_heatmap_noninter(load_and_resize(im1_path),heatmap1)
                im2_with_similarity = combine_image_and_heatmap_noninter(load_and_resize_grayscale(im2_path),heatmap2)
                
                save_img = combine_horz([im1_with_similarity,im2_with_similarity])
                
                save_img.save(os.path.join(out_path,'b{}-{}_t{}-{}.png'.format(os.path.basename(im1_path)[:-4], i, os.path.basename(im2_path)[:-4], j)))
                #Image.fromarray(im2_with_similarity).save(os.path.join(out_path,'b{}_t{}.png'.format(labels[j],labels[i])))
    
    return
Example #14
# commented-out alternative: concatenate several datasets into one loader
#dataloader = torch.utils.data.DataLoader(torch_u_data.ConcatDataset(list_of_datasets), batch_size=batch_size,
#                                         shuffle=False, num_workers=4)
train_dataset = OPENFusionStereoDatasetPath(
    '/media/zli/Seagate Backup Plus Drive/OPEN/ua-mac/Level_3/joint_training_data_test',
    transform=image_transform)
#fusion_dateset = TripletNearDateJointDataset(train_dataset, class_by='plot', date_by='scan_date', neighbor_distance=neighbor_distance, collate_fn=None)
dataloader = torch.utils.data.DataLoader(train_dataset,
                                         batch_size=batch_size,
                                         shuffle=True,
                                         num_workers=4)

model_state_dict = torch.load(
    '/media/zli/Seagate Backup Plus Drive/trained_models/pytorch-py3/checkpoints/NearDateJointTripletMarginLoss/20200915_221501874/epoch_11.pth'
)
model = network.resnet_50_embedding()
#model.load_state_dict(model_state_dict['model_state_dict'])

# inference
model.load_state_dict(model_state_dict['model_state_dict'])
model.cuda()
model.eval()
vector_list = []
label_list = []
image_path_list = []
with torch.no_grad():
    for data, target, image_path in tqdm(dataloader, total=len(dataloader)):
        data = data.cuda()
        vector_list.append(model(data))
        label_list.append(target)
        image_path_list.append(image_path)