Example #1
def extract_features(net, layers, imagepair):
    
    featlist = list()
    for j, im in enumerate(imagepair): 
        outlist = list()
        # prepare image
        im_n = normalize2plot(im)
        im_n = np.moveaxis(im_n, -1, 0)
        im_n = np.expand_dims(im_n, axis=0)
        im_t = torch.as_tensor(im_n)
            
        # get features
        im_out = net([im_t.float(),im_t], 1, extract_features=layers)
        
        # upsample if needed and cast back to numpy array
        for i, fl in enumerate(im_out):
            if i == 0:
                ref_size = fl.shape[2:]
            if fl.shape[2:] != ref_size:
                fl = nn.functional.interpolate(fl, size=ref_size, mode='bilinear', align_corners=True)
            im_out[i] = np.moveaxis(np.squeeze(fl.detach().numpy()), 0,-1)
        
        # concat
        outlist.extend(im_out)
        featlist.append(outlist)
        
    return featlist
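A minimal call sketch, assuming net is a trained siamese network (for example returned by get_network); the image paths and layer indices below are placeholders, not values taken from the examples on this page.

# hypothetical usage sketch for extract_features (paths and layer indices are placeholders)
import numpy as np

im_a = np.load('0_a.npy')    # image at time a, shape (H, W, C)
im_b = np.load('0_b.npy')    # image at time b, shape (H, W, C)
layers = [1, 3, 5]           # branch layers to extract; must exist in the trained model

features = extract_features(net, layers, [im_a, im_b])
# features[0] and features[1] each hold one (H', W', D) array per requested layer,
# all resampled to the spatial size of the first extracted layer
print(len(features), len(features[0]), features[0][0].shape)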
Example #2
def extract_features_joint(net, imagepair,layers='joint'):
    
    ima = imagepair[0] 
    imb = imagepair[1]

    # prepare image
    im_na = normalize2plot(ima)
    im_na = np.moveaxis(im_na, -1, 0)
    im_na = np.expand_dims(im_na, axis=0)
    im_ta = torch.as_tensor(im_na)
    
    im_nb = normalize2plot(imb)
    im_nb = np.moveaxis(im_nb, -1, 0)
    im_nb = np.expand_dims(im_nb, axis=0)
    im_tb = torch.as_tensor(im_nb)
            
    # get features
    im_out = net([im_ta.float(),im_tb.float()], 2, extract_features=layers)
    im_out = im_out[0]
    im_out = np.moveaxis(np.squeeze(im_out.detach().numpy()), 0,-1)
            
    return im_out
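A sketch of the joint variant, reusing the placeholder images from the sketch after Example #1; here both images are passed through the network together (n_branches=2) and a single fused feature map comes back.

# hypothetical usage sketch for extract_features_joint
joint_feat = extract_features_joint(net, [im_a, im_b], layers='joint')
# joint_feat is a single (H', W', D) numpy array describing both acquisitions at once;
# detect_changes later reduces it to a distance map with calculate_magnitudemap
print(joint_feat.shape)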
Example #3
def extract_features_only_feat(net, layers, imagepair):
    
    featlist = list()
    for j, im in enumerate(imagepair): 

        # prepare image
        im_n = normalize2plot(im)
        im_n = np.moveaxis(im_n, -1, 0)
        im_n = np.expand_dims(im_n, axis=0)
        im_t = torch.as_tensor(im_n)
            
        # get features
        im_out = net([im_t.float(),im_t], 1, extract_features=layers)
        im_out = im_out[0]
        im_out = np.moveaxis(np.squeeze(im_out.detach().numpy()), 0,-1)
        
        # concat
        featlist.append(im_out)
        
    return featlist
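extract_features_only_feat keeps only the first returned feature map per image; downstream, detect_changes compares the two maps with calculate_distancemap. That helper is not among these examples; the minimal per-pixel Euclidean distance below only illustrates the expected shape contract and is an assumption, not the project's actual implementation.

import numpy as np

def euclidean_distancemap(feat_a, feat_b):
    # per-pixel Euclidean distance between two (H, W, D) feature arrays -> (H, W) map
    return np.sqrt(np.sum((feat_a - feat_b) ** 2, axis=-1))

# features = extract_features_only_feat(net, layers, [im_a, im_b])
# distmap = euclidean_distancemap(features[0], features[1])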
Example #4
def detect_changes_no_gt(model_settings,
                         directories,
                         dataset_settings,
                         network_settings,
                         train_settings,
                         threshold_methods=['triangle'],
                         threshold_from_file={
                             'f1': 0.8,
                             'AA': 0.5
                         }):
    """
    Calculate the confusion matrix for specified thresholding methods, 
    including inference for images for which no ground truth is available.

    Parameters
    ----------
    model_settings : dict
        Settings used during training of the model
    directories : dict
        Directories to load and save data
    dataset_settings : dict
        Settings that specify the used dataset
    network_settings : dict
        Settings that specify the used network
    train_settings : dict
        Settings that specify the training parameters
    threshold_methods : list, optional
        Threshold methods to use. Options: "otsu" | "triangle" | "end-to-end" | "f1" | "AA" 
        The default is ['triangle'].
    threshold_from_file : dict, optional
        specify thresholds to use for AA or f1. The default is {'f1':0.8, 'AA':0.5}.

    Raises
    ------
    Exception
        network_settings['extract_features'] should be one of None | 'joint' | 'diff' | a list of layer numbers

    Returns
    -------
    tp : dict
        True positives of every image for every thresholding method
    tn : dict
        True negatives of every image for every thresholding method
    fp : dict
        False positives of every image for every thresholding method
    fn : dict
        False negatives of every image for every thresholding method
    th : dict
        Threshold-value applied to get the result
    """

    save_networkname = model_settings['filename'].split('/')[-1]
    if not os.path.exists(
            os.path.join(directories['results_dir_cd'], save_networkname)):
        os.mkdir(os.path.join(directories['results_dir_cd'], save_networkname))
    if not os.path.exists(
            os.path.join(directories['results_dir_cd'], save_networkname,
                         'testset')):
        os.mkdir(
            os.path.join(directories['results_dir_cd'], save_networkname,
                         'testset'))
    if not os.path.exists(
            os.path.join(directories['results_dir_cd'], save_networkname,
                         'testset', network_settings['str_extract_features'])):
        os.mkdir(
            os.path.join(directories['results_dir_cd'], save_networkname,
                         'testset', network_settings['str_extract_features']))
    if not os.path.exists(
            os.path.join(directories['results_dir_cd'], save_networkname,
                         'testset', network_settings['str_extract_features'],
                         'ENVI')):
        os.mkdir(
            os.path.join(directories['results_dir_cd'], save_networkname,
                         'testset', network_settings['str_extract_features'],
                         'ENVI'))

    # get_network
    if network_settings['extract_features'] != None:
        if model_settings.loc['networkname'].endswith('finetune'):
            model_settings.loc['networkname'] = model_settings.loc[
                'networkname'].split('_')[0]
            model_settings.loc['network'] = model_settings.loc[
                'networkname'].split('_')[0]
        n_branches = 2
        net = get_network(model_settings, gpu=train_settings['gpu'])
        classifier = model_settings['cfg_classifier'].split("[")[1]
        classifier = classifier.split("]")[0]
        classifier = classifier.replace("'", "")
        classifier = classifier.replace(" ", "")
        classifier = classifier.split(",")
        conv_classifier = True if classifier[0] == 'C' else False
    else:
        conv_classifier = False

    tp = dict()
    tn = dict()
    fp = dict()
    fn = dict()
    th = dict()

    for method in threshold_methods:
        tp[method] = dict()
        tn[method] = dict()
        fp[method] = dict()
        fn[method] = dict()
        th[method] = dict()

    for idx in dataset_settings['indices_test']:

        im_a = normalize2plot(
            np.load(os.path.join(directories['data_path'],
                                 str(idx) + '_a.npy')))
        im_b = normalize2plot(
            np.load(os.path.join(directories['data_path'],
                                 str(idx) + '_b.npy')))

        if network_settings['extract_features'] != None:
            if isinstance(network_settings['extract_features'], list):
                features = extract_features_only_feat(
                    net, network_settings['extract_features'], [im_a, im_b])
            else:
                # prepare for network
                im_a = torch.as_tensor(
                    np.expand_dims(np.moveaxis(im_a, -1, 0), axis=0))
                im_b = torch.as_tensor(
                    np.expand_dims(np.moveaxis(im_b, -1, 0), axis=0))
                data = [im_a.float(), im_b.float()]

                # get features
                features = net(
                    data,
                    n_branches=n_branches,
                    extract_features=network_settings['extract_features'],
                    conv_classifier=conv_classifier,
                    use_softmax=True)

                features = features.squeeze().detach().numpy()

        # calculate the distancemap
        if conv_classifier == True:
            changemap = np.argmax(features, axis=0)
        else:
            if network_settings['extract_features'] == None:
                distmap = calculate_distancemap(im_a, im_b)
            elif network_settings['extract_features'] == 'joint':
                distmap = calculate_magnitudemap(features)
            elif network_settings['extract_features'] == 'diff':
                distmap = calculate_magnitudemap(features)
            elif isinstance(network_settings['extract_features'], list):
                distmap = calculate_distancemap(features[0], features[1])
            else:
                raise Exception(
                    'distance map calculation not implemented for these settings of extract_features'
                )
            np.save(
                os.path.join(directories['results_dir_cd'], save_networkname,
                             'cva', 'distmap_' + str(idx)), distmap)

            for method in threshold_methods:
                if method == 'f1':
                    threshold = threshold_from_file['f1']
                    changemap = distmap > threshold
                elif method == 'AA':
                    threshold = threshold_from_file['AA']
                    changemap = distmap > threshold
                else:
                    changemap, threshold = calculate_changemap(distmap,
                                                               method=method,
                                                               plot=True)
                np.save(
                    os.path.join(
                        directories['results_dir_cd'], save_networkname, 'cva',
                        'changemap_threshold_' + method + '_' +
                        str(threshold) + '_' + str(idx) + '.npy'), changemap)
                fig, ax = plt.subplots(figsize=(5, 5))
                ax.imshow(changemap, cmap='gray')
                ax.axis('off')
                plt.savefig(
                    os.path.join(
                        directories['results_dir_cd'], save_networkname,
                        'testset', network_settings['str_extract_features'],
                        str(idx) + '_GRAY_threshold-' + method + '-' +
                        str(threshold) + '.png'))
                plt.show()

                # calculate change detection accuracy
                if os.path.exists(
                        os.path.join(directories['labels_path'],
                                     str(idx) + '.npy')):
                    gt = np.load(
                        os.path.join(directories['labels_path'],
                                     str(idx) + '.npy'))
                    gt -= 1
                    gt_pos = gt > 0
                    cm_pos = changemap > 0
                    tp[method][str(idx)] = np.logical_and(cm_pos, gt_pos).sum()
                    tn[method][str(idx)] = np.logical_and(
                        np.logical_not(cm_pos), np.logical_not(gt_pos)).sum()
                    fp[method][str(idx)] = np.logical_and(
                        cm_pos, np.logical_not(gt_pos)).sum()
                    fn[method][str(idx)] = np.logical_and(
                        np.logical_not(cm_pos), gt_pos).sum()
                    th[method][str(idx)] = threshold

                    fig, ax = plot_changemap_colors(gt,
                                                    changemap,
                                                    axis=False,
                                                    title=None)
                    plt.savefig(
                        os.path.join(
                            directories['results_dir_cd'], save_networkname,
                            'testset',
                            network_settings['str_extract_features'],
                            str(idx) + '_COLOR_threshold-' + method + '-' +
                            str(threshold) + '.png'))
                    plt.show()

                else:
                    prediction = changemap.astype(np.int64) + 1
                    array2raster(
                        os.path.join(
                            directories['results_dir_cd'], save_networkname,
                            'testset',
                            network_settings['str_extract_features'], 'ENVI',
                            str(idx) + '_threshold-' + method + '-' +
                            str(threshold) + '.raw'), (0, 0), 1, 1,
                        prediction)  # convert array to raster

    return (tp, tn, fp, fn, th)
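A call sketch, assuming model_settings, network_settings (including the 'extract_features' and 'str_extract_features' keys) and train_settings were built the same way as during training; every path and index below is a placeholder.

# hypothetical usage sketch for detect_changes_no_gt (paths and indices are placeholders)
directories = {'results_dir_cd': 'results/change_detection',
               'data_path': 'data/images',
               'labels_path': 'data/labels'}
dataset_settings = {'indices_test': [0, 1, 2]}

tp, tn, fp, fn, th = detect_changes_no_gt(model_settings, directories, dataset_settings,
                                           network_settings, train_settings,
                                           threshold_methods=['triangle', 'f1'],
                                           threshold_from_file={'f1': 0.8, 'AA': 0.5})
# e.g. th['triangle']['0'] is the threshold that was applied to test image 0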
Example #5
def detect_changes(model_settings,
                   directories,
                   dataset_settings,
                   network_settings,
                   train_settings,
                   threshold_methods=['triangle']):
    """
    Calculate the confusion matrix for specified thresholding methods

    Parameters
    ----------
    model_settings : dict
        Settings used during training of the model
    directories : dict
        Directories to load and save data
    dataset_settings : dict
        Settings that specify the used dataset
    network_settings : dict
        Settings that specify the used network
    train_settings : dict
        Settings that specify the training parameters
    threshold_methods : list, optional
        Threshold methods to use. Options: "otsu" | "triangle" | "end-to-end" | "f1" | "AA" 
        The default is ['triangle'].

    Raises
    ------
    Exception
        network_settings['extract_features'] should be one of None | 'joint' | 'diff' | a list of layer numbers

    Returns
    -------
    tp : dict
        True positives of every image for every thresholding method
    tn : dict
        True negatives of every image for every thresholding method
    fp : dict
        False positives of every image for every thresholding method
    fn : dict
        False negatives of every image for every thresholding method
    th : dict
        Threshold-value applied to get the result
    """
    save_networkname = model_settings['filename'].split('/')[-1]
    if not os.path.exists(
            os.path.join(directories['results_dir_cd'], save_networkname)):
        os.mkdir(os.path.join(directories['results_dir_cd'], save_networkname))
    if not os.path.exists(
            os.path.join(directories['results_dir_cd'], save_networkname,
                         'cva')):
        os.mkdir(
            os.path.join(directories['results_dir_cd'], save_networkname,
                         'cva'))

    # get_network
    if network_settings['extract_features'] != None:
        if model_settings.loc['networkname'].endswith('finetune'):
            model_settings.loc['networkname'] = model_settings.loc[
                'networkname'].split('_')[0]
            model_settings.loc['network'] = model_settings.loc[
                'networkname'].split('_')[0]
        n_branches = 2
        net = get_network(model_settings, gpu=train_settings['gpu'])
        classifier = model_settings['cfg_classifier'].split("[")[1]
        classifier = classifier.split("]")[0]
        classifier = classifier.replace("'", "")
        classifier = classifier.replace(" ", "")
        classifier = classifier.split(",")
        conv_classifier = True if classifier[0] == 'C' else False
    else:
        conv_classifier = False

    tp = dict()
    tn = dict()
    fp = dict()
    fn = dict()
    th = dict()

    for method in threshold_methods:
        tp[method] = dict()
        tn[method] = dict()
        fp[method] = dict()
        fn[method] = dict()
        th[method] = dict()

    for idx in dataset_settings['indices_eval']:

        im_a = normalize2plot(
            np.load(os.path.join(directories['data_path'],
                                 str(idx) + '_a.npy')))
        im_b = normalize2plot(
            np.load(os.path.join(directories['data_path'],
                                 str(idx) + '_b.npy')))
        gt = np.load(
            os.path.join(directories['labels_path'],
                         str(idx) + '.npy'))
        gt -= 1
        gt_pos = gt > 0

        if network_settings['extract_features'] != None:
            if isinstance(network_settings['extract_features'], list):
                features = extract_features_only_feat(
                    net, network_settings['extract_features'], [im_a, im_b])
            else:
                # prepare for network
                im_a = torch.as_tensor(
                    np.expand_dims(np.moveaxis(im_a, -1, 0), axis=0))
                im_b = torch.as_tensor(
                    np.expand_dims(np.moveaxis(im_b, -1, 0), axis=0))
                data = [im_a.float(), im_b.float()]

                # get features
                features = net(
                    data,
                    n_branches=n_branches,
                    extract_features=network_settings['extract_features'],
                    conv_classifier=conv_classifier,
                    use_softmax=True)

                features = features.squeeze().detach().numpy()

        # calculate the distancemap
        if conv_classifier == True:
            changemap = np.argmax(features, axis=0)
            # calculate change detection accuracy
            cm_pos = changemap > 0
            tp[method][str(idx)] = np.logical_and(cm_pos, gt_pos).sum()
            tn[method][str(idx)] = np.logical_and(
                np.logical_not(cm_pos), np.logical_not(gt_pos)).sum()
            fp[method][str(idx)] = np.logical_and(
                cm_pos, np.logical_not(gt_pos)).sum()
            fn[method][str(idx)] = np.logical_and(np.logical_not(cm_pos),
                                                  gt_pos).sum()
        else:
            if network_settings['extract_features'] == None:
                distmap = calculate_distancemap(im_a, im_b)
            elif network_settings['extract_features'] == 'joint':
                distmap = calculate_magnitudemap(features)
            elif network_settings['extract_features'] == 'diff':
                distmap = calculate_magnitudemap(features)
            elif isinstance(network_settings['extract_features'], list):
                distmap = calculate_distancemap(features[0], features[1])
            else:
                raise Exception(
                    'distance map calculation not implemented for these settings of extract_features'
                )
            np.save(
                os.path.join(directories['results_dir_cd'], save_networkname,
                             'cva', 'distmap_' + str(idx)), distmap)

            for method in threshold_methods:
                if method == 'f1':
                    precision, recall, thresholds = precision_recall_curve(
                        gt.ravel(), distmap.ravel())
                    f1 = (2 * precision * recall) / (precision + recall)
                    threshold = thresholds[np.nanargmax(f1)]
                    changemap = distmap > threshold
                elif method == 'AA':
                    fpr, tpr, thresholds = roc_curve(gt.ravel(),
                                                     distmap.ravel())
                    tnr = 1 - fpr
                    avg_acc = (tpr + tnr) / 2
                    threshold = thresholds[np.nanargmax(avg_acc)]
                    changemap = distmap > threshold
                else:
                    changemap, threshold = calculate_changemap(distmap,
                                                               method=method,
                                                               plot=True)
                np.save(
                    os.path.join(
                        directories['results_dir_cd'], save_networkname, 'cva',
                        'changemap_threshold_' + method + '_' +
                        str(threshold) + '_' + str(idx) + '.npy'), changemap)

                fig, ax = plot_changemap_colors(gt,
                                                changemap,
                                                axis=False,
                                                title=None)
                plt.savefig(
                    os.path.join(
                        directories['results_dir_cd'], save_networkname, 'cva',
                        'changemap_threshold_' + method + '_' +
                        str(threshold) + '_' + str(idx) + '.png'))
                plt.show()

                # calculate change detection accuracy
                cm_pos = changemap > 0
                tp[method][str(idx)] = np.logical_and(cm_pos, gt_pos).sum()
                tn[method][str(idx)] = np.logical_and(
                    np.logical_not(cm_pos), np.logical_not(gt_pos)).sum()
                fp[method][str(idx)] = np.logical_and(
                    cm_pos, np.logical_not(gt_pos)).sum()
                fn[method][str(idx)] = np.logical_and(np.logical_not(cm_pos),
                                                      gt_pos).sum()
                th[method][str(idx)] = threshold

    return (tp, tn, fp, fn, th)
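The returned dictionaries hold raw pixel counts per image. A small sketch of how they could be reduced to overall precision, recall and F1 for one thresholding method; this aggregation is not part of the examples above.

# hypothetical aggregation of the confusion-matrix dicts returned by detect_changes
def summarize(tp, tn, fp, fn, method):
    # sum the per-image pixel counts for one thresholding method
    TP = sum(tp[method].values())
    FP = sum(fp[method].values())
    FN = sum(fn[method].values())
    precision = TP / (TP + FP) if (TP + FP) > 0 else 0.0
    recall = TP / (TP + FN) if (TP + FN) > 0 else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0.0
    return precision, recall, f1

# tp, tn, fp, fn, th = detect_changes(model_settings, directories, dataset_settings,
#                                     network_settings, train_settings,
#                                     threshold_methods=['triangle', 'otsu'])
# print(summarize(tp, tn, fp, fn, 'triangle'))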
Example #6
def extract_features_vis(directories, dataset_settings, model_settings, train_settings, 
              channels=np.arange(13), percentile=99, extract_features=None, 
              avg_pool=None, use_softmax=False, extract_filter=None):
    
    """ Visualise patches with highest activation """    
    from extract_features import extract_features_only_feat
    import pandas as pd
    from matplotlib.gridspec import GridSpec
    #grid_patches_single = pd.read_csv('/media/cordolo/marrit/results/grid_patches_oscd.csv')
    grid_patches_single = pd.read_csv('grid_patches_oscd.csv')
    grid_patches_single = grid_patches_single[grid_patches_single.variant == 0]
    grid_patches_single = grid_patches_single.reset_index()
    patch_size = 96
    feature_patches = '3'
    
    # get network
    n_branches = 2
    if model_settings['networkname'].startswith('CD') or model_settings['networkname'].startswith('FINETUNE'):
        network, conv_classifier = get_downstream_network(model_settings, train_settings['gpu'], n_branches)
    else:
        network = get_network(model_settings, train_settings['gpu'])
        conv_classifier = True
    # load net to GPU     
    if train_settings['gpu'] != None:
        torch.cuda.set_device(train_settings['gpu'])
        network.cuda()
    
    feature_patches = dict()   
    feat_list = list()
    filename_old = 'none'
    
    #del feature_patches
    for i, r in grid_patches_single.iterrows():
        # get filenames
        filename_a = str(r.im_idx)+'_a.npy'
        filename_b = str(r.im_idx)+'_b.npy'
        if filename_a != filename_old: 
            filename_old = filename_a
            # prepare images
            im_a = np.load(os.path.join(directories['data_path'], filename_a))
            im_b = np.load(os.path.join(directories['data_path'], filename_b))
        
            features = extract_features_only_feat(network,extract_features,[im_a, im_b])
            
        for extract_filter in np.arange(32): 
            feat_a = np.expand_dims(features[0][:,:,extract_filter],axis=0)
            feat_b = np.expand_dims(features[1][:,:,extract_filter],axis=0)
                
            patch_start = [r.row, r.col]
            
            patch0 = feat_a[:,patch_start[0]:patch_start[0]+patch_size,
                         patch_start[1]:patch_start[1]+patch_size]
            patch1 = feat_b[:,patch_start[0]:patch_start[0]+patch_size,
                         patch_start[1]:patch_start[1]+patch_size]
            if i == 0:
                feature_patches[str(extract_filter)] = np.concatenate([patch0, patch1], axis=0)
            else:
                feature_patches[str(extract_filter)] = np.concatenate([feature_patches[str(extract_filter)], patch0, patch1], axis=0)
       
        print('\r {}/{}'.format(i+1, len(grid_patches_single)), end='') 
        
    # after all patches have been collected, rank them per filter and plot the six
    # patches with the highest activation (largest L1-norm) for a selection of filters
    #grid_patches = pd.read_csv('/media/cordolo/marrit/results/grid_patches_oscd.csv')
    grid_patches = pd.read_csv('grid_patches_oscd.csv')
    grid_patches = grid_patches[grid_patches.variant.isin([0,1])]
    grid_patches = grid_patches.reset_index()
    fig = plt.figure(figsize=(5,7))
    n_rows = 12
    n_cols = 6
    plt_idx = 0
    gs = GridSpec(n_rows, n_cols)
    for extract_filter in [0,6,7,9,12,13,14,19,24,26,30,31]:
        #sorted_means = np.flip(np.argsort(np.mean(feature_patches[str(extract_filter)], axis=(1,2))))
        sorted_l1norm = np.flip(np.argsort(np.sum(np.abs(feature_patches[str(extract_filter)]), axis=(1,2))))
        for q in range(6):
            #print(sorted_means[q])
            print(grid_patches.iloc[sorted_l1norm[int(q)]].im_idx)
            row_idx = sorted_l1norm[int(q)]
            grid_row = grid_patches.iloc[sorted_l1norm[int(q)]]
            # even rows correspond to the first acquisition (_a), odd rows to the second (_b)
            if row_idx % 2 == 0:
                filename = str(grid_row.im_idx)+'_a.npy'
            else:
                filename = str(grid_row.im_idx)+'_b.npy'
            im = np.load(os.path.join(directories['data_path'], filename))
            patch_start = [grid_row.row, grid_row.col]
            patch = im[patch_start[0]:patch_start[0]+patch_size,
                       patch_start[1]:patch_start[1]+patch_size, [3,2,1]]

            ax = fig.add_subplot(gs[plt_idx])
            ax.imshow(normalize2plot(patch))
            ax.axis('off')
            plt_idx += 1
        print('---------------NEW FILTER-------------------')
    plt.show()
    print('\nvisualisation of highest-activation patches done')
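A call sketch, assuming the same settings dicts as in the sketches above; the layer index is a placeholder, and the chosen layer must provide at least 32 feature maps because the inner loop iterates over np.arange(32) filters.

# hypothetical call of the patch-visualisation routine (layer index is a placeholder)
extract_features_vis(directories, dataset_settings, model_settings, train_settings,
                     channels=np.arange(13), percentile=99,
                     extract_features=[3],    # branch layer(s) with at least 32 filters
                     avg_pool=None, use_softmax=False)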
Example #7
def inference(directories, dataset_settings, model_settings, train_settings, 
              channels=np.arange(13), percentile=99, extract_features=None, 
              avg_pool=None, use_softmax=False):
    """ Calculate probability maps based on extract feature layers """
    
    # get network
    n_branches = 2
    if model_settings['networkname'].startswith('CD') or model_settings['networkname'].startswith('FINETUNE'):
        network, conv_classifier = get_downstream_network(model_settings, train_settings['gpu'], n_branches)
    else:
        network = get_network(model_settings, train_settings['gpu'])
        conv_classifier = True
    # load net to GPU     
    if train_settings['gpu'] != None:
        torch.cuda.set_device(train_settings['gpu'])
        network.cuda()
       
    for q, idx in enumerate(dataset_settings['indices_test']):
        # get filenames
        filenames = [str(idx)+'_a.npy', str(idx)+'_b.npy']
        # prepare images
        images = load_images(directories['data_path'], filenames, channels=channels)    
        for filename in filenames:
            images[filename] = normalize2plot(images[filename], percentile)      
        images = channelsfirst(images)
        images = to_torch(images)
         
        if isinstance(extract_features, list): 
            prob_maps = inference_on_images_hypercolumn(network=network,    
                                                  images=images, 
                                                  conv_classifier=conv_classifier, 
                                                  n_branches=n_branches, 
                                                  gpu=train_settings['gpu'], 
                                                  extract_features=extract_features,
                                                  avg_pool=avg_pool,
                                                  use_softmax=use_softmax)
        else:
            out = inference_on_images(network=network, 
                                      images=images, 
                                      conv_classifier=conv_classifier, 
                                      n_branches=n_branches, 
                                      gpu=train_settings['gpu'], 
                                      extract_features=extract_features,
                                      avg_pool=avg_pool,
                                      use_softmax=use_softmax)
            
            
            # back to numpy
            prob_maps = out.squeeze().detach().cpu().numpy()
        
        save_networkname = model_settings['filename'].split('/')[-1]
        if not os.path.exists(os.path.join(directories['results_dir_cd'], save_networkname)):
            os.mkdir(os.path.join(directories['results_dir_cd'], save_networkname))
        if not os.path.exists(os.path.join(directories['results_dir_cd'], save_networkname,'probability_maps')):
            os.mkdir(os.path.join(directories['results_dir_cd'], save_networkname,'probability_maps'))
        
        np.save(os.path.join(directories['results_dir_cd'], save_networkname,'probability_maps',str(idx)), prob_maps)
    
        print('\r {}/{}'.format(q+1, len(dataset_settings['indices_test'])))
        
    print('all probability maps saved!')
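A sketch that loads one of the saved probability maps back and turns it into a binary change map, assuming model_settings and directories from the sketches above; the index, the channel selection and the 0.5 threshold are placeholders.

# hypothetical post-processing of a probability map saved by inference()
import os
import numpy as np

save_networkname = model_settings['filename'].split('/')[-1]
prob = np.load(os.path.join(directories['results_dir_cd'], save_networkname,
                            'probability_maps', '0.npy'))    # placeholder index
# with use_softmax=True and a convolutional classifier the map is typically (n_classes, H, W);
# take the assumed "change" channel and threshold it
changemap = prob[1] > 0.5
print(changemap.shape, changemap.sum(), 'changed pixels')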
Example #8
def extract_features_perImage(directories, imagelist, model_settings, layers):
    """
    Extract features from specified layers (in the branch) using a trained model.

    Parameters
    ----------
    directories : dict
        dictionary with strings specifying directories
    imagelist : list of numpy nd.arrays of shape (N,M,D)
        images to extract features from; D should be equal to n_channels in model_settings.
    model_settings : dict 
        settings of trained model
    layers : list
        list of integers, representing the layers to extract features from.
        layers should be in branch of model.

    Raises
    ------
    Exception
        if the architecture specified in model_settings is undefined, an exception is raised

    Returns
    -------
    featlist : list
        per image, a list of feature maps cast to numpy arrays of shape (N,M,D);
        all feature maps are upsampled to the spatial size of the first extracted layer.

    """
    # cfgs are saved as strings, cast back to list
    branch = model_settings['cfg_branch'].split("[" )[1]
    branch = branch.split("]" )[0]
    branch = branch.replace("'", "")
    branch = branch.replace(" ", "")
    branch = branch.split(",")
    top = model_settings['cfg_top'].split("[" )[1]
    top = top.split("]" )[0]
    top = top.replace("'", "")
    top = top.replace(" ", "")
    top = top.split(",")
    classifier = model_settings['cfg_classifier'].split("[" )[1]
    classifier = classifier.split("]" )[0]
    classifier = classifier.replace("'", "")
    classifier = classifier.replace(" ", "")
    classifier = classifier.split(",")
    # save in model_settings
    model_settings['cfg'] = {'branch': np.array(branch, dtype='object'), 
                             'top': np.array(top,dtype='object'),
                             'classifier': np.array(classifier, dtype='object')}
    
    # batch_norm saved as string cast back to bool
    if model_settings['batch_norm'] == 'False' : 
        model_settings['batch_norm'] = False
    elif model_settings['batch_norm'] == 'True' : 
        model_settings['batch_norm'] = True
          
    # build network
    model_settings['network'] = model_settings['networkname']
    if model_settings['network'] == 'siamese':
        n_branches = 2
    elif model_settings['network'] == 'triplet':
        n_branches = 3
        model_settings['network'] = 'siamese'
    elif model_settings['network'] == 'hypercolumn':
        n_branches = 2
    else:
        raise Exception('Architecture undefined! \n \
                        Choose one of: "siamese", "triplet", "hypercolumn"')
        
        
    net = NetBuilder.build_network(
        net=model_settings['network'],
        cfg=model_settings['cfg'],
        n_channels=int(model_settings['n_channels']), 
        n_classes=int(model_settings['n_classes']),
        patch_size=int(model_settings['patch_size']),
        im_size=(96,96),
        batch_norm=model_settings['batch_norm'],
        n_branches=n_branches)  
    
    # load weights
    net.load_state_dict(torch.load(model_settings['filename']))
              
    featlist = list()
    for j, im in enumerate(imagelist): 
        outlist = list()
        # prepare image
        im_n = normalize2plot(im)
        im_n = np.moveaxis(im_n, -1, 0)
        im_n = np.expand_dims(im_n, axis=0)
        im_t = torch.as_tensor(im_n)
        
    
        # get features
        im_out = net([im_t.float(),im_t], 1, extract_features=layers)
        
        # upsample if needed and cast back to numpy array
        for i, fl in enumerate(im_out):
            if i == 0:
                ref_size = fl.shape[2:]
            if fl.shape[2:] != ref_size:
                fl = nn.functional.interpolate(fl, size=ref_size, mode='bilinear', align_corners=True)
            im_out[i] = np.moveaxis(np.squeeze(fl.detach().numpy()), 0,-1)
        
        # concat
        outlist.extend(im_out)
        featlist.append(outlist)
        
    return featlist
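The cfg values are stored as the string repr of a Python list, and the code above rebuilds the lists by string surgery. A self-contained illustration on a made-up cfg string; ast.literal_eval is shown only as an equivalent alternative, not as what the example uses.

# illustration of the cfg-string parsing used above, on a hypothetical cfg value
import ast

cfg_branch = "['C', 32, 'M', 'C', 64]"    # hypothetical saved string

# string-surgery approach from extract_features_perImage
branch = cfg_branch.split("[")[1].split("]")[0]
branch = branch.replace("'", "").replace(" ", "").split(",")
print(branch)                             # ['C', '32', 'M', 'C', '64'] (all entries are strings)

# equivalent parse that keeps the original int/str types
print(ast.literal_eval(cfg_branch))       # ['C', 32, 'M', 'C', 64]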