Example #1
def merge_json_from_data_dir(fnames: Sequence[str], output_fname: str):
    """
    Prepends the data directory to each file name and concatenates the corresponding json data sources
    """
    # Build the full paths by joining the data directory with each file name
    full_fnames = []
    for fname in fnames:
        full_fname = os.path.join(get_data_dir(), fname)
        full_fnames.append(full_fname)

    # Concatenate the data sources and write the combined json
    full_output_name = os.path.join(get_data_dir(), output_fname)
    all_ds = WkwData.concat_datasources(json_paths_in=full_fnames,
                                        json_path_out=full_output_name)
    return all_ds
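A minimal usage sketch for the helper above; the file names are borrowed from Example #7 and are only illustrative:

combined = merge_json_from_data_dir(
    fnames=['test_data_three_bboxes.json',
            'debris_clean_added_bboxes2_wiggle_datasource.json'],
    output_fname='train_test_combined.json')
print(f'Wrote {len(combined)} combined data sources')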
Example #2
def WkwDataSetConstructor():
    """ Construsts a WkwData[set] from fixed parameters. These parameters can also be explored for 
        further testing"""    
    # Get data source from example json
    json_dir = gpath.get_data_dir()
    datasources_json_path = os.path.join(json_dir, 'datasource_20X_980_980_1000bboxes.json')
    data_sources = WkwData.datasources_from_json(datasources_json_path)
    # Only pick the first two bboxes for faster epoch
    data_sources = data_sources[0:2]
    data_split = DataSplit(train=0.70, validation=0.00, test=0.30)
    # input, output shape
    input_shape = (28, 28, 1)
    output_shape = (28, 28, 1)
    # flags for memory and storage caching
    cache_RAM = True
    cache_HDD = True
    # HDD cache directory
    connDataDir = '/conndata/alik/genEM3_runs/VAE/'
    cache_root = os.path.join(connDataDir, '.cache/')
    dataset = WkwData(
        input_shape=input_shape,
        target_shape=output_shape,
        data_sources=data_sources,
        data_split=data_split,
        normalize=False,
        transforms=ToZeroOneRange(minimum=0, maximum=255),
        cache_RAM=cache_RAM,
        cache_HDD=cache_HDD,
        cache_HDD_root=cache_root
    )
    return dataset
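A short usage sketch for the constructor above; the DataLoader pattern mirrors Example #13:

import torch
from torch.utils.data.sampler import SubsetRandomSampler

dataset = WkwDataSetConstructor()
train_loader = torch.utils.data.DataLoader(
    dataset=dataset, batch_size=64, num_workers=8,
    sampler=SubsetRandomSampler(dataset.data_train_inds),
    collate_fn=dataset.collate_fn)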
Example #3
def make_skel_from_json(json_path: str):
    """
    Creates a skeleton object from the binary targets of the data sources in the json file
    Args:
        json_path: the path of the data source json file
    Returns:
        skel: the skeleton object
    """
    data_sources_dict = WkwData.convert_ds_to_dict(
        WkwData.read_short_ds_json(json_path=json_path))
    # Init with empty skeleton
    empty_skel_name = os.path.join(get_data_dir(), 'NML', 'empty_skel.nml')
    skel = wkskel.Skeleton(nml_path=empty_skel_name)

    # Loop over each bbox
    keys = list(data_sources_dict.keys())
    num_nodes_perTree = 5
    for idx, key in tqdm(enumerate(keys),
                         desc='Making bbox nml',
                         total=len(keys)):
        # Get minimum and maximum node id
        min_id = (num_nodes_perTree * idx) + 1
        max_id = num_nodes_perTree * (idx + 1)
        # Encode the target in the tree name
        cur_target = data_sources_dict[key]['target_class']
        cur_name = f'{key}, Debris: {cur_target[0]}, Myelin: {cur_target[1]}'
        # add current tree
        add_bbox_tree(skel=skel,
                      bbox=data_sources_dict[key]['input_bbox'],
                      tree_name=cur_name,
                      node_id_min_max=[min_id, max_id])
    return skel
Example #4
def config_wkwdata(datasources_json_path: str = None,
                   input_shape: Tuple = (140, 140, 1),
                   output_shape: Tuple = (140, 140, 1),
                   cache_HDD: bool = False,
                   cache_RAM: bool = False,
                   batch_size: int = 256,
                   num_workers: int = 8):
    """ Return a named tuple with the parameters for initialization of a wkwdata"""
    fieldnames = ('input_shape, output_shape, cache_RAM, cache_HDD, batch_size, '
                  'num_workers, cache_HDD_root, datasources_json_path')
    config = namedtuple('config', fieldnames)
    return config(input_shape=input_shape,
                  output_shape=output_shape,
                  cache_RAM=cache_RAM,
                  cache_HDD=cache_HDD,
                  batch_size=batch_size,
                  num_workers=num_workers,
                  cache_HDD_root=os.path.join(get_data_dir(), '.cache/'),
                  datasources_json_path=datasources_json_path)
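A brief usage sketch, assuming the helper and WkwData are importable; Example #12 uses the same pattern through WkwData.config_wkwdata and WkwData.init_from_config (the json name is only illustrative):

json_path = os.path.join(get_data_dir(), 'train_test_combined.json')
config = config_wkwdata(datasources_json_path=json_path, batch_size=64)
dataset = WkwData.init_from_config(config)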
Example #5
import os
from genEM3.data.wkwdata import WkwData
from genEM3.util.path import get_data_dir
# Read the data
json_name = os.path.join(get_data_dir(), 'combined', 'combined_20K_patches.json')
data_sources = WkwData.read_short_ds_json(json_path=json_name)
# Read an old json for comparison
old_json_name = os.path.join(get_data_dir(), 'dense_3X_10_10_2_um/original_merged_double_binary_v01.json')
old_example = WkwData.datasources_from_json(old_json_name)
# Write a copy [with some modifications]
output_name = os.path.join(get_data_dir(), 'combined', 'copyTest_20K_patches.json')
WkwData.write_short_ds_json(datasources=data_sources, json_path=output_name, convert_to_short=True)
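A small follow-up sketch to sanity-check the round trip, assuming read_short_ds_json returns the list of data sources as in Example #20:

copy_sources = WkwData.read_short_ds_json(json_path=output_name)
assert len(copy_sources) == len(data_sources)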
Example #6
import os

from genEM3.data import transforms
from genEM3.data.wkwdata import WkwData, DataSplit
from genEM3.model.autoencoder2d import Encoder_4_sampling_bn_1px_deep_convonly_skip, AE_Encoder_Classifier, Classifier3Layered
from genEM3.training.multiclass import Trainer, subsetWeightedSampler
from genEM3.util.path import get_data_dir, gethostnameTimeString

# Train dataset: Create the dataset for training data
run_root = os.path.dirname(os.path.abspath(__file__))
input_shape = (140, 140, 1)
output_shape = (140, 140, 1)

data_split = DataSplit(train=0.85, validation=0.15, test=0.00)
cache_RAM = True
cache_HDD = False
batch_size = 256
num_workers = 8
datasources_json_path = os.path.join(get_data_dir(), 'dense_3X_10_10_2_um/test_data_three_bboxes_with_myelin_v01.json')
data_sources = WkwData.datasources_from_json(datasources_json_path)

transforms = transforms.Compose([
    transforms.RandomFlip(p=0.5, flip_plane=(1, 2)),
    transforms.RandomFlip(p=0.5, flip_plane=(2, 1)),
    transforms.RandomRotation90(p=1.0, mult_90=[0, 1, 2, 3], rot_plane=(1, 2))
])

train_dataset = WkwData(
    input_shape=input_shape,
    target_shape=output_shape,
    data_sources=data_sources,
    data_split=data_split,
    transforms=transforms,
    cache_RAM=cache_RAM,
Example #7
from genEM3.util.path import get_data_dir
from genEM3.data.wkwdata import WkwData
import os

# Test concatenating jsons
test_json_path = os.path.join(get_data_dir(), 'test_data_three_bboxes.json')
train_json_path = os.path.join(
    get_data_dir(), 'debris_clean_added_bboxes2_wiggle_datasource.json')
# Concatenate the test and training data sets
output_name = os.path.join(get_data_dir(), 'train_test_combined.json')
all_ds = WkwData.concat_datasources([train_json_path, test_json_path], output_name)
assert len(all_ds) == len(WkwData.datasources_from_json(test_json_path)) + len(
    WkwData.datasources_from_json(train_json_path))
Example #8
import os
import torch
import numpy as np

from genEM3.data import transforms
from genEM3.data.wkwdata import WkwData, DataSplit
from genEM3.model.autoencoder2d import Encoder_4_sampling_bn_1px_deep_convonly_skip, AE_Encoder_Classifier, Classifier3Layered
from genEM3.training.classifier import Trainer, subsetWeightedSampler
from genEM3.util.path import get_data_dir, gethostnameTimeString
# Parameters
run_root = os.path.dirname(os.path.abspath(__file__))
cache_HDD_root = os.path.join(get_data_dir(), '.cache/')
datasources_json_path = os.path.join(get_data_dir(), 'train_test_combined.json')
state_dict_path = '/u/flod/code/genEM3/runs/training/ae_v05_skip/.log/epoch_60/model_state_dict'
input_shape = (140, 140, 1)
output_shape = (140, 140, 1)

data_split = DataSplit(train=0.85, validation=0.15, test=0.00)
cache_RAM = True
cache_HDD = True
batch_size = 256
num_workers = 8

data_sources = WkwData.datasources_from_json(datasources_json_path)

transforms = transforms.Compose([
    transforms.RandomFlip(p=0.5, flip_plane=(1, 2)),
    transforms.RandomFlip(p=0.5, flip_plane=(2, 1)),
    transforms.RandomRotation90(p=1.0, mult_90=[0, 1, 2, 3], rot_plane=(1, 2))
])
Example #9
import os
import cProfile
import pstats

from genEM3.util.path import get_data_dir
from genEM3.data.skeleton import make_skel_from_json

# start profiling
profiler = cProfile.Profile()
profiler.enable()

# read the json data sources
json_path = os.path.join(get_data_dir(), 'combined', 'combined_20K_patches.json')
skel = make_skel_from_json(json_path=json_path)

# Finish profiling
profiler.disable()
stats = pstats.Stats(profiler).sort_stats('cumtime')
stats.print_stats()
# write the nml file
nml_name = os.path.join(get_data_dir(), 'NML', 'combined_20K_patches.nml')
skel.write_nml(nml_write_path=nml_name)
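Optionally, the profile can also be dumped to disk for later inspection with pstats' standard dump_stats (the file name here is just an example):

stats.dump_stats(os.path.join(get_data_dir(), 'make_skel_from_json.prof'))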
Example #10
import os

from genEM3.data.wkwdata import WkwData
from genEM3.util.path import get_data_dir

# Read Json file
json_names = [
    'dense_3X_10_10_2_um/original_merged_double_binary_v01.json',
    '10x_test_bboxes/10X_9_9_1_um_double_binary_v01.json'
]
ds_names = [os.path.join(get_data_dir(), j_name) for j_name in json_names]
data_sources = WkwData.concat_datasources(ds_names)
# Get the short version of the data sources
output_name = os.path.join(get_data_dir(), 'combined',
                           'combined_20K_patches.json')
short_ds = WkwData.convert_to_short_ds(data_sources=data_sources)
# Write combined data source json file
WkwData.write_short_ds_json(datasources=short_ds, json_path=output_name)
Example #11
import os

import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')

from genEM3.data import transforms
from genEM3.data.wkwdata import WkwData, DataSplit
from genEM3.util.path import get_data_dir
# Train dataset: Create the dataset for training data
run_root = os.path.dirname(os.path.abspath(__file__))
input_shape = (140, 140, 1)
output_shape = (140, 140, 1)

data_split = DataSplit(train=0.70, validation=0.15, test=0.15)
cache_RAM = True
cache_HDD = False
batch_size = 1024
num_workers = 8

# Read the data sources
json_name = os.path.join(get_data_dir(), 'combined',
                         'combined_20K_patches.json')
data_sources = WkwData.read_short_ds_json(json_path=json_name)

transforms = transforms.Compose([
    transforms.RandomFlip(p=0.5, flip_plane=(1, 2)),
    transforms.RandomFlip(p=0.5, flip_plane=(2, 1)),
    transforms.RandomRotation90(p=1.0, mult_90=[0, 1, 2, 3], rot_plane=(1, 2))
])

dataset = WkwData(input_shape=input_shape,
                  target_shape=output_shape,
                  data_sources=data_sources,
                  data_split=data_split,
                  transforms=transforms,
                  cache_RAM=cache_RAM,
Example #12
# To add a new markdown cell, type '# %% [markdown]'
# %%
import os
import time
import pickle
import itertools
from collections import namedtuple
import numpy as np
import matplotlib.pyplot as plt

from genEM3.data.wkwdata import WkwData, DataSource
from genEM3.util.path import get_data_dir
import genEM3.data.annotation as annotation
# %% Prepare for annotation
# Load the json file for the dataset
json_dir = os.path.join(get_data_dir(),
                        'debris_clean_added_bboxes2_wiggle_datasource.json')
config = WkwData.config_wkwdata(json_dir)
dataset = WkwData.init_from_config(config)

# Get a set of data sources with the normal bounding boxes to create a patch-wise dataset and a larger bounding box for annotation
margin = 35
roi_size = 140
source_dict = annotation.patch_source_list_from_dataset(dataset=dataset,
                                                        margin=margin,
                                                        roi_size=roi_size)
dataset_dict = dict.fromkeys(source_dict)

for key in source_dict:
    cur_source = source_dict[key]
    cur_patch_shape = tuple(cur_source[0].input_bbox[3:6])
Example #13
def main():
    parser = argparse.ArgumentParser(description='Convolutional VAE for 3D electron microscopy data')
    parser.add_argument('--result_dir', type=str, default='.log', metavar='DIR',
                        help='output directory')
    parser.add_argument('--batch_size', type=int, default=256, metavar='N',
                        help='input batch size for training (default: 256)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: None)')

    # model options
    # Note(AK): with the AE models from genEM3, the 2048 latent size and 16 fmaps are fixed
    parser.add_argument('--latent_size', type=int, default=2048, metavar='N',
                        help='latent vector size of encoder')
    parser.add_argument('--max_weight_KLD', type=float, default=1.0, metavar='N',
                        help='Weight for the KLD part of loss')

    args = parser.parse_args()
    print('The command line arguments:\n')
    print(args)

    # Make the directory for the result output
    if not os.path.isdir(args.result_dir):
        os.makedirs(args.result_dir)

    torch.manual_seed(args.seed)
    # Parameters
    warmup_kld = True
    connDataDir = '/conndata/alik/genEM3_runs/VAE/'
    json_dir = gpath.get_data_dir()
    datasources_json_path = os.path.join(json_dir, 'datasource_20X_980_980_1000bboxes.json')
    input_shape = (140, 140, 1)
    output_shape = (140, 140, 1)
    data_sources = WkwData.datasources_from_json(datasources_json_path)
    # # Only pick the first bboxes for faster epoch
    # data_sources = [data_sources[0]]
    data_split = DataSplit(train=0.80, validation=0.00, test=0.20)
    cache_RAM = True
    cache_HDD = True
    cache_root = os.path.join(connDataDir, '.cache/')
    gpath.mkdir(cache_root)

    # Set up summary writer for tensorboard
    constructedDirName = ''.join([f'weightedVAE_{args.max_weight_KLD}_warmup_{warmup_kld}_', gpath.gethostnameTimeString()])
    tensorBoardDir = os.path.join(connDataDir, constructedDirName)
    writer = SummaryWriter(log_dir=tensorBoardDir)
    launch_tb(logdir=tensorBoardDir, port='7900')
    # Set up data loaders
    num_workers = 8
    dataset = WkwData(
        input_shape=input_shape,
        target_shape=output_shape,
        data_sources=data_sources,
        data_split=data_split,
        normalize=False,
        transforms=ToStandardNormal(mean=148.0, std=36.0),
        cache_RAM=cache_RAM,
        cache_HDD=cache_HDD,
        cache_HDD_root=cache_root
    )
    # Data loaders for training and test
    train_sampler = SubsetRandomSampler(dataset.data_train_inds)
    train_loader = torch.utils.data.DataLoader(
        dataset=dataset, batch_size=args.batch_size, num_workers=num_workers, sampler=train_sampler,
        collate_fn=dataset.collate_fn)

    test_sampler = SubsetRandomSampler(dataset.data_test_inds)
    test_loader = torch.utils.data.DataLoader(
        dataset=dataset, batch_size=args.batch_size, num_workers=num_workers, sampler=test_sampler,
        collate_fn=dataset.collate_fn)
    # Model and optimizer definition
    input_size = 140
    output_size = 140
    kernel_size = 3
    stride = 1
    # initialize with the given value of KLD (maximum value in case of a warmup scenario)
    weight_KLD = args.max_weight_KLD
    model = ConvVAE(latent_size=args.latent_size,
                    input_size=input_size,
                    output_size=output_size,
                    kernel_size=kernel_size,
                    stride=stride,
                    weight_KLD=weight_KLD).to(device)
    # Add model to the tensorboard as graph
    add_graph(writer=writer, model=model, data_loader=train_loader, device=device)
    # print the details of the model
    print_model = True
    if print_model:
        model.summary(input_size=input_size, device=device.type)
    # set up optimizer
    optimizer = optim.Adam(model.parameters(), lr=1e-3)

    start_epoch = 0
    best_test_loss = np.finfo('f').max

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print('=> loading checkpoint %s' % args.resume)
            checkpoint = torch.load(args.resume)
            start_epoch = checkpoint['epoch'] + 1
            best_test_loss = checkpoint['best_test_loss']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print('=> loaded checkpoint %s' % args.resume)
        else:
            print('=> no checkpoint found at %s' % args.resume)
    # Training loop
    for epoch in range(start_epoch, args.epochs):
        # warmup the kld error linearly
        if warmup_kld:
            model.weight_KLD.data = torch.Tensor([((epoch+1) / args.epochs) * args.max_weight_KLD]).to(device) 

        train_loss, train_lossDetailed = train(epoch, model, train_loader, optimizer, args,
                                               device=device)
        test_loss, test_lossDetailed = test(epoch, model, test_loader, writer, args,
                                            device=device)

        # logging, TODO: Use better tags for the logging
        cur_weight_KLD = model.weight_KLD.detach().item()
        writer.add_scalar('loss_train/weight_KLD', cur_weight_KLD, epoch)
        writer.add_scalar('loss_train/total', train_loss, epoch)
        writer.add_scalar('loss_test/total', test_loss, epoch)
        writer.add_scalars('loss_train', train_lossDetailed, global_step=epoch)
        writer.add_scalars('loss_test', test_lossDetailed, global_step=epoch)
        # add the histogram of weights and biases plus their gradients
        for name, param in model.named_parameters():
            writer.add_histogram(name, param.detach().cpu().data.numpy(), epoch)
            # weight_KLD is a parameter but does not have a gradient. It creates an error if one 
            # tries to plot the histogram of a None variable
            if param.grad is not None:
                writer.add_histogram(name+'_gradient', param.grad.cpu().numpy(), epoch)
        # plot mu and logvar
        for latent_prop in ['cur_mu', 'cur_logvar']:
            latent_val = getattr(model, latent_prop)
            writer.add_histogram(latent_prop, latent_val.cpu().numpy(), epoch)
        # flush them to the output
        writer.flush()
        print('Epoch [%d/%d] loss: %.3f val_loss: %.3f' % (epoch + 1, args.epochs, train_loss, test_loss))
        is_best = test_loss < best_test_loss
        best_test_loss = min(test_loss, best_test_loss)
        save_directory = os.path.join(tensorBoardDir, '.log')
        save_checkpoint({'epoch': epoch,
                         'best_test_loss': best_test_loss,
                         'state_dict': model.state_dict(),
                         'optimizer': optimizer.state_dict()},
                        is_best,
                        save_directory)

        with torch.no_grad():
            # Decode 64 random samples from the prior latent space into images
            sample = torch.randn(64, args.latent_size).to(device)
            sample = model.decode(sample).cpu()
            sample_uint8 = undo_normalize(sample, mean=148.0, std=36.0)
            img = make_grid(sample_uint8)
            writer.add_image('sampling', img, epoch)
Example #14
import os
import wkskel
import numpy as np
from genEM3.util.path import get_data_dir, getMag8DatasetDir
from genEM3.data.wkwdata import DataSource, WkwData
from genEM3.data.skeleton import getAllTreeCoordinates
from genEM3.util.image import bboxesFromArray
nmlPath = os.path.join(get_data_dir(), 'artefact_trainingData.nml')
skel = wkskel.Skeleton(nmlPath)

# Get coordinates of the debris locations
coordArray = getAllTreeCoordinates(skel)
numTrainingExamples = 600
assert coordArray.shape == (numTrainingExamples, 3)

# Get the bounding boxes of each debris location and read into numpy array
dimsForCrop = np.array([140, 140, 0])
bboxes_debris = bboxesFromArray(coordArray, dimsForCrop)

# The clean bounding boxes (inspected by me)
bboxes_clean = [[24310, 22880, 640, 140, 140, 50],
                [24868, 20876, 1731, 140, 140, 50],
                [30163, 16682, 662, 140, 140, 50],
                [25985, 17030, 2768, 140, 140, 50],
                [21980, 15643, 2705, 140, 140, 50],
                [27701, 20539, 2881, 140, 140, 50],
                [21052, 16640, 3107, 140, 140, 50],
                [19631, 15376, 3267, 140, 140, 50],
                [24568, 15582, 3365, 140, 140, 50],
                [24761, 15838, 3341, 140, 140, 50],
                [29011, 18583, 4956, 140, 140, 50],
Example #15
file_name = '/u/alik/code/genEM3/playground/AK/classifier/.log/10X_9_9_1_um_with_myelin_Final.pkl'
w_loaded = Widget.load(file_name=file_name)
# Get the datasources
source_list = []
sources_fromWidget = w_loaded.dataset.data_sources
for i, cur_source in enumerate(sources_fromWidget):
    # correct the bbox back to the original bbox
    # fix shape
    cur_input_bbox = remove_bbox_margin(cur_source.input_bbox, margin=35)
    cur_target_bbox = remove_bbox_margin(cur_source.target_bbox, margin=35)
    # Update the binary targets to two binary decisions for the presence of image artefacts and Myelin
    cur_targets = [
        w_loaded.annotation_list[i][1].get('Debris'),
        w_loaded.annotation_list[i][1].get('Myelin')
    ]
    source_list.append(
        DataSource(id=cur_source.id,
                   input_path=getMag8DatasetDir(),
                   input_bbox=cur_input_bbox,
                   input_mean=cur_source.input_mean,
                   input_std=cur_source.input_std,
                   target_path=getMag8DatasetDir(),
                   target_bbox=cur_target_bbox,
                   target_class=cur_targets,
                   target_binary=cur_source.target_binary))

# Json name
json_name = os.path.join(get_data_dir(), '10x_test_bboxes',
                         '10X_9_9_1_um_double_binary_v01.json')
# Write to json file
WkwData.datasources_to_json(source_list, json_name)
Example #16
import os

import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')

from genEM3.data.wkwdata import WkwData, DataSplit
from genEM3.util.path import get_data_dir

# Data settings
run_root = os.path.dirname(os.path.abspath(__file__))
input_shape = (140, 140, 1)
output_shape = (140, 140, 1)
data_split = DataSplit(train=0.70, validation=0.15, test=0.15)
cache_RAM = False
cache_HDD = False
batch_size = 1024
num_workers = 0

# Data sources
json_name = os.path.join(get_data_dir(), 'combined', 'combined_20K_patches.json')
data_sources = WkwData.read_short_ds_json(json_path=json_name)
transformations = WkwData.get_common_transforms()
# Data set
dataset = WkwData(
    input_shape=input_shape,
    target_shape=output_shape,
    data_sources=data_sources,
    data_split=data_split,
    transforms=transformations,
    cache_RAM=cache_RAM,
    cache_HDD=cache_HDD)
# Data loaders
data_loader_params = {'dataset': dataset, 'batch_size': batch_size,
                      'num_workers': num_workers, 'collate_fn': dataset.collate_fn}
data_loaders = data_loaders_split(params=data_loader_params)
Example #17
skeletons = [Skeleton(skel_dir) for skel_dir in skel_dirs]
print(f'Time to read skeleton: {time.time() - start}')
# Read the coordinates and target class of all three skeletons into the volume data frame
volume_df = get_volume_df(skeletons=skeletons)
# Get the ingredients for making the datasources
bboxes = bboxesFromArray(volume_df[['x', 'y', 'z']].values)
input_dir = '/tmpscratch/webknossos/Connectomics_Department/2018-11-13_scMS109_1to7199_v01_l4_06_24_fixed_mag8_artifact_pred/color/1'
target_class = volume_df['class'].values.astype(float)
target_binary = 1
target_dir = input_dir
input_mean = 148.0
input_std = 36.0
# Create a list of data sources
source_list = []
for i, cur_bbox in enumerate(bboxes):
    cur_target = target_class[i]
    source_list.append(
        DataSource(id=str(i),
                   input_path=input_dir,
                   input_bbox=cur_bbox.tolist(),
                   input_mean=input_mean,
                   input_std=input_std,
                   target_path=target_dir,
                   target_bbox=cur_bbox.tolist(),
                   target_class=cur_target,
                   target_binary=target_binary))
# Json name
json_name = os.path.join(get_data_dir(), 'test_data_three_bboxes.json')
# Write to json file
WkwData.datasources_to_json(source_list, json_name)
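A quick read-back check of the json written above, using datasources_from_json as in Example #7 (a minimal sketch, not part of the original script):

loaded_sources = WkwData.datasources_from_json(json_name)
assert len(loaded_sources) == len(source_list)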
Example #18
import os

from genEM3.data.wkwdata import WkwData
from genEM3.util.path import get_data_dir

# Read Json file
json_names = ['dense_3X_10_10_2_um/original_merged_double_binary_v01.json',
              '10x_test_bboxes/10X_9_9_1_um_double_binary_v01.json']
ds_names = [os.path.join(get_data_dir(), j_name) for j_name in json_names]
data_sources = []
dataset_path = '/tmpscratch/webknossos/Connectomics_Department/2018-11-13_scMS109_1to7199_v01_l4_06_24_fixed_mag8_artifact_pred/color/1'
for ds in ds_names:
    cur_ds = WkwData.datasources_from_json(json_path=ds)
    cur_ds_dict = WkwData.convert_ds_to_dict(cur_ds)
    # All paths use the artifact_pred dataset
    for s in cur_ds_dict:
        cur_source = cur_ds_dict[s]
        cur_source['input_path'] = dataset_path
        cur_source['target_path'] = dataset_path
        cur_ds_dict[s] = cur_source
    # Write out the jsons
    cur_ds_corrected_list = WkwData.convert_ds_to_list(datasources_dict=cur_ds_dict)
    WkwData.datasources_to_json(datasources=cur_ds_corrected_list, json_path=ds)
Example #19
import os
import torch
from torch.utils.data.sampler import SubsetRandomSampler

from genEM3.data.wkwdata import WkwData, DataSplit
from genEM3.model.autoencoder2d import Encoder_4_sampling_bn_1px_deep_convonly_skip, AE_Encoder_Classifier, Classifier
from genEM3.training.classifier import Trainer
from genEM3.util.path import get_data_dir
# Parameters
run_root = '/conndata/alik/genEM3_runs/ae_classifier'
cache_HDD_root = os.path.join(run_root, '.cache/')
datasources_json_path = os.path.join(get_data_dir(),
                                     'debris_clean_datasource.json')
state_dict_path = '/conndata/alik/genEM3_runs/ae_v05_skip/epoch_60/model_state_dict'
input_shape = (140, 140, 1)
output_shape = (140, 140, 1)

data_split = DataSplit(train=0.70, validation=0.20, test=0.10)
cache_RAM = True
cache_HDD = True
batch_size = 64
num_workers = 0

data_sources = WkwData.datasources_from_json(datasources_json_path)
dataset = WkwData(input_shape=input_shape,
                  target_shape=output_shape,
                  data_sources=data_sources,
                  data_split=data_split,
                  cache_RAM=cache_RAM,
                  cache_HDD=cache_HDD,
                  cache_HDD_root=cache_HDD_root)
Example #20
import os

from genEM3.data.wkwdata import WkwData
from genEM3.util.path import get_data_dir

# Read the two jsons
target_names = ['Debris', 'Myelin']
json_names = ['combined_20K_patches.json', 'combined_20K_patches_v2.json']
full_names = [
    os.path.join(get_data_dir(), 'combined', f_name) for f_name in json_names
]
ds_list = [WkwData.read_short_ds_json(name) for name in full_names]
ds_dict = [WkwData.convert_ds_to_dict(ds) for ds in ds_list]
# Get the difference between the two data sources from jsons
diff_sources = WkwData.compare_ds_targets(two_datasources=ds_dict,
                                          source_names=json_names,
                                          target_names=target_names)
print(diff_sources)