Example #1
def compute_scores(iterations):

	labels_key = ArrayKey('LABELS')
	gt_affs_key = ArrayKey('GT_AFFINITIES')
	pred_affinities_key = ArrayKey('PREDICTED_AFFS')
	sample_z_key = ArrayKey("SAMPLE_Z")

	voxel_size = Coordinate((1, 1, 1))
	input_shape = Coordinate((132, 132, 132)) * voxel_size
	output_shape = Coordinate((44, 44, 44)) * voxel_size
	sample_shape = Coordinate((1, 1, 6)) * voxel_size

	print("input_size: ", input_shape)
	print("output_size: ", output_shape)

	request = BatchRequest()
	# request.add(labels_key, output_shape)
	request.add(gt_affs_key, output_shape)
	request.add(pred_affinities_key, input_shape)
	# request.add(sample_z_key, sample_shape)

	dataset_names = {
		gt_affs_key: 'volumes/gt_affs',
		pred_affinities_key: 'volumes/pred_affs'
	}

	array_specs = {
		gt_affs_key: ArraySpec(interpolatable=True),
		pred_affinities_key: ArraySpec(interpolatable=True)
	}

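	# Read the saved ground-truth and predicted affinities back from the
	# snapshot HDF5 files, one Hdf5Source provider per sample, padded so that
	# requests never run out of bounds.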
	pipeline = tuple(
		Hdf5Source(
			os.path.join(data_dir, sample + '.hdf'),
			datasets=dataset_names,
			array_specs=array_specs) +
		Pad(gt_affs_key, None) +
		Pad(pred_affinities_key, None)
		# Pad(merged_labels_key[i], None) for i in range(num_merges) # don't know why this doesn't work
		for sample in samples
	)

	pipeline += SequentialProvider()

	pipeline += Snapshot(
			dataset_names={
				gt_affs_key: 'volumes/gt_affs',
				# pred_affinities_key: 'volumes/pred_affs',
				# sample_z_key: 'volumes/sample_z',
			},
			output_filename='test_scores.hdf',
			every=1,
			dataset_dtypes={
				gt_affs_key: np.float32,
				# pred_affinities_key: np.float32,
				# sample_z_key: np.float32,
			})

	print("Calculating Scores...")
	with build(pipeline) as p:
		aris = []
		for i in range(iterations):
			if i % 10 == 0:
				print("iteration: ", i)
			req = p.request_batch(request)
			gt_affs = np.array(req[gt_affs_key].data)
			pred_affs = threshold(__crop_center(np.array(req[pred_affinities_key].data), (44, 44, 44)))

			aris.append(adjusted_rand_score(gt_affs.flatten(), pred_affs.flatten()))
			print("ari: ", aris[i])

		# print ("aris: ", aris)
		aris = np.array(aris)
		maximum = np.max(aris)
		minimum = np.min(aris)
		std = np.std(aris)
		mean = np.mean(aris)
		upper_std = mean + std
		lower_std = mean - std
		print("maximum: ", maximum)
		print("minimum: ", minimum)
		print("mean: ", mean)
		print("std: ", std)
		print("upper_std: ", upper_std)
		print("lower_std: ", lower_std)

	with open("ari/" + setup_name + ".txt", "wb") as fp:   #Pickling
		pickle.dump(aris, fp)
	print("Score calculation finished")
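

# Note: `threshold` and `__crop_center` are called by compute_scores above
# (and again further below) but are not defined in this listing. A minimal
# sketch of what they plausibly do, assuming affinities are binarized at 0.5
# and volumes are center-cropped over their trailing spatial axes (names and
# defaults here are assumptions, not the original implementation):
def threshold(affs, cutoff=0.5):
    # binarize affinities so adjusted_rand_score compares label-like arrays
    return (np.asarray(affs) > cutoff).astype(np.uint8)


def __crop_center(array, shape):
    # center-crop the last len(shape) axes of `array` to `shape`
    slices = [slice(None)] * (array.ndim - len(shape))
    for dim, target in zip(array.shape[-len(shape):], shape):
        start = (dim - target) // 2
        slices.append(slice(start, start + target))
    return array[tuple(slices)]
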
def predict(checkpoint, iterations):

    print("checkpoint: ", checkpoint)

    labels_key = ArrayKey('LABELS')
    gt_affs_key = ArrayKey('GT_AFFINITIES')
    raw_affs_key = ArrayKey('RAW_AFFINITIES')
    raw_joined_affs_key = ArrayKey('RAW_JOINED_AFFINITIES')
    raw_key = ArrayKey('RAW')
    pred_affinities_key = ArrayKey('PREDICTED_AFFS')
    sample_z_key = ArrayKey("SAMPLE_Z")
    # broadcast_key = ArrayKey("BROADCAST")
    # pred_logits_key = ArrayKey("PRED_LOGITS")
    # sample_out_key = ArrayKey("SAMPLE_OUT")
    # debug_key = ArrayKey("DEBUG")

    voxel_size = Coordinate((1, 1, 1))
    input_shape = Coordinate(config['input_shape']) * voxel_size
    output_shape = Coordinate(config['output_shape']) * voxel_size
    sample_shape = Coordinate((1, 1, 6)) * voxel_size
    # debug_shape = Coordinate((1, 1, 5)) * voxel_size

    print("input_size: ", input_shape)
    print("output_size: ", output_shape)

    request = BatchRequest()
    request.add(labels_key, output_shape)
    request.add(gt_affs_key, input_shape)
    request.add(raw_affs_key, input_shape)
    request.add(raw_joined_affs_key, input_shape)
    request.add(raw_key, input_shape)
    request.add(pred_affinities_key, output_shape)
    # request.add(broadcast_key, output_shape)
    request.add(sample_z_key, sample_shape)
    # request.add(pred_logits_key, output_shape)
    # request.add(sample_out_key, sample_shape)
    # request.add(debug_key, debug_shape)

    dataset_names = {
        labels_key: 'volumes/labels',
    }

    array_specs = {labels_key: ArraySpec(interpolatable=False)}

    pipeline = tuple(
        Hdf5Source(os.path.join(data_dir, sample + '.hdf'),
                   datasets=dataset_names,
                   array_specs=array_specs) + Pad(labels_key, None)
        # Pad(merged_labels_key[i], None) for i in range(num_merges) # don't know why this doesn't work
        for sample in samples)

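    # Simulate a raw volume from the labels (affinities -> joined affinities ->
    # AddRealism), compute ground-truth affinities on boundary-grown labels,
    # precache batches, rescale intensities for the network, run inference with
    # the trained checkpoint, and write the results to snapshot files.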
    pipeline += (
        # Pad(raw_key, size=None) +
        # Crop(raw_key, read_roi) +
        #Normalize(raw_key) +
        SequentialProvider() +
        AddAffinities(affinity_neighborhood=neighborhood,
                      labels=labels_key,
                      affinities=raw_affs_key) +
        AddJoinedAffinities(input_affinities=raw_affs_key,
                            joined_affinities=raw_joined_affs_key) +
        AddRealism(joined_affinities=raw_joined_affs_key,
                   raw=raw_key,
                   sp=0.25,
                   sigma=1,
                   contrast=0.7) +
        GrowBoundary(labels_key, steps=1, only_xy=True) +
        AddAffinities(affinity_neighborhood=neighborhood,
                      labels=labels_key,
                      affinities=gt_affs_key) +
        PreCache(cache_size=32, num_workers=8) +
        IntensityScaleShift(raw_key, 2, -1) + Predict(
            checkpoint=os.path.join(setup_dir,
                                    'train_net_checkpoint_%d' % checkpoint),
            inputs={config['raw']: raw_key},
            outputs={
                config['pred_affs']: pred_affinities_key,
                config['sample_z']: sample_z_key,
                # config['broadcast']: broadcast_key,
                # config['pred_logits']: pred_logits_key,
                # config['sample_out']: sample_out_key,
                # config['debug']: debug_key
            },
            graph=os.path.join(setup_dir, 'predict_net.meta')) +
        IntensityScaleShift(array=raw_key, scale=0.5, shift=0.5) + Snapshot(
            dataset_names={
                # labels_key: 'volumes/labels',
                gt_affs_key: 'volumes/gt_affs',
                # raw_key: 'volumes/raw',
                pred_affinities_key: 'volumes/pred_affs',
                # broadcast_key: 'volumes/broadcast',
                # sample_z_key: 'volumes/sample_z',
                # pred_logits_key: 'volumes/pred_logits',
                # sample_out_key: 'volumes/sample_out'
            },
            output_filename='prob_unet/' + setup_name +
            '/prediction_A_{id}.hdf',
            every=1,
            dataset_dtypes={
                # labels_key: np.uint16,
                gt_affs_key: np.float32,
                pred_affinities_key: np.float32,
                # broadcast_key: np.float32,
                # sample_z_key: np.float32,
                # pred_logits_key: np.float32,
                # sample_out_key: np.float32
            })
        # PrintProfilingStats(every=20)
    )

    print("Starting prediction...")
    with build(pipeline) as p:
        for i in range(iterations):
            req = p.request_batch(request)
            # sample_z = req[sample_z_key].data
            # broadcast_sample = req[broadcast_key].data
            # sample_out = req[sample_out_key].data
            # debug = req[debug_key].data
            # print("debug", debug)

            # print("sample_z: ", sample_z)
            # print("sample_out:", sample_out)
            # print("Z - 0")
            # print(np.unique(broadcast_sample[0, 0, :, :, :]))
            # print("Z - 1")
            # print(np.unique(broadcast_sample[0, 1, :, :, :]))
            # print("Z - 2")
            # print(np.unique(broadcast_sample[0, 2, :, :, :]))
            # print("Z - 3")
            # print(np.unique(broadcast_sample[0, 3, :, :, :]))
            # print("Z - 4")
            # print(np.unique(broadcast_sample[0, 4, :, :, :]))
            # print("Z - 5")
            # print(np.unique(broadcast_sample[0, 5, :, :, :]))
    print("Prediction finished")
def compute_scores(d, iterations):

    samples = [
        "prediction_%08i_A" % d,
        "prediction_%08i_B" % d,
        "prediction_%08i_C" % d,
        "prediction_%08i_D" % d
    ]
    labels_key = ArrayKey('LABELS')
    gt_affs_key = ArrayKey('GT_AFFINITIES')
    pred_affinities_key = ArrayKey('PREDICTED_AFFS')
    sample_z_key = ArrayKey("SAMPLE_Z")

    voxel_size = Coordinate((1, 1, 1))
    input_shape = Coordinate((132, 132, 132)) * voxel_size
    output_shape = Coordinate((44, 44, 44)) * voxel_size
    sample_shape = Coordinate((1, 1, 6)) * voxel_size

    request = BatchRequest()
    # request.add(labels_key, output_shape)
    request.add(gt_affs_key, output_shape)
    request.add(pred_affinities_key, input_shape)
    # request.add(sample_z_key, sample_shape)

    dataset_names = {
        gt_affs_key: 'volumes/gt_affs',
        pred_affinities_key: 'volumes/pred_affs'
    }

    array_specs = {
        gt_affs_key: ArraySpec(interpolatable=True),
        pred_affinities_key: ArraySpec(interpolatable=True)
    }

    pipeline = tuple(
        Hdf5Source(os.path.join(data_dir, sample + '.hdf'),
                   datasets=dataset_names,
                   array_specs=array_specs) + Pad(gt_affs_key, None) +
        Pad(pred_affinities_key, None)
        # Pad(merged_labels_key[i], None) for i in range(num_merges) # don't know why this doesn't work
        for sample in samples)

    pipeline += SequentialProvider()

    # pipeline += Snapshot(
    # 		dataset_names={
    # 			gt_affs_key: 'volumes/gt_affs',
    # 			pred_affinities_key: 'volumes/pred_affs',
    # 			# sample_z_key: 'volumes/sample_z',
    # 		},
    # 		output_filename='test_scores.hdf',
    # 		every=1,
    # 		dataset_dtypes={
    # 			gt_affs_key: np.float32,
    # 			pred_affinities_key: np.float32,
    # 			sample_z_key: np.float32,
    # 		})

    # print("Calculating Scores...")
    with build(pipeline) as p:
        aris = []
        pred_affs = []
        for i in range(iterations):
            req = p.request_batch(request)

            if i == 0:
                gt_affs = np.array(req[gt_affs_key].data)
            pred_affs.append(
                threshold(
                    __crop_center(np.array(req[pred_affinities_key].data),
                                  (44, 44, 44))))

        # print(np.sum(gt_affs))
        # print(np.sum(pred_affs[0]))
        # print(np.sum(pred_affs[1]))
        ari_A = adjusted_rand_score(gt_affs.flatten(), pred_affs[0].flatten())
        ari_B = adjusted_rand_score(gt_affs.flatten(), pred_affs[1].flatten())
        ari_C = adjusted_rand_score(gt_affs.flatten(), pred_affs[2].flatten())
        ari_D = adjusted_rand_score(gt_affs.flatten(), pred_affs[3].flatten())
        ari_YS = (ari_A + ari_B + ari_C + ari_D) / 4
        d_YS = 1 - ari_YS
        # print("gt_ari_avg: ", ari_YS)

        ari_AB = adjusted_rand_score(pred_affs[0].flatten(),
                                     pred_affs[1].flatten())
        ari_AC = adjusted_rand_score(pred_affs[0].flatten(),
                                     pred_affs[2].flatten())
        ari_AD = adjusted_rand_score(pred_affs[0].flatten(),
                                     pred_affs[3].flatten())
        ari_BC = adjusted_rand_score(pred_affs[1].flatten(),
                                     pred_affs[2].flatten())
        ari_BD = adjusted_rand_score(pred_affs[1].flatten(),
                                     pred_affs[3].flatten())
        ari_CD = adjusted_rand_score(pred_affs[2].flatten(),
                                     pred_affs[3].flatten())
        ari_SS = (ari_AB + ari_AC + ari_AD + ari_BC + ari_BD + ari_CD) / 6
        d_SS = 1 - ari_SS
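        # This appears to follow the generalized energy distance used to
        # evaluate the Probabilistic U-Net,
        # D_GED^2 = 2*E[d(Y, S)] - E[d(S, S')] - E[d(Y, Y')], with d = 1 - ARI;
        # the E[d(Y, Y')] term drops out because there is only a single
        # ground truth Y here.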
        GED = 2 * d_YS - d_SS
        # print("pred_ari_avg: ", ari_SS)

        # print("GED: ", 2*d_YS - d_SS)
        return (ari_YS, d_SS, GED)
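

# Hypothetical driver (not part of the original listing): the checkpoint id,
# snapshot id `d`, and iteration counts below are illustrative placeholders.
if __name__ == "__main__":
    predict(checkpoint=100000, iterations=4)
    ari_YS, d_SS, ged = compute_scores(1, 4)
    print("ari_YS:", ari_YS, "d_SS:", d_SS, "GED:", ged)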