Example #1
    def save_result(self, results, **kwargs):
        visuals_np = Visualizer.convert_visuals_to_numpy(
            results, batchSize=1, label_nc=self.opt.label_nc)
        # We only run the demo with batch size 1, so drop the batch dimension.
        visuals_np = OrderedDict([(k, v[0]) for k, v in visuals_np.items()])

        filename = self._get_filename(kwargs)
        save_path = os.path.join(self.save_dir, filename)
        save_image(visuals_np["fake_image"], save_path, create_dir=True)

        save_style_matrix(results["encoded_style"][0],
                          "{}.csv".format(save_path[:-4]))
        return self.save_dir
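_get_filename and save_style_matrix are project helpers that are not shown here. A minimal sketch of what save_style_matrix plausibly does, assuming the encoded style is a 2-D torch tensor (a hypothetical reconstruction, not the project's actual code):

import numpy as np

def save_style_matrix(style_tensor, csv_path):
    # Detach from the graph, move to CPU, and dump as comma-separated values.
    matrix = style_tensor.detach().cpu().numpy()
    np.savetxt(csv_path, matrix, delimiter=",")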
Example #2
import os
from collections import OrderedDict

import cv2
import numpy as np
from tqdm import tqdm

# `opt`, `dataloader`, `model`, and `visualizer` are assumed to be set up
# earlier in the script (TestOptions, dataloader, Pix2PixModel, Visualizer).
save_path = os.path.join(opt.results_dir, opt.name)
os.makedirs(save_path, exist_ok=True)

# test
for i, data_i in tqdm(enumerate(dataloader)):
    if i * opt.batchSize >= opt.how_many:
        break

    generated = model(data_i, mode='inference2')

    img_path = data_i['path']
    for b in range(generated.shape[0]):

        visuals = OrderedDict([
            ('input_label', data_i['label'][b]),
            ('synthesized_image', generated[b]),
        ])

        visuals_rgb = visualizer.convert_visuals_to_numpy(visuals)

        name = os.path.splitext(os.path.basename(img_path[b]))[0]
        im1 = visuals_rgb['input_label']
        im2 = visuals_rgb['synthesized_image']

        h, w = im1.shape[:2]
        # Preallocate a uint8 canvas and tile label map and synthesis side by side
        im = np.zeros((h, w * 2, 3), dtype=np.uint8)
        im[:, :w] = im1
        im[:, w:] = im2

        # cv2 expects BGR channel order, so reverse the RGB channels before writing
        cv2.imwrite(os.path.join(save_path, name + '.png'), im[:, :, ::-1])
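The manual tiling above can also be written with np.hstack, which concatenates same-height images along the width axis; a minimal equivalent sketch, assuming im1 and im2 are H x W x 3 uint8 arrays:

import numpy as np

def tile_horizontally(im1, im2):
    # Stack two same-height images left to right.
    return np.hstack([im1, im2]).astype(np.uint8)

This avoids hard-coding offsets and preallocating the canvas by hand.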
Example #3
import os
from collections import OrderedDict

# `opt`, `dataloader`, `model`, `visualizer`, `html`, and `util` are assumed
# to come from the surrounding SPADE-style test script. The snippet was
# truncated mid-call; the webpage setup follows the pattern in Example #4.
web_dir = os.path.join(opt.results_dir, opt.name,
                       '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(web_dir,
                    'Experiment = %s, Phase = %s, Epoch = %s' %
                    (opt.name, opt.phase, opt.which_epoch))

# test
for i, data_i in enumerate(dataloader):
    if i * opt.batchSize >= opt.how_many:
        break

    generated = model(data_i, mode='inference')

    img_path = data_i['path']
    for b in range(generated.shape[0]):
        if opt.dataset_mode == 'brats':
            visuals = OrderedDict([('input_label', data_i['label'][b]),
                                   ('synthesized_image', generated[b])])
            visuals = visualizer.convert_visuals_to_numpy(visuals)
            image_numpy = visuals["synthesized_image"]
            # Each output channel holds one MRI modality; save them separately.
            # os.path.join avoids relying on a trailing slash in opt.results_dir.
            modalities = ['t1ce', 'flair', 't2', 't1']
            for c, modality in enumerate(modalities):
                util.save_image(
                    image_numpy[:, :, c],
                    os.path.join(opt.results_dir, str(opt.condition_class),
                                 'train_%s_img_full' % modality,
                                 '{}.png'.format(i * opt.batchSize + b)),
                    create_dir=True)
            print('processing t1ce, flair, t2, t1 modalities of index {}'.format(
                i * opt.batchSize + b))
        else:
            # Default branch (truncated in the original example); assumed to
            # follow the standard SPADE test loop, which saves the visuals to
            # the results webpage:
            visuals = OrderedDict([('input_label', data_i['label'][b]),
                                   ('synthesized_image', generated[b])])
            visualizer.save_images(webpage, visuals, img_path[b:b + 1])
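util.save_image is a project helper used throughout these examples; a minimal sketch of its likely behavior, assuming a PIL-based implementation (hypothetical, not the project's exact code):

import os
from PIL import Image

def save_image(image_numpy, image_path, create_dir=False):
    # Optionally create the parent directory, then write the array to disk.
    if create_dir:
        os.makedirs(os.path.dirname(image_path), exist_ok=True)
    Image.fromarray(image_numpy).save(image_path)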
Example #4
# Imports assumed from the SPADE-style codebase layout used by this script.
import os
from collections import OrderedDict

import cv2
import numpy as np
import torch
import matplotlib.pyplot as plt

import data
from options.test_options import TestOptions
from models.pix2pix_model import Pix2PixModel
from util.visualizer import Visualizer
from util import html


def main():

	# Mouse state, brush settings, and the semantic palette are module-level
	# globals shared with the event callbacks defined elsewhere in the script.
	global mousePressed, mousePressedLocation, mousePressedOffset, lastMouseLocation, brushSize, selectedSemantic, semantics

	# Create a figure to draw to and connect the mouse functions
	fig = plt.figure()
	fig.canvas.mpl_connect('button_press_event', onpress)
	fig.canvas.mpl_connect('button_release_event', onrelease)
	fig.canvas.mpl_connect('motion_notify_event', mousemove)
	fig.canvas.mpl_connect('scroll_event', scrollevent)
	fig.canvas.mpl_connect('key_press_event', onkey)
	# Create the image that is drawn to the figure
	imgData = np.zeros((256, 256))
	img = plt.imshow(imgData)
	plt.pause(0.05)

	# First, load the model
	opt = TestOptions().parse()
	dataloader = data.create_dataloader(opt)
	model = Pix2PixModel(opt)
	model.eval()
	visualizer = Visualizer(opt)

	# Create a webpage that summarizes all the results
	web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
	webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))

	# Get the format of the input data
	inData = None
	for i, data_i in enumerate(dataloader):
		inData = data_i
		break
	# Initially, set the input image and label map to zero
	inData['image'][:, :, :, :] = 0.0
	inData['label'][0, 0, :, :] = 0

	# Scratch label map the brush paints into (class indices as uint8)
	tmpImg = np.zeros((1, 1, 256, 256), dtype=np.uint8)

	# While the program is running, generate imagery from the input
	while True:

		# Draw any mouse movements into the input label map
		if mousePressed:
			# Paint a filled circle of the selected semantic class at the cursor
			cv2.circle(tmpImg[0, 0], (int(lastMouseLocation[0]), int(lastMouseLocation[1])),
				brushSize, semantics[selectedSemantic][0] - 1, -1)
			inData['label'] = torch.tensor(tmpImg)

		# Run a forward pass through the model to "infer" the output image
		generated = model(inData, mode='inference')

		img_path = inData['path']
		for b in range(generated.shape[0]):

			# Extract the visuals (use the live inData, not the stale data_i
			# left over from the warm-up loop above)
			visuals = OrderedDict([('input_label', inData['label'][b]),
				('synthesized_image', generated[b])])

			# Convert to a drawable numpy image; copy so cv2 can draw on it
			imgData = visualizer.convert_visuals_to_numpy(visuals)['synthesized_image'].copy()

			# Draw the cursor location and size
			cv2.circle(imgData, (int(lastMouseLocation[0]), int(lastMouseLocation[1])), brushSize, (255, 0, 0), 2)
			img.set_data(imgData)

			plt.pause(0.05)
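The event callbacks wired up at the top of main (onpress, onrelease, mousemove, scrollevent, onkey) are defined elsewhere in the script. A minimal sketch of the two simplest ones, assuming they only track the pressed state and cursor position (hypothetical, not the original handlers):

def onpress(event):
	# Start painting and remember where the cursor is.
	global mousePressed, lastMouseLocation
	mousePressed = True
	if event.xdata is not None and event.ydata is not None:
		lastMouseLocation = (event.xdata, event.ydata)

def onrelease(event):
	# Stop painting when the button is released.
	global mousePressed
	mousePressed = False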