Example 1
def generate_ridge_data(filename):
    """Generates a ridge data file from a .nii.gz file

    Args:
        filename (str): Filename to evaluate

    Returns:
        ndarray: 3-dimensional array of normalized ridge scores
    """
    # extract image data from file
    background_img = nib.load(filename)
    background_data = background_img.get_fdata()

    # generate ridge scores
    ridge_data = detect_ridges_concurrent(background_data)

    # save new image
    base_filename = pattern.search(filename).group(1)
    ridge_data_filename = f"{base_filename}_ridgeScore.nii.gz"
    iu.save_image(ridge_data, ridge_data_filename, background_img.affine)

    # normalize and save normalized data
    normalized_ridge_data = iu.normalize(ridge_data)
    normalized_ridge_data_filename = os.path.join(
        f"{base_filename}_ridgeScore_normalized.nii.gz")
    iu.save_image(normalized_ridge_data, normalized_ridge_data_filename,
                  background_img.affine)

    # return normalized ridgescores
    return ridge_data
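The snippet above delegates to an iu.normalize helper that is not shown. A minimal sketch of what such a helper might do, assuming plain min-max scaling of the ridge-score volume to [0, 1] (the name and behaviour are assumptions, not the project's actual code):

import numpy as np

def normalize(data):
    # Hypothetical stand-in for iu.normalize: min-max scale an ndarray to [0, 1]
    data = data.astype(np.float64)
    lo, hi = data.min(), data.max()
    if hi == lo:
        return np.zeros_like(data)
    return (data - lo) / (hi - lo)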
Example 2
async def get_cached_tile_buffer(prefix: str,
                                 var: str,
                                 z: int,
                                 x: int,
                                 y: int,
                                 colorize: str = None):
    print("calculate tile metadata")
    tile_meta = mercantile.Tile(x=x, y=y, z=z)
    print("getting bounds")
    min_lon, min_lat, max_lon, max_lat = mercantile.bounds(tile_meta)

    print("loading data array")
    da = await load_da(prefix, var)

    print("indexing tile selection")
    data_tile = da.sel(longitude=slice(min_lon, max_lon),
                       latitude=slice(max_lat, min_lat))

    print("getting min/max")
    abs_min, abs_max = get_absolute_min_max(prefix, var)

    print("Slicing and dicing data")
    normalized_tile = resize(
        normalize(data_tile, max_val=abs_max, min_val=abs_min))

    print("encoding png")
    return await encode_as_png(normalized_tile, colorize=colorize)
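Because get_cached_tile_buffer is a coroutine, callers have to await it. A hypothetical invocation, assuming encode_as_png returns raw PNG bytes (the prefix, variable name and colormap below are placeholders):

import asyncio

async def main():
    # Fetch the PNG buffer for zoom 3, tile column 2, row 4 (placeholder values)
    png_bytes = await get_cached_tile_buffer("era5", "t2m", z=3, x=2, y=4, colorize="viridis")
    with open("tile_3_2_4.png", "wb") as fh:
        fh.write(png_bytes)

asyncio.run(main())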
Example 3
def step(self, action):
    action = int(action)
    self._send(CMD_STEP)
    self._recv_bytes()
    self._send_action(action)
    reward, is_done = self._recv_step_json_data()
    image = self._recv_state_image()
    image = normalize(image)
    return image, reward, is_done
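step follows the usual (observation, reward, done) contract, so the environment can be driven by a standard rollout loop. A hypothetical loop, assuming env exposes the reset() and step() methods shown in these examples and policy is any callable that maps an observation to an integer action:

obs = env.reset()
done = False
total_reward = 0.0
while not done:
    action = policy(obs)  # hypothetical policy
    obs, reward, done = env.step(action)
    total_reward += reward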
Example 4
def get_clover_dataset(path, size):
    '''
    Load clover images as an ImageDataSet object.

    :param path: path to the directory containing the positive/negative folders
    :param size: size (in pixels) of the image files passed to the model
    :return: ImageDataSet object
    '''
    pos_images = np.array([
        np.asarray(img).ravel() for img in normalize(path + '/positive', size)
    ])
    pos_labels = np.full(shape=pos_images.shape[0], fill_value=1)

    neg_images = np.array([
        np.asarray(img).ravel() for img in normalize(path + '/negative', size)
    ])
    neg_labels = np.full(shape=neg_images.shape[0], fill_value=0)

    images = np.concatenate((pos_images, neg_images))
    labels = np.concatenate((pos_labels, neg_labels)).reshape(-1, 1)

    return ImageDataSet(images, labels)
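Here normalize acts as a loader: given a folder and a target size, it yields image objects that np.asarray can flatten. A plausible sketch under those assumptions (folder layout, file extension and scaling are guesses, not the project's actual implementation):

from pathlib import Path

import numpy as np
from PIL import Image

def normalize(folder, size):
    # Hypothetical loader: yield grayscale images resized to size x size, scaled to [0, 1]
    for path in sorted(Path(folder).glob("*.png")):
        img = Image.open(path).convert("L").resize((size, size))
        yield np.asarray(img, dtype=np.float32) / 255.0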
Example 5
def main():
    img_list = []
    for i in range(0, N):
        img_name = "../../../input_image/basket/images/%03d.png" % (i)
        # img_name = "../test_data/small/%03d_small.png" % (i)
        img = cv2.imread(img_name)
        gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_list.append(utils.normalize(gray_image))

    gradient_map = my_gradient_map.gradient_map(img_list)

    cv2.imwrite('output_diffuse.png', utils.denormalize(gradient_map))
    cv2.imshow('image', gradient_map)
    cv2.waitKey(0)
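main wraps the gradient-map computation between utils.normalize and utils.denormalize. A minimal sketch of such a pair, assuming the usual uint8 to [0, 1] float convention (the real utils module may differ):

import numpy as np

def normalize(gray_image):
    # Hypothetical utils.normalize: uint8 pixels -> float32 in [0, 1]
    return gray_image.astype(np.float32) / 255.0

def denormalize(float_image):
    # Hypothetical utils.denormalize: [0, 1] floats -> uint8 pixels for cv2.imwrite
    return np.clip(float_image * 255.0, 0, 255).astype(np.uint8)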
Example 6
def generate_batch_variable(image_generator, use_cuda, batch_size):
    batch = []

    for _ in range(batch_size):
        image = next(image_generator)
        image = image_utils.normalize(image)
        batch.append(image)

    batch = np.stack(batch).astype(np.float32)
    batch = np.transpose(batch, (0, 3, 1, 2))  # (batch, y, x, channel) -> (batch, channel, y, x)
    batch = torch.from_numpy(batch)

    if use_cuda:
        batch = batch.cuda()

    batch = torch.autograd.Variable(batch, requires_grad=False)

    return batch
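One hypothetical way to feed generate_batch_variable, assuming the generator yields H x W x C float arrays (the dummy data below exists only for this illustration):

import itertools

import numpy as np
import torch

images = [np.random.rand(64, 64, 3) for _ in range(16)]  # dummy H x W x C images
image_generator = itertools.cycle(images)                 # endless generator over the images
batch = generate_batch_variable(image_generator,
                                use_cuda=torch.cuda.is_available(),
                                batch_size=8)
print(batch.shape)  # torch.Size([8, 3, 64, 64])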
Example 7
def train_step(image, extractor, optimizer, weights, targets, num_layers,
               shift):
    with tf.GradientTape() as tape:
        # Get current activations
        outputs = extractor(image)

        loss = style_content_loss(outputs, weights['style'],
                                  weights['content'], weights['frame'],
                                  num_layers['style'], num_layers['content'],
                                  targets['style'], targets['content'],
                                  targets['frame'])

        # Add denoise loss
        loss += weights['denoise'] * denoise_loss(image, shift)

    # Use selected optimizer to descend gradient
    grad = tape.gradient(loss, image)
    optimizer.apply_gradients([(grad, image)])

    # Return the network's new values for the image and clip between 0 and 1
    return images.normalize(image)
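train_step nudges image through the optimizer and returns a normalized copy, so a driver loop has to write that copy back into the variable. A hedged sketch, assuming image is a tf.Variable and images.normalize clips values to [0, 1]:

import tensorflow as tf

# Hypothetical optimization loop; extractor, weights, targets, num_layers and shift
# are assumed to be set up elsewhere as in the surrounding project.
optimizer = tf.optimizers.Adam(learning_rate=0.02)
for _ in range(1000):
    clipped = train_step(image, extractor, optimizer, weights, targets, num_layers, shift)
    image.assign(clipped)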
Example 8
def generate_fake_image(image, generator_net, use_cuda):
    image = image_utils.normalize(image)
    image = image[np.newaxis, :, :, :]

    image = np.transpose(
        image,
        (0, 3, 1, 2))  # (batch, y, x, channel) -> (batch, channel, y, x)
    image = torch.from_numpy(image)
    if use_cuda:
        image = image.cuda()
    image = torch.autograd.Variable(image, requires_grad=False)

    fake = generator_net(image)

    fake = fake[0, :, :, :]
    if use_cuda:
        fake = fake.cpu()
    fake = fake.data.numpy()
    fake = np.transpose(fake, (1, 2, 0))  # (channel, y, x) -> (y, x, channel)
    fake = image_utils.unnormalize(fake[:, :, :])

    return fake
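generate_fake_image pairs image_utils.normalize with image_utils.unnormalize. A common convention for tanh-output generators is to map pixels into [-1, 1] and back; a sketch under that assumption (the project's real helpers may scale differently):

import numpy as np

def normalize(image):
    # Hypothetical image_utils.normalize: uint8 pixels -> float32 in [-1, 1]
    return image.astype(np.float32) / 127.5 - 1.0

def unnormalize(image):
    # Hypothetical image_utils.unnormalize: [-1, 1] floats -> uint8 pixels
    return np.clip((image + 1.0) * 127.5, 0, 255).astype(np.uint8)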
Example 9
def predict(input_path, output_path, model, model_dir, chip_size, channels, grids, batch_size, params):
	
	input_dataset = gdal.Open(input_path)

	image = image_utils.load_file(input_path)[:, : , :channels]

	image_predicted = np.zeros((image.shape[0], image.shape[1]), dtype=np.int32)
	
	params['batch_size'] = batch_size

	if model not in models.keys():
		raise ValueError('Model ' + model + ' is not supported')

	model_fn=models[model]

	estimator = tf.estimator.Estimator(model_fn=model_fn, params=params, model_dir=model_dir)

	for step in image_utils.get_grids(grids, chip_size):
		batch = []
		for (x, y, window, original_dimensions) in image_utils.sliding_window(image, step["steps"], step["chip_size"], (chip_size, chip_size)):
			if window.shape[0] != chip_size or window.shape[1] != chip_size:
				print(window.shape, chip_size)
				continue

			window_normalized = image_utils.normalize(window)

			batch.append({
				"window": window_normalized,
				"x": x,
				"y": y,
				"dimensions": original_dimensions
			})

			if len(batch) >= batch_size:
				windows = []
				positions = []
				dimensions = []
				for b in batch:
					windows.append(b.get("window"))
					positions.append((b.get("x"), b.get("y")))
					dimensions.append(b.get("dimensions"))
			
				windows = np.array(windows)

				predict_input_fn = tf.estimator.inputs.numpy_input_fn(
					x={"data": np.array(windows, dtype=np.float32)},
					shuffle=False
				)

				pred = estimator.predict(input_fn=predict_input_fn)

				for window, position, dimension, predict in zip(windows, positions, dimensions, pred):
					predict[predict > 0.5] = 1
					predict[predict <=  0.5] = 0

					predict = image_utils.resize(predict, (dimension[0], dimension[1]), preserve_range=True, anti_aliasing=True).astype(np.int8)			
					predict = predict.reshape((predict.shape[0], predict.shape[1]))

					predicted = image_utils.get_window(image_predicted, position[0], position[1], predict.shape[1], predict.shape[0])			
				
					if predict.shape != predicted.shape:
						import ipdb; ipdb.set_trace()
					try:
						image_utils.set_window(image_predicted, np.add(predict, predicted), position[0], position[1])
					except Exception as e:
						import ipdb; ipdb.set_trace()
				batch = []

	driver = input_dataset.GetDriver()
	output_dataset = driver.Create(output_path, image.shape[1], image.shape[0], 1, gdal.GDT_Int16)
	output_dataset.SetGeoTransform(input_dataset.GetGeoTransform())
	output_dataset.SetProjection(input_dataset.GetProjection())
	output_band = output_dataset.GetRasterBand(1)
	output_band.WriteArray(image_predicted.reshape((image_predicted.shape[0], image_predicted.shape[1])), 0, 0)
	output_band.FlushCache()
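predict consumes image_utils.sliding_window as a generator of (x, y, window, original_dimensions) tuples. A rough sketch of such a generator under that assumption; the stride handling is a guess, and the target-size argument is accepted only to match the call above:

def sliding_window(image, steps, chip_size, target_size):
    # Hypothetical sketch: walk the image in strides of `steps`, yielding
    # (x, y, window, original_dimensions) tuples; how the real helper uses
    # target_size is unknown, so this sketch ignores it
    for y in range(0, image.shape[0], steps):
        for x in range(0, image.shape[1], steps):
            window = image[y:y + chip_size, x:x + chip_size, :]
            yield x, y, window, (window.shape[0], window.shape[1])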
Example 10
def reset(self):
    self._send(CMD_RESET)
    image = self._recv_state_image()
    image = normalize(image)
    return image
Example 11
# reuse the existing chip index if one has already been written
if os.path.exists(images_df_path):
    image_df = pd.read_csv(images_df_path)
    id = max(image_df['id'].tolist())
else:
    image_df = pd.DataFrame(columns=['id', 'image'])
    id = 0

filename, extension = os.path.basename(image_path).split('.')

filename = filename.replace('_', '-')

for (x, y, window) in sliding_window(image, image_size):
    chip = np.array(window[:, :, :image_channels])

    chip = np.flipud(chip)
    chip = apply_contrast(chip)
    chip = normalize(chip)
    chip = chip * 255

    left = extent[0] + (x * spatial_resolution_x)
    right = left + (image_size * spatial_resolution_x)
    top = extent[3] - (y * spatial_resolution_y)
    bottom = top - (image_size * spatial_resolution_y)

    chip_extent = [left, right, bottom, top]

    wkt = "POLYGON (({left} {bottom}," \
          "{left} {top}," \
          "{right} {top}," \
          "{right} {bottom}," \
          "{left} {bottom}))" \
        .format(left=left, right=right, top=top, bottom=bottom)