Example #1
    def __init__(self, Generator, Discriminator, opt):
        self.Generator = Generator
        self.Discriminator = Discriminator
        self.opt = opt

        ### Set parameters for the training of the 0th layer
        self.Gs = []  # Generator list for each scale
        self.Zs = []  # Optimal noise list for each scale [z*, 0, 0, ..., 0]
        self.NoiseAmp = []  # Noise ratio used when merging with the previous layer's output, for each scale
        self.in_s = 0  # Zero tensor with the downsampled dimensions of the input image for scale 0

        ### TrainedModel Directory
        dir2save = generate_dir2save(self.opt)
        if os.path.exists(dir2save):
            print("TrainedModel directory already exists!")
        else:
            try:
                os.makedirs(dir2save)
            except OSError:
                print("Could not create the TrainedModel directory")

        # In case we're not training, load existing model
        if self.opt.mode != 'train':
            self.Gs, self.Zs, _, _, self.NoiseAmp = load_trained_pyramid(
                self.opt)

            # We might wish to replace content or style images
            if self.opt.test_content is not None:
                self.opt.content = self.opt.test_content

            if self.opt.test_style is not None:
                self.opt.style = self.opt.test_style

        ### Content image pyramid
        self.real_ = read_image(self.opt)
        self.style_ = read_image(self.opt, style=True)

        if self.style_.shape != self.real_.shape:
            self.style_ = imresize_to_shape(
                self.style_, [self.real_.shape[2], self.real_.shape[3]], opt)
            self.style_ = self.style_[:, :, :self.real_.shape[2],
                                      :self.real_.shape[3]]

        # "adjust_scales2image" also arranges network parameters according to input dimensions
        assert self.real_.shape == self.style_.shape
        self.real = adjust_scales2image(self.real_, self.opt)
        self.reals = create_reals_pyramid(self.real, self.opt)

        self.style = imresize(self.style_, self.opt.scale1, self.opt)
        self.styles = create_reals_pyramid(self.style, self.opt)
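create_reals_pyramid is referenced but not shown in this example; conceptually it builds the multi-scale pyramid by repeatedly downscaling the input, coarsest scale first. A rough sketch under that assumption (opt.stop_scale and opt.scale_factor are assumed option names, and this is not the project's actual code):

def create_reals_pyramid_sketch(real, opt):
    # Hypothetical sketch: downscale `real` once per scale, coarsest scale first
    reals = []
    for i in range(opt.stop_scale + 1):
        scale = opt.scale_factor ** (opt.stop_scale - i)
        reals.append(imresize(real, scale, opt))  # same imresize signature as used above
    return reals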
Example #2
def crop_images_according_to_annotations(dir_annotations, dir_images, dir_out):

    list_files = utilities.get_list_of_files(dir_annotations)
    utilities.check_output_dir(dir_out)
    print("Cropping and saving {} images.".format(len(list_files)))

    for i in range(0, len(list_files)):
        imagename = list_files[i].replace(".txt", ".png")
        image = utilities.read_image(dir_images, imagename)
        annotation = utilities.read_anno(dir_annotations, list_files[i])

        cropped_image = image[int(annotation[1]):int(annotation[3]),
                              int(annotation[0]):int(annotation[2])]

        utilities.save_image(dir_out, imagename, cropped_image)
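A minimal usage sketch with hypothetical directory names; the slicing above implies each annotation file holds [x_min, y_min, x_max, y_max] in pixel coordinates:

# Hypothetical paths; each annotation .txt is paired with a .png image of the same name
crop_images_according_to_annotations(
    dir_annotations="data/annotations",
    dir_images="data/images",
    dir_out="data/cropped",
)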
Example #3
    def __init__(self, image_modality, image_path_or_object):
        """
        image_modality: one of the static fields defined within the class
        image_path_or_object: an ITK image object or the DICOM/image path.
        """
        if self.check_modality(image_modality):
            self.image_modality = image_modality
        else:
            print('Incorrect image modality ' + image_modality +
                  ". Specify one from " + str(self.modalities_list))
            return None
        self.itk_image = read_image(image_path_or_object)
        self.label_mask = None
        self.label_mask_array = None
        self.resample_image = None
        self.resample_image_array = None
Example #4
def select_obj_frame(filename):
    """
    Reads the image and displays each block to the user. The user then chooses
    the block with the most objects of interest and enters that block number,
    which is passed on to the interactive segmentation method.

    :param filename: full filepath of the image
    :type filename: str
    :return: None
    """
    global img_list, base_mean, break_flag

    def close_event(evt):
        global break_flag
        print("Closed figure")
        break_flag = True

    block_size = (1024, 1024)
    img = read_image(filename)
    print("FILE: {}".format(filename))
    img_list, _ = split_image(img, block_size)
    frame_no = 0
    fig = plt.figure()
    fig.canvas.mpl_connect('close_event', close_event)
    for img in img_list:
        rgb_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        try:
            plt.imshow(rgb_image)
            plt.title('Frame No %s' % frame_no)
            frame_no += 1
            plt.show()
            plt.grid(b=False)
            plt.waitforbuttonpress(.5)
        except Exception:
            break_flag = True
            break
        if break_flag:
            break
    inp = get_input("Enter Frame number : ")
    if len(inp) != 0:
        frame_no = int(inp)
    if 0 <= frame_no < len(img_list):
        get_obj_color(img_list[frame_no])
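split_image is a helper not shown in this example; a rough sketch of what such a helper typically does, tiling the image into fixed-size blocks (an assumption, not the project's implementation):

def split_image(img, block_size):
    # Hypothetical sketch: tile img into (rows, cols) blocks; partial edge blocks are dropped
    bh, bw = block_size
    blocks, positions = [], []
    for y in range(0, img.shape[0] - bh + 1, bh):
        for x in range(0, img.shape[1] - bw + 1, bw):
            blocks.append(img[y:y + bh, x:x + bw])
            positions.append((y, x))
    return blocks, positions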
Example #5
    print(Style.RESET_ALL)

    images = files.get_images(root_path)  # Note line below resets the blue
    print(Style.RESET_ALL + Fore.GREEN +
          "Number of images in the path: {:d}".format(len(images)))

    # Create the image matrix here
    height, width = 30, 30
    img_matrix = np.zeros((len(images), height * width))  # (num_imgs x dim)

    # Going through all the images and populating the img_matrix
    size = 20  # Length of the loading bar in chars
    for i, img_path in enumerate(images):

        # Open each image
        img = ut.read_image(img_path, "BGR2GRAY")
        resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)  # dsize is (width, height)

        reshaped = resized.reshape(width * height)  # So we have it as a vector

        # populate the matrix
        img_matrix[i, :] = reshaped

        # Progress bar stuff
        perc = (i + 1) / len(images)
        bar = int(np.round(perc * size))
        line = "Opening images ["
        line += "=" * bar + " " * (size - bar)
        line += "] {:d}%".format(int(np.round(perc * 100)))
        ut.update_line(line)  # This func will use carriage return
    print("\nFinding the principal components.")
Example #6
	# Print the resizing settings
	print("Resizing set to {0}{1}{2}.".format(Fore.GREEN, resize, Fore.BLUE))
	if resize:
		print("Width: {0}{1}px{2}".format(Fore.WHITE, width, Fore.BLUE))
		print("Height: {0}{1}px{2}".format(Fore.WHITE, height, Fore.BLUE))

	# Reset colour
	print(Style.RESET_ALL)

	images = files.get_images(root_path) # Note line below resets the blue 
	print(Style.RESET_ALL + Fore.GREEN + "Number of images in the path: {:d}".format(len(images)))
	
	size = 20 # Length of the loading bar in chars
	for i, img_path in enumerate(images):
		image = ut.read_image(img_path, "BGR2RGB")  # colour image (RGB)

		# Do the image cropping and save to the output
		rect = an.get_rect(image)  # Tuple with bounding rect coords
		cropped = image[rect[1]:rect[3],rect[0]:rect[2]] # Simple cropping

		# This is the resizing stuff. Here I am assuming we
		# will be decimating the image (reducing size), hence
		# the interpolation used is the AREA relation.
		if resize:
			inter = cv2.INTER_AREA # OpenCV const for such interpolation
			cropped = cv2.resize(cropped, (width, height), interpolation=inter)


		# Saving the processed file 
		name = files.get_filename(img_path) + ".jpg"
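The snippet ends before the actual write; a plausible continuation of the loop body (the output directory is hypothetical and os is assumed to be imported), keeping in mind that the image was converted to RGB above, so OpenCV needs it back in BGR order before writing:

		# Hypothetical output directory; cv2.imwrite expects BGR channel order
		out_path = os.path.join(root_path, "processed")
		cv2.imwrite(os.path.join(out_path, name), cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))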
Example #7
	Dist = Xs*Xs + Ys*Ys
	mask = Dist < r**2

	#############################################################
	# Params for fitting the curve to the image and displaying it
	max_weight = 5 # Max weights of points (0-max)
	Xs = np.linspace(0, 450, 2000) # The linspace X for drawing line

	# The hypothesis function 
	def func(x, a, b, c): # So we can fit a parabola 
		return a*x**2 + b*x + c

	# Loading all the images in the path and detecting the OD in them
	for i, img_path in enumerate(images):
		if i > 129:
			original_image = ut.read_image(img_path, "BGR2GRAY")
			original_size = original_image.shape[0:2] # Stores the original size for later
			S = 450 # the square size of the image

			# Resize the image (with dims (450,450))
			img_resized = cv2.resize(original_image, (S, S), interpolation=cv2.INTER_AREA)
			masked_img = img_resized*mask

			# Sliding algorithm (y, x is the center pixel)
			offset = np.max([width, height])//2 + 2 # The offset from the borders
			for y in range(offset, N-offset, 5):
				for x in range(offset, N-offset, 5):

					# Arrays representing the window box corners
					starts = np.array([y-height/2, x-width/2], dtype=int)
					stops = np.array([starts[0]+height, starts[1]+width], dtype=int)
Example #8
    def set_label_mask(self, labels_mask_image):
        self.label_mask_image = (read_image(labels_mask_image)
                                 if labels_mask_image is not None
                                 else self.get_labels_mask())
Example #9
""" 
File to carry out the Fourier Transform on images and produce a magnitude image to be analyised and altered.
The inverse Fourier Transform can be applied to the altered magnitude image to recreate the original image but sharper.
"""
import numpy as np
import utilities

# Load image and convert to grayscale
img = utilities.read_image("osc.jpg", "BGR2GRAY")
size = img.shape

# Fourier Transform
# Computes the transform for a single (u, v) frequency; looping this over every frequency of an image is very slow
def fourier(u, v, size):

    # get dimensions
    N = size[0]  # rows
    M = size[1]  # columns

    f = 0
    for x in range(M):      # columns
        for y in range(N):  # rows
            f += img[y, x] * np.exp(-1j * 2 * np.pi * ((u * x) / M + (v * y) / N))

    # get the magnitude from the value of F
    # MAGNITUDE(F) = SQRT( REAL(F)^2 + IMAGINARY(F)^2 )
    magnitude = np.absolute(f)

    return magnitude
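
The double loop above is quadratic per frequency; a vectorised alternative (a sketch, not part of the original file) computes the whole magnitude spectrum at once with NumPy's FFT:

# 2-D FFT of the whole image, shifted so the zero frequency sits at the centre
F = np.fft.fftshift(np.fft.fft2(img))
magnitude_spectrum = np.abs(F)

# Log scaling makes the spectrum easier to inspect visually
magnitude_log = np.log1p(magnitude_spectrum)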

Example #10
def main():
    #print(f'datetime.datetime={str(datetime.datetime)}, datetime.date={str(datetime.date)}, strftime():{datetime.datetime.now().strftime("%Y%d%H%M%S")}')
    thrash = True
    print('tf version:{0}'.format(tf.VERSION))
    print('tf.keras version:{0}'.format(tf.keras.__version__))
    start_time = datetime.datetime.now().strftime("%Y%d%H%M%S")
    flags, unparsed = ut.parseArgs()
    print(flags)
    SAMPLE_FILE = flags.train_data_path + flags.sample + '.' + flags.img_file_extension
    img = ut.read_image(filename=SAMPLE_FILE, show=False)
    img = np.array(img)
    if thrash == True:
        img = ut.thrash_img(img)

    IMG_SHAPE=img.shape
    (x_train, y_train), (x_test, y_test)=ut.load_data(numclasses=flags.numclasses, train_path=flags.train_data_path, test_path=flags.test_data_path, onehot=True, extension=flags.img_file_extension)

    print('IMG_SHAPE:{0},  y_train shape:{1}'.format(IMG_SHAPE,y_train[0].shape))

    if flags.load_model:
        model = ut.load_stored_model(name=flags.model_dir + flags.model_name)
    elif flags.model == 'dense':
        model = ut.make_dense_model(flags=flags)
    elif flags.model  == 'conv2d':
        model = ut.make_convnet_model(flags=flags, shape=IMG_SHAPE)
    else:
        print('No model, no hope. Quitting...')
        return

    if flags.load_data:
        model = ut.load_stored_data(model=model, date_file_name=flags.data_dir + flags.data_name)

    print('Saving in {0}'.format(flags.tb_dir + start_time))
    tensorboard = TensorBoard(log_dir=flags.tb_dir + '{0}'.format(start_time))

    adam=tf.keras.optimizers.Adam(lr=flags.learning_rate)

    model.compile(optimizer=adam,
                  loss=flags.loss,
                  metrics=[flags.metric]
                  )

    if flags.train == True:
        print('Training...')
        scores = []
        for epoch in range(flags.epochs):
            print('Epoch:{0} of {1}'.format(epoch+1, flags.epochs))
            n = len(x_train)
            for batch in range(0,len(x_train), flags.batch_size):
                print('Batch {0} of {1}, epoch {2} of {3}.'.format(batch+1,n+1, epoch+1, flags.epochs))
                bunch_x, bunch_y = x_train[batch:batch+flags.batch_size], y_train[batch:batch+flags.batch_size]
                if len(bunch_x) < flags.batch_size: # skip partial batches
                    print('Skipping {0} samples..'.format(len(bunch_x)))
                    continue

                xs = []
                ys = []
                print("Iterating {0} samples".format(len(bunch_x)))
                for datum in range(len(bunch_x)):
                    file = bunch_x[datum]
                    img = ut.read_image(filename=flags.train_data_path+file, show=False)
                    img=np.array(img)
                    if thrash == True:
                        img = ut.thrash_img(img)
                    xs.append(img)
                    ys.append(bunch_y[datum])

                X= np.stack(xs, axis=0)
                Y= np.stack(ys, axis=0)

                score_before = model.evaluate(x=X,y=Y, batch_size=flags.batch_size)

                _ = model.fit(x=X, y=Y, shuffle=flags.shuffle, callbacks=[tensorboard])

                score_after = model.evaluate(x=X,y=Y, batch_size=flags.batch_size)

                if score_before == score_after:
                    print("Scores before and after training are identical")

                scores.append(score_after)
                if epoch == 0 and batch == 0:
                    model.summary()

                print('Score:{0}'.format(score_after))

            loss,acc = np.array([s[0] for s in scores]), np.array([s[1] for s in scores])
        print("Average loss:{0}  Average accuracy:{1}%".format(np.mean(loss), 100*np.mean(acc)))

    if flags.save_model:
        model_name = flags.model_name if flags.model_name is not None else start_time
        ut.save_model(model, flags.model_dir+model_name)
        print('Saved model to disk, json in {0}'.format(flags.model_dir + model_name + ".json"))

    if flags.save_data:
        data_name = flags.data_name if flags.data_name is not None else start_time
        model.save_weights(flags.data_dir + data_name + ".h5")
        print('Saved data to disk in {0}'.format(flags.data_dir + data_name + ".h5"))

    test_scores = []
    predictions = []
    if flags.evaluate or flags.predict:
        n = len(x_test)
        nTotal = 0
        sums_array = None
        for batch in range(0, len(x_test), flags.batch_size):
            print('Batch {0} of {1}.'.format(batch+1, n+1))
            bunch_x, bunch_y = x_test[batch:batch + flags.batch_size], y_test[batch:batch + flags.batch_size]
            if len(bunch_x) < flags.batch_size:  # skip partial batches
                print('Skipping {0} samples..'.format(len(bunch_x)))
                continue

            xs = []
            ys = []
            for d in range(len(bunch_x)):
                file = bunch_x[d]
                img = ut.read_image(filename=flags.test_data_path + file, show=False)
                img = np.array(img)
                if thrash == True:
                    img = ut.thrash_img(img)
                xs.append(img)
                ys.append(bunch_y[d])

            X = np.stack(xs, axis=0)
            Y = np.stack(ys, axis=0)

            if flags.evaluate:
                score = model.evaluate(x=X, y=Y, batch_size=flags.batch_size)
                test_scores.append(score)
                print('Test score:{0}'.format(score))


            if flags.predict:
                prediction = model.predict(X, verbose=2)
                processed_predictions = ut.process_predictions(prediction, Y)

                for pp in processed_predictions:
                    if sums_array is None:
                        sums_array = np.zeros_like(pp)
                    sums_array = np.add(sums_array, pp)
                    nTotal = nTotal+1

                pass

        if flags.predict:
            sums_array /= nTotal

        if predictions != None:
            pass


        if flags.evaluate:
            print('Average score:{0},{1}'.format(np.mean([s[0] for s in test_scores]), np.mean([s[1] for s in test_scores])))

        if flags.show_results:
            y_axis = np.arange(0, 1.0, 1.0/float(len(sums_array)))
            plt.plot(y_axis,sums_array)
            plt.show()

    pass
Example #11
    images = files.get_images(root_path)  # Note line below resets the blue
    print(Style.RESET_ALL + Fore.GREEN +
          "Number of images in the path: {:d}".format(len(images)))

    # CALCULATE THE QUALITIES OF THE IMAGES
    hist_size = 256
    histograms = np.zeros((len(images), hist_size))  # Matrix for storing histograms
    stats = np.zeros((len(images), 2))  # Matrix for storing Laplacian variance and ERR

    size = 20  # Length of the loading bar in chars
    for i, img_path in enumerate(images):
        image_gray = ut.read_image(img_path, "BGR2GRAY")  #grayscale
        lp = an.get_laplacian(image_gray)  # Laplacian
        err = an.get_err(image_gray)  # Err

        # Fill the matrices (Log domain)
        histograms[i, :] = an.get_histogram(image_gray).ravel()
        stats[i, 0] = np.log(np.var(lp))
        stats[i, 1] = np.log(err)

        # Progress bar stuff
        perc = (i + 1) / len(images)
        bar = int(np.round(perc * size))
        line = "Processing ["
        line += "=" * bar + " " * (size - bar)
        line += "] {:d}%".format(int(np.round(perc * 100)))
        ut.update_line(line)  # This func will use carriage return