Example #1
def classify_image(images, mask_list, k_size, save, display):
    """
    Classify pixels of a single image
    """
    if len(images) > 1:
        raise ValueError('Only one image can be classified at once')
    logging.info('Calculating, normalizing feature vectors for image')
    image = images[0]  # First and only member
    vectors = calculate_features(image.image, image.fov_mask, mask_list,
                                 k_size)
    logging.info('Classifying image pixels')
    probabilities, prediction = svm.classify(vectors)
    svm.assess(image.truth, prediction)
    svm.plot_roc(image.truth, probabilities)

    if save:
        image_utils.save_image(prediction, 'prediction.png')
        logging.info('Saved classified image')
    if display:
        image_utils.display_image(prediction)
        logging.info('Displaying classified image')
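
A hypothetical driver for classify_image, only to illustrate the expected inputs: load_images and build_kernel_masks are assumed helpers, the trained svm module is assumed to be in scope, and the Image objects are assumed to expose .image, .fov_mask and .truth as the calls above imply.

# Usage sketch with assumed helpers (not part of the snippet above)
images = load_images('data/test')       # assumed: returns a list of Image objects
mask_list = build_kernel_masks()        # assumed: masks consumed by calculate_features
classify_image(images[:1], mask_list, k_size=5, save=True, display=False)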
Example #2
def split_squares_average(img: np.ndarray, n_squares: int, marker_size: int):
    """
    Split the image into a grid of squares and fill each square with its average colour
    :param img: the original image
    :param n_squares: the number of squares per side
    :param marker_size: the width of the grid lines in pixels
    """
    result_img = img.copy()
    square_size = img.shape[0] // n_squares
    for nx in range(n_squares):
        for ny in range(n_squares):
            current_cell = result_img[square_size * nx:square_size * (nx + 1),
                                      square_size * ny:square_size * (ny + 1), :]
            # Fill the cell with its per-channel mean colour
            for ch in range(3):
                result_img[square_size * nx:square_size * (nx + 1),
                           square_size * ny:square_size * (ny + 1), ch] = \
                    np.mean(current_cell[:, :, ch])

    # Draw the grid lines, marker_size pixels wide
    for n in range(n_squares):
        result_img[square_size * n:square_size * n + marker_size, :, :] = 255
        result_img[:, square_size * n:square_size * n + marker_size, :] = 255
    display_image(result_img, title="test", cmap=None)
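
A minimal usage sketch for split_squares_average, assuming only NumPy and the display_image helper used above; the random test image is purely illustrative.

import numpy as np

demo = np.random.randint(0, 256, size=(512, 512, 3), dtype=np.uint8)  # synthetic RGB image
split_squares_average(demo, n_squares=8, marker_size=5)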
Example #3
def split_squares_random_direction_sorting(img: np.ndarray, n_squares: int,
                                           marker_size: int):
    """
    Split the image into a grid of squares and pixel-sort each square in a random direction
    :param img: the original image
    :param n_squares: the number of squares per side
    :param marker_size: the width of the grid lines in pixels
    """
    result_img = img.copy()
    square_size = img.shape[0] // n_squares
    for nx in range(n_squares):
        for ny in range(n_squares):
            current_cell_pixels = result_img[square_size * nx:square_size * (nx + 1),
                                             square_size * ny:square_size * (ny + 1), :]

            # Sort the pixels of each cell in a randomly chosen direction
            direction = np.random.choice(["UP", "DOWN", "LEFT", "RIGHT"])
            result_img[square_size * nx:square_size * (nx + 1),
                       square_size * ny:square_size * (ny + 1), :] = \
                pixel_sorting(current_cell_pixels, direction=direction)

    # Draw the grid lines, marker_size pixels wide
    for n in range(n_squares):
        result_img[square_size * n:square_size * n + marker_size, :, :] = 255
        result_img[:, square_size * n:square_size * n + marker_size, :] = 255
    display_image(result_img, title="rotation test", cmap=None)
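
The snippet relies on a pixel_sorting helper that is not shown. The following is a hypothetical sketch of what it might look like, sorting each column or row of a cell by the third channel value in the spirit of the edge-based sorting further down; the real implementation may differ.

import numpy as np

def pixel_sorting(cell: np.ndarray, direction: str = "UP") -> np.ndarray:
    # Sort the pixels of each column ("UP"/"DOWN") or row ("LEFT"/"RIGHT") by channel 2
    sorted_cell = cell.copy()
    if direction in ("UP", "DOWN"):
        for x in range(cell.shape[1]):
            column = sorted(cell[:, x, :], key=lambda px: px[2])
            sorted_cell[:, x, :] = column[::-1] if direction == "UP" else column
    else:
        for y in range(cell.shape[0]):
            row = sorted(cell[y, :, :], key=lambda px: px[2])
            sorted_cell[y, :, :] = row[::-1] if direction == "LEFT" else row
    return sorted_cell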
Example #4
singleton_folders = []
multiple_folders = []
for folder in count_by_folders:
    if count_by_folders[folder] == 1:
        singleton_folders.append(folder)
    elif count_by_folders[folder] > 1:
        multiple_folders.append(folder)
    else:
        print("EMPTY FOLDER: ", folder)


def get_batch():
    # Build a batch of batch_size images: number_of_pairs folders contribute
    # two images each, the remainder comes from singleton folders.
    number_of_pairs = ((batch_size + 1) // 2) // 2
    singles = sample(singleton_folders, k=batch_size - number_of_pairs * 2)
    multiples = sample(multiple_folders, k=number_of_pairs)
    batch = []
    for m in multiples:
        ms = sample(image_paths[m], k=2)
        # print(ms)
        for filename in ms:
            batch.append(image_utils.load_image(os.path.join(m, filename), training_params.shape))
    for s in singles:
        batch.append(image_utils.load_image(os.path.join(s, image_paths[s][0]), training_params.shape))
    return np.array(batch)


if __name__ == '__main__':
    batch = get_batch()
    print(batch.shape)
    image_utils.display_image(image_utils.unpreprocess_image(batch[0]))
    image_utils.display_image(image_utils.unpreprocess_image(batch[1]))
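
The snippet above assumes count_by_folders, image_paths, batch_size, training_params and the sample import already exist. A minimal sketch of that setup, under the assumption of one folder per identity with the full folder path used as the dictionary key:

import os
from random import sample

data_root = "data/faces"  # assumed dataset location: data_root/<identity>/<image files>
image_paths = {}
count_by_folders = {}
for folder in os.listdir(data_root):
    folder_path = os.path.join(data_root, folder)
    if not os.path.isdir(folder_path):
        continue
    files = [f for f in os.listdir(folder_path)
             if f.lower().endswith(('.jpg', '.jpeg', '.png'))]
    image_paths[folder_path] = files
    count_by_folders[folder_path] = len(files)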
Example #5
            temp_mi_loss = 20 * max(
                -1 * np.average(np.square(mi[i] - mi[j])) + 2., 0)
            # print(i, ",", j, '-NOT A PAIR', temp_mi_loss, np.sum(np.square(mi[i], np.zeros_like(mi[i])))**0.5)
        # print(np.sum(np.square(mi[i] - mi[j]))**0.5)
        running_mi_loss += temp_mi_loss
print("MI LOSS: ",
      running_mi_loss / ((params.batch_size * (params.batch_size - 1)) / 2))
o, lgx = sess.run([output, loggamma_x],
                  feed_dict={X: next_batch, training: True})
print(o.shape)
for j in range(params.batch_size):
    image_utils.display_image(image_utils.unpreprocess_image(o[j]))
    image_utils.display_image(image_utils.unpreprocess_image(next_batch[j]))
    print("IMAGE ", j, ": ",
          np.average(np.power(10 * (o[j] - next_batch[j]) / np.exp(lgx), 2) / 2.0 + lgx))
    diff_img = (np.power(o[j] - next_batch[j], 2) - 2.0) / 2.0
    image_utils.display_image(image_utils.unpreprocess_image(diff_img))
    if j > 1:
        break
print("TOTAL LOSS: ",
Example #6
def get_image_samples(query):
    image_type = "ActiOn"
    web_query = (query + ' face').split()
    web_query = '+'.join(web_query)
    url = "https://www.google.co.in/search?q=" + web_query + "&source=lnms&tbm=isch"
    print(url)
    #add the directory for your image here
    DIR = "Pictures"
    header = {
        'User-Agent':
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"
    }
    soup = get_soup(url, header)

    # holds (link, image type) pairs for the large original images
    ActualImages = []
    for a in soup.find_all("div", {"class": "rg_meta"}):
        meta = json.loads(a.text)
        link, Type = meta["ou"], meta["ity"]
        if len(ActualImages) < 10:
            ActualImages.append((link, Type))

    print("there are total", len(ActualImages), "images")

    if not os.path.exists(DIR):
        os.mkdir(DIR)
    DIR = os.path.join(DIR, web_query.split()[0])

    if not os.path.exists(DIR):
        os.mkdir(DIR)
    # download each candidate image and ask whether to keep it
    for i, (img, Type) in enumerate(ActualImages):
        try:
            req = urllib2.Request(img, headers=header)
            raw_img = urllib2.urlopen(req).read()
            cntr = len([i for i in os.listdir(DIR) if image_type in i]) + 1
            print(cntr)
            # default to .jpg when the image type is missing
            ext = Type if len(Type) > 0 else "jpg"
            filename = os.path.join(DIR, image_type + "_" + str(cntr) + "." + ext)
            with open(filename, 'wb') as f:
                f.write(raw_img)
            target_path = os.path.join(image_dir, "_".join(query.split(' ')),
                                       'supplement.jpg')
            print("TARGET_PATH: ", target_path)
            display_image(unpreprocess_image(load_image(filename)))
            for root, dirs, files in os.walk(
                    os.path.join(image_dir, "_".join(query.split(' ')))):
                for file in files:
                    display_image(
                        unpreprocess_image(load_image(os.path.join(root,
                                                                   file))))
            ans = input("KEEP THIS IMAGE?: ")
            if ans == 'y':
                os.rename(filename, target_path)
                break
        except Exception as e:
            print("could not load : " + img)
            print(e)
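
get_soup is not defined in the snippet. A plausible sketch, assuming urllib2 is urllib.request imported under that name and that BeautifulSoup (bs4) is used for parsing:

from urllib.request import Request, urlopen
from bs4 import BeautifulSoup

def get_soup(url, header):
    # Fetch the page with the given headers and parse the HTML
    return BeautifulSoup(urlopen(Request(url, headers=header)).read(), 'html.parser')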
Example #7
import image_utils
import training_utils
import numpy as np

# Load the test image, binarise the grayscale version and clean it up morphologically
image_color = image_utils.load_image('test samples/test_samples(0,25).png')
img = image_utils.image_bin(image_utils.image_gray(image_color))
img_bin = image_utils.erode(image_utils.dilate(img))

# Extract the regions of interest from the image
alphabet = []
selected_regions, numbers, alphabet = image_utils.select_roi(
    image_color.copy(), img, alphabet)
image_utils.display_image(selected_regions)

print(alphabet)

# Prepare the regions as ANN inputs and run the trained network over them
inputs = image_utils.prepare_for_ann(numbers)
outputs = training_utils.convert_output(alphabet)

ann = training_utils.load_modell()

#print(outputs)

result = ann.predict(np.array(inputs, np.float32))

final_alphabet = training_utils.convert_output([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

print(result)
print(training_utils.display_result(result, final_alphabet, alphabet))
Example #8
        elif direction == "DOWN":
            pixel_list = img[e[0]:, e[1], :]
            sorted_pixels = sorted(pixel_list, key=lambda x: x[2])
            result[e[0]:, e[1], :] = sorted_pixels[::-1]
        elif direction == "LEFT":
            pixel_list = img[e[0], :e[1], :]
            # Skip empty slices (e[1] == 0), which would raise a ValueError on assignment
            if len(pixel_list) == 0:
                continue
            sorted_pixels = sorted(pixel_list, key=lambda x: x[2])
            result[e[0], :e[1], :] = sorted_pixels[::-1]
        elif direction == "RIGHT":
            pixel_list = img[e[0], e[1]:, :]
            sorted_pixels = sorted(pixel_list, key=lambda x: x[2])
            result[e[0], e[1]:, :] = sorted_pixels[::-1]
    return result


if __name__ == '__main__':
    for i in range(2, 3):
        fname = str(i) + ".jpg"
        file_name, image = load_image(fname, cv2_read_param=1)
        display_image(image, title="Original image", cmap=None)

        edgemap = cv2.Canny(image, 100, 500)
        # enhanced_borders = enhance_borders(image, edgemap, marker_size=2)
        # display_image(enhanced_borders, title="Canny", cmap=None)
        # result_img = pixel_propagation_along_edges(image, edgemap, "RIGHT")
        result_img = pixel_sorting_along_edges(image, edgemap, "DOWN")
        # result_img = pixel_sorting_along_edges(result_img, edgemap, "RIGHT")
        display_image(result_img, title="", cmap=None)
        #save_image(file_name+"_sorting_narrow_down"+".png", result_img)
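
The visible part of pixel_sorting_along_edges iterates over edge coordinates e taken from the Canny edge map. A hypothetical sketch of how those coordinates might be obtained; the real enclosing loop may differ:

import numpy as np

def edge_coordinates(edgemap: np.ndarray) -> np.ndarray:
    # Canny produces a 0/255 map; every non-zero pixel is an edge point (row, col)
    return np.argwhere(edgemap > 0)

# e.g. inside pixel_sorting_along_edges: for e in edge_coordinates(edgemap): ...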