Code example #1
def functionality1(image_name: str, plot):
    image_data = ImageData("images\\" + image_name)

    y, i, q = Converter().rgb_to_yiq(image_data.get_matrix_red(),
                                     image_data.get_matrix_green(),
                                     image_data.get_matrix_blue())
    r, g, b = Converter().yiq_to_rgb(y, i, q)

    image_data.set_rgb_from_matrices(r, g, b)
    new_image_path = image_data.save_image(
        new_file_name_suffix='(rgb-yiq-rgb)')
    show_image(new_image_path, plot)
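The Converter used above presumably implements the standard NTSC RGB↔YIQ transform. For reference, here is a minimal NumPy sketch of that transform; the exact coefficients the project's Converter uses are an assumption, since the class itself is not shown.

import numpy as np

# Standard NTSC RGB -> YIQ matrix (assumed; the project's Converter may differ).
RGB_TO_YIQ = np.array([[0.299,  0.587,  0.114],
                       [0.596, -0.274, -0.322],
                       [0.211, -0.523,  0.312]])

def rgb_to_yiq(r, g, b):
    # r, g, b are equally shaped 2D channel matrices.
    rgb = np.stack([np.asarray(r), np.asarray(g), np.asarray(b)], axis=-1)
    yiq = rgb @ RGB_TO_YIQ.T
    return yiq[..., 0], yiq[..., 1], yiq[..., 2]

def yiq_to_rgb(y, i, q):
    # Invert the same matrix to get back to RGB.
    yiq = np.stack([np.asarray(y), np.asarray(i), np.asarray(q)], axis=-1)
    rgb = yiq @ np.linalg.inv(RGB_TO_YIQ).T
    return rgb[..., 0], rgb[..., 1], rgb[..., 2]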
Code example #2
def functionality5(image_name: str, plot):
    image_data = ImageData("images\\" + image_name)

    red_median = LocalFilter().apply_median_filter(image_data.get_matrix_red(),
                                                   mask_size=(10, 10))
    green_median = LocalFilter().apply_median_filter(
        image_data.get_matrix_green(), mask_size=(10, 10))
    blue_median = LocalFilter().apply_median_filter(
        image_data.get_matrix_blue(), mask_size=(10, 10))

    image_data.set_rgb_from_matrices(red_median, green_median, blue_median)
    image_filtered_median_path = image_data.save_image(
        new_file_name_suffix='(mediana)')
    show_image(image_filtered_median_path, plot)
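For reference, a single-channel median filter equivalent to what apply_median_filter appears to do can be sketched with SciPy; the project's LocalFilter is not shown, so its border handling and mask semantics are assumptions here.

import numpy as np
from scipy.ndimage import median_filter

def median_filter_channel(channel, mask_size=(10, 10)):
    # Replace each pixel with the median of its mask_size neighborhood.
    # SciPy reflects the image at the borders, which may differ from LocalFilter.
    return median_filter(np.asarray(channel), size=mask_size)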
Code example #3
def main() -> None:

    dat1 = ImageData(test_size=.75, gen_new_images=True)
    cn1 = ConvNet(cross_validating=True, model_num=16, conv_layers=(32, 64, 128, 256),
                  dense_layers=(256, 128), epochs=500, learning_rate=0.0001, patience=10)
    cn1.fit(dat1)
    sub1 = cn1.evaluate(dat1)

    dat1 = ImageData(test_size=.75, gen_new_images=True)
    cn2 = ConvNet(cross_validating=True, model_num=17, conv_layers=(32, 64, 128, 128),
                  dense_layers=(256, 128), epochs=500, learning_rate=0.0001, patience=10)
    cn2.fit(dat1)
    sub2 = cn2.evaluate(dat1)

    dat1 = ImageData(test_size=.75, gen_new_images=True)
    cn3 = ConvNet(cross_validating=True, model_num=18, conv_layers=(32, 64, 128, 256),
                  dense_layers=(512, 256), epochs=500, learning_rate=0.0001, patience=10)
    cn3.fit(dat1)
    sub3 = cn3.evaluate(dat1)

    dat1 = ImageData(test_size=.75, gen_new_images=True)
    cn4 = ConvNet(cross_validating=True, model_num=19, conv_layers=(16, 32, 64, 128),
                  dense_layers=(256, 128), epochs=500, learning_rate=0.0001, patience=10)
    cn4.fit(dat1)
    sub4 = cn4.evaluate(dat1)

    dat1 = ImageData(test_size=.75, gen_new_images=True)
    cn5 = ConvNet(cross_validating=True, model_num=20, conv_layers=(64, 128, 128, 64),
                  dense_layers=(256, 128), epochs=500, learning_rate=0.0001, patience=10)
    cn5.fit(dat1)
    sub5 = cn5.evaluate(dat1)

    

    ensemble = pd.DataFrame([sub1.id,
                             sub1.is_iceberg,
                             sub2.is_iceberg,
                             sub3.is_iceberg,
                             sub4.is_iceberg,
                             sub5.is_iceberg]).T
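The snippet ends with the raw ensemble table. A hypothetical continuation, assuming each sub* holds id and is_iceberg columns and that the final prediction is a plain average of the five models; the column names and output file below are placeholders, not part of the original.

    # Hypothetical naming and averaging step -- not from the original source.
    ensemble.columns = ['id', 'cn1', 'cn2', 'cn3', 'cn4', 'cn5']
    model_cols = ['cn1', 'cn2', 'cn3', 'cn4', 'cn5']
    ensemble['is_iceberg'] = ensemble[model_cols].astype(float).mean(axis=1)
    ensemble[['id', 'is_iceberg']].to_csv('ensemble_submission.csv', index=False)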
Code example #4
    def fit(self, data: ImageData) -> None:
        # fit is called to begin training
        # receives an ImageData object whose attributes hold the necessary test/training data
        # all data transformations are applied when the ImageData object is created

        generator = data.gen_flow_(data.X_train, data.X_angle_train,
                                   data.y_train, self.batch_size)

        self.model.fit_generator(
            generator,
            validation_data=([data.X_valid, data.X_angle_valid], data.y_valid),
            steps_per_epoch=len(data.X_train) / self.batch_size,
            epochs=self.epochs,
            callbacks=self.callbacks,
            verbose=1)
        self.save_model()
Code example #5
File: main.py Project: tomupson/nn-quickdraw-replica
def prepareImageData(category, rawData, label, customTestingFiles, limit):
    # No longer required, as the numpy file can be read directly from the site
    # into seperateImages, as long as the 80 header bytes are accounted for.
    #npybin.convertImagesToBinary("data/npy/" + imageCategory + ".npy", count)
    Logger.Log("Loading data for \"" + category.category + "\"", LogLevel.INFO)
    
    contentLength = int(rawData.headers['content-length'])
    if limit is None or NUMPY_HEADER_BYTES + (limit * BYTES_PER_IMAGE) > contentLength:
        limit = int((contentLength - NUMPY_HEADER_BYTES) / BYTES_PER_IMAGE)
        Logger.Log("Image count set to max (" + str(limit) + " images).", LogLevel.INFO)

    try:
        imageData = ImageHandler.seperateImages(rawData.read(NUMPY_HEADER_BYTES + (BYTES_PER_IMAGE * limit)))
    except Exception as e:
        Logger.Log("Error loading data: " + str(e))
        return  # imageData would be undefined below, so stop early
    
    Logger.Log("Finished loading data.", LogLevel.INFO)
    
    completeImageData = [ImageData(x, label) for x in imageData]
    threshold = round(0.8 * len(completeImageData))
    category.training = completeImageData[0:threshold]
    category.testing = completeImageData[threshold:len(imageData)]
    category.custom_testing = customTestingFiles
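The byte arithmetic above depends on two module-level constants that are not shown in this snippet. Plausible definitions, assuming the standard Quick, Draw! 28x28 uint8 bitmaps and the 80-byte .npy header mentioned in the comment, would be:

# Assumed values -- the real constants are defined elsewhere in main.py.
NUMPY_HEADER_BYTES = 80      # .npy header size noted in the comment above
BYTES_PER_IMAGE = 28 * 28    # one byte per pixel of a 28x28 grayscale bitmap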
Code example #6
def main():
    
    # base model with angle implemented
    dat1 = ImageData(train_size=.75, gen_new_images=False)
    cn1 = ConvNet(cross_validating=True, model_num=200, conv_layers=(64, 128, 128, 64),
                    dense_layers=(512, 256), epochs=500, learning_rate=0.001, dropout=0, patience=10)
    cn1.fit(dat1)
    sub1 = cn1.evaluate(dat1)

    # lower learning rate
    dat1 = ImageData(train_size=.75, gen_new_images=False)
    cn2 = ConvNet(cross_validating=True, model_num=201, conv_layers=(64, 128, 128, 64),
                    dense_layers=(512, 256), epochs=500, learning_rate=0.0001, dropout=0, patience=10)
    cn2.fit(dat1)
    sub2 = cn2.evaluate(dat1)

    # lower lr, gen new images
    dat1 = ImageData(train_size=.75, gen_new_images=True)
    cn3 = ConvNet(cross_validating=True, model_num=202, conv_layers=(64, 128, 128, 64),
                    dense_layers=(512, 256), epochs=500, learning_rate=0.0001, dropout=0, patience=10)
    cn3.fit(dat1)
    sub3 = cn3.evaluate(dat1)

    # lower lr, dropout 
    dat1 = ImageData(train_size=.75, gen_new_images=False)
    cn4 = ConvNet(cross_validating=True, model_num=203, conv_layers=(64, 128, 128, 64),
                    dense_layers=(512, 256), epochs=500, learning_rate=0.0001, dropout=.3, patience=10)
    cn4.fit(dat1)
    sub4 = cn4.evaluate(dat1)

    # set 1
    dat1 = ImageData(train_size=.75, gen_new_images=True)
    cn5 = ConvNet(cross_validating=True, model_num=204, conv_layers=(16, 32, 64, 128),
                  dense_layers=(256, 128), epochs=500, learning_rate=0.0001, dropout=0, patience=10)
    cn5.fit(dat1)
    sub5 = cn5.evaluate(dat1)

    dat1 = ImageData(train_size=.75, gen_new_images=True)
    cn6 = ConvNet(cross_validating=True, model_num=205, conv_layers=(16, 32, 64, 128),
                  dense_layers=(512, 256), epochs=500, learning_rate=0.0001, dropout=0, patience=10)
    cn6.fit(dat1)
    sub6 = cn6.evaluate(dat1)
Code example #7
@author: Jason
"""

# 3. Multilayer Perceptrons

import mnist
from ImageData import ImageData
from Model import Model

# Preprocess the images and labels using the ImageData class
# For this task the images will need to be in (image, example)
# format, flattened, with one-hot-encoded labels
# (of size (n_examples, 10), as there are 10 possible labels, 0-9)
# MNIST has 60,000 training examples and 10,000 validation examples
data = ImageData()
data.get_data(dataset=mnist)
data.data_preprocess(pre_shuffle=False,
                     normalize=True,
                     flatten=True,
                     pad=False,
                     img_first_format=True,
                     one_hot_encode=True)

# Define and use a multilayer perceptron (MLP) model for learning the MNIST
# data, then plot the training and validation accuracy as a function of epoch
# and save this data to a csv file
input_size = data.train_data.shape[0]
hidden_size = 20
output_size = data.train_labels.shape[1]
save_dir = "C:/Users/Jason/Documents/"
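The preprocessing the comments describe (flattening to an (image, example) layout, scaling, and one-hot encoding) can be written directly in NumPy. This is a sketch of what ImageData.data_preprocess is assumed to produce, not its actual implementation:

import numpy as np

def flatten_and_one_hot(images, labels, n_classes=10):
    # images: (n_examples, 28, 28) uint8 -> (784, n_examples) floats in [0, 1]
    flat = images.reshape(images.shape[0], -1).T.astype(np.float32) / 255.0
    # labels: (n_examples,) ints in 0-9 -> (n_examples, n_classes) one-hot rows
    one_hot = np.eye(n_classes, dtype=np.float32)[labels]
    return flat, one_hot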
Code example #8
File: Spreadsheet.py Project: Jack-kelly-22/ps-4
    def output_set(self, image_path):
        th = 73
        while th < 144:
            image_data = ImageData("frame", image_path, th, (1, 5000, 1), self.path)
            self.output_image(image_data)
            th += 5
Code example #9
File: MarkovChain.py Project: mikemehl/pythonSandbox
def main():
    img = ImageData(SAMPLE_FILE, 200)
    dude = img.getParams()
    mtx = MarkovMatrix(img)
    #print(metric(np.array([1,2,3]),np.array([4,5,6])))
    return
Code example #10
def functionality7(image_name, pattern_name, plot):
    pattern_data = ImageData("images\\" + pattern_name)
    pattern_red = pattern_data.get_matrix_red()
    pattern_green = pattern_data.get_matrix_green()
    pattern_blue = pattern_data.get_matrix_blue()

    image_data = ImageData("images\\" + image_name)
    image_red = LocalFilter().zero_padding(
        image_data.get_matrix_red(),
        (pattern_data.number_rows, pattern_data.number_columns))
    image_green = LocalFilter().zero_padding(
        image_data.get_matrix_green(),
        (pattern_data.number_rows, pattern_data.number_columns))
    image_blue = LocalFilter().zero_padding(
        image_data.get_matrix_blue(),
        (pattern_data.number_rows, pattern_data.number_columns))

    mean_cross_correlation = []

    # Iterate only up to the image size minus the pattern size so the local i and j never run past the image bounds:
    for i in tqdm(range(image_data.number_rows - pattern_data.number_rows)):
        mean_cross_correlation_row = []
        for j in range(image_data.number_columns -
                       pattern_data.number_columns):
            red_local_matrix = []
            green_local_matrix = []
            blue_local_matrix = []
            # Build the local matrix for each channel:
            for local_i in range(pattern_data.number_rows):
                red_local_matrix_row = []
                green_local_matrix_row = []
                blue_local_matrix_row = []
                for local_j in range(pattern_data.number_columns):
                    red_local_matrix_row.append(
                        image_red[i + local_i][j + local_j])
                    green_local_matrix_row.append(
                        image_green[i + local_i][j + local_j])
                    blue_local_matrix_row.append(
                        image_blue[i + local_i][j + local_j])
                red_local_matrix.append(red_local_matrix_row)
                green_local_matrix.append(green_local_matrix_row)
                blue_local_matrix.append(blue_local_matrix_row)

            red_correlation = LocalFilter().correlation(
                red_local_matrix, pattern_red)
            green_correlation = LocalFilter().correlation(
                green_local_matrix, pattern_green)
            blue_correlation = LocalFilter().correlation(
                blue_local_matrix, pattern_blue)

            mean_cross_correlation_row.append(
                (red_correlation + green_correlation + blue_correlation) / 3)

        mean_cross_correlation.append(mean_cross_correlation_row)

    mean_cross_correlation = np.asmatrix(mean_cross_correlation)

    biggest_mean_correlation_positions = np.where(
        mean_cross_correlation == mean_cross_correlation.max())
    mean_row_center = biggest_mean_correlation_positions[0][0]
    mean_col_center = biggest_mean_correlation_positions[1][0]

    if plot not in ["False", "false", False]:
        # Correlation mapped and displayed in grayscale:
        show_gray_map(mean_cross_correlation,
                      original_image_path="images\\" + image_name,
                      save_plot_suffix="(corr-gray-map)")
        # Display the image with the region of highest correlation highlighted:
        show_image_with_dot_rectangle(
            "images\\" + image_name,
            plot, (mean_col_center, mean_row_center),
            (pattern_data.number_columns, pattern_data.number_rows),
            save_plot_suffix="(corr-result)")
Code example #11
def functionality4(image_name: str, plot):
    # PART 1 - Applying a 25x25 mask
    image_data = ImageData("images\\" + image_name)

    start = time.time()

    red_mean = LocalFilter().apply_mean_filter(image_data.get_matrix_red(),
                                               mask_size=(25, 25))
    green_mean = LocalFilter().apply_mean_filter(image_data.get_matrix_green(),
                                                 mask_size=(25, 25))
    blue_mean = LocalFilter().apply_mean_filter(image_data.get_matrix_blue(),
                                                mask_size=(25, 25))

    image_data.set_rgb_from_matrices(red_mean, green_mean, blue_mean)
    image_filtered_mean_path = image_data.save_image(
        new_file_name_suffix='(media-25x25)')

    end = time.time()
    print(end - start)

    show_image(image_filtered_mean_path, plot)

    # PART 2 - Applying a 25x1 mask and then a 1x25 mask
    image_data = ImageData("images\\" + image_name)

    start = time.time()

    red_mean = LocalFilter().apply_mean_filter(image_data.get_matrix_red(),
                                               mask_size=(25, 1))
    green_mean = LocalFilter().apply_mean_filter(image_data.get_matrix_green(),
                                                 mask_size=(25, 1))
    blue_mean = LocalFilter().apply_mean_filter(image_data.get_matrix_blue(),
                                                mask_size=(25, 1))
    image_data.set_rgb_from_matrices(red_mean, green_mean, blue_mean)

    red_mean = LocalFilter().apply_mean_filter(image_data.get_matrix_red(),
                                               mask_size=(1, 25))
    green_mean = LocalFilter().apply_mean_filter(image_data.get_matrix_green(),
                                                 mask_size=(1, 25))
    blue_mean = LocalFilter().apply_mean_filter(image_data.get_matrix_blue(),
                                                mask_size=(1, 25))
    image_data.set_rgb_from_matrices(red_mean, green_mean, blue_mean)

    image_filtered_mean_path = image_data.save_image(
        new_file_name_suffix='(media-25x1-e-1x25)')

    end = time.time()
    print(end - start)

    show_image(image_filtered_mean_path, plot)
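The point of the second part is that the mean filter is separable: a 25x1 pass followed by a 1x25 pass averages the same 25x25 neighborhood while visiting 25 + 25 pixels per output instead of 625, which is why the second timing is much lower (the intermediate rounding to 8-bit values makes the two results only approximately equal). A minimal check of the equivalence with SciPy, independent of the project's LocalFilter:

import numpy as np
from scipy.ndimage import uniform_filter, uniform_filter1d

channel = np.random.rand(200, 200)

full_pass = uniform_filter(channel, size=25)                 # one 25x25 mean filter
separable = uniform_filter1d(uniform_filter1d(channel, 25, axis=0), 25, axis=1)

print(np.allclose(full_pass, separable))                     # True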
Code example #12
def functionality3(image_name: str, plot):
    # PART 1 - Mean filter
    image_data = ImageData("images\\" + image_name)
    red_mean = LocalFilter().apply_mean_filter(image_data.get_matrix_red(),
                                               mask_size=(5, 5))
    green_mean = LocalFilter().apply_mean_filter(image_data.get_matrix_green(),
                                                 mask_size=(5, 5))
    blue_mean = LocalFilter().apply_mean_filter(image_data.get_matrix_blue(),
                                                mask_size=(5, 5))

    image_data.set_rgb_from_matrices(red_mean, green_mean, blue_mean)
    image_filtered_mean_path = image_data.save_image(
        new_file_name_suffix='(media)')

    show_image(image_filtered_mean_path, plot)

    # PART 2 - Sobel filters
    image_data = ImageData("images\\" + image_name)

    sobel_horizontal_mask = Matrix().get_matrix_from_file(
        'mask\\sobel horizontal.txt')
    red_sobel_horizontal = LocalFilter().apply_generic_filter(
        image_data.get_matrix_red(), sobel_horizontal_mask)
    green_sobel_horizontal = LocalFilter().apply_generic_filter(
        image_data.get_matrix_green(), sobel_horizontal_mask)
    blue_sobel_horizontal = LocalFilter().apply_generic_filter(
        image_data.get_matrix_blue(), sobel_horizontal_mask)

    image_data.set_rgb_from_matrices(red_sobel_horizontal,
                                     green_sobel_horizontal,
                                     blue_sobel_horizontal)
    image_filtered_sobel_horizontal_path = image_data.save_image(
        new_file_name_suffix='(sobel-horizontal)')
    show_image(image_filtered_sobel_horizontal_path, plot)

    image_data = ImageData("images\\" + image_name)

    sobel_vertical_mask = Matrix().get_matrix_from_file(
        'mask\\sobel vertical.txt')
    red_sobel_vertical = LocalFilter().apply_generic_filter(
        image_data.get_matrix_red(), sobel_vertical_mask)
    green_sobel_vertical = LocalFilter().apply_generic_filter(
        image_data.get_matrix_green(), sobel_vertical_mask)
    blue_sobel_vertical = LocalFilter().apply_generic_filter(
        image_data.get_matrix_blue(), sobel_vertical_mask)

    image_data.set_rgb_from_matrices(red_sobel_vertical, green_sobel_vertical,
                                     blue_sobel_vertical)
    image_filtered_sobel_vertical_path = image_data.save_image(
        new_file_name_suffix='(sobel-vertical)')
    show_image(image_filtered_sobel_vertical_path, plot)
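The Sobel masks here are loaded from text files that are not part of the snippet. For reference, the two standard 3x3 Sobel kernels they presumably contain are shown below (which file holds which orientation depends on the project's naming):

import numpy as np

# Standard Sobel kernels: one responds to intensity changes along the rows,
# the other along the columns.
sobel_a = np.array([[-1, -2, -1],
                    [ 0,  0,  0],
                    [ 1,  2,  1]])
sobel_b = np.array([[-1, 0, 1],
                    [-2, 0, 2],
                    [-1, 0, 1]])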
Code example #13
def functionality2(image_name: str, plot):
    # PART 1 - Negative in RGB, band by band
    image_data = ImageData("images\\" + image_name)

    r_negative = PointFilter().apply_negative(image_data.get_matrix_red())
    g_negative = PointFilter().apply_negative(image_data.get_matrix_green())
    b_negative = PointFilter().apply_negative(image_data.get_matrix_blue())

    image_data.set_rgb_from_matrices(r_negative, g_negative, b_negative)
    new_image_path = image_data.save_image(
        new_file_name_suffix='(negative-rgb)')
    show_image(new_image_path, plot)

    # PART 2 - Negative in Y
    image_data = ImageData("images\\" + image_name)

    y, i, q = Converter().rgb_to_yiq(image_data.get_matrix_red(),
                                     image_data.get_matrix_green(),
                                     image_data.get_matrix_blue())

    y_negative = PointFilter().apply_negative(y)

    r, g, b = Converter().yiq_to_rgb(y_negative, i, q)

    image_data.set_rgb_from_matrices(r, g, b)
    new_image_path = image_data.save_image(new_file_name_suffix='(negative-y)')
    show_image(new_image_path, plot)
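PointFilter().apply_negative above presumably maps each 8-bit value v to 255 - v. A minimal sketch of that point operation, assuming 8-bit channels:

import numpy as np

def apply_negative(channel):
    # Point operation: invert every intensity value of an 8-bit channel.
    return 255 - np.asarray(channel, dtype=np.uint8)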
Code example #14
                   [0, 1, 0, 1, 0], [0, 1, 1, 0, 0]],
                  [[1, 1, 1, 1, 1], [1, 0, 0, 0, 1], [1, 0, 1, 0, 1],
                   [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]],
                  [[0, 1, 1, 1, 0], [1, 0, 0, 0, 1], [1, 0, 0, 0, 1],
                   [1, 0, 0, 0, 1], [0, 1, 1, 1, 0]]])

one_labels = np.array([[1], [1], [1], [1], [1], [1]])

zero_labels = np.array([[-1], [-1], [-1], [-1], [-1], [-1]])

# Preprocess the images and labels using the ImageData class
# For this task the images will need to be in (image, example)
# format with flattened images
# The training set will have 8 examples (4 1s, 4 0s) and the
# validation set will have 4 examples (2 1s, 2 0s)
data = ImageData()
data.get_data(images1=ones,
              image_labels1=one_labels,
              images2=zeros,
              image_labels2=zero_labels)
data.data_preprocess(split_data=True,
                     normalize=False,
                     flatten=True,
                     img_first_format=True,
                     one_hot_encode=False)

# Define and train a single perceptron model for learning this dataset, then
# plot the training and validation accuracy as a function of epoch and save
# this data to a csv file
input_size = data.train_data.shape[0]
output_size = data.train_labels.shape[1]
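The comments describe training a single perceptron on flattened images with labels in {-1, +1}. Below is a self-contained sketch of that training rule, using the same (feature, example) data layout; this is an assumption about what the project's Model class does, not its actual code.

import numpy as np

def train_perceptron(X, y, epochs=100, lr=0.1):
    # X: (n_features, n_examples); y: (n_examples, 1) with values in {-1, +1}.
    w = np.zeros((X.shape[0], 1))
    b = 0.0
    for _ in range(epochs):
        for i in range(X.shape[1]):
            x_i = X[:, i:i + 1]
            score = (w.T @ x_i).item() + b
            pred = 1.0 if score >= 0 else -1.0
            if pred != y[i, 0]:
                # Classic perceptron update on a misclassified example.
                w += lr * y[i, 0] * x_i
                b += lr * y[i, 0]
    return w, b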
Code example #15
    # set 1
    dat1 = ImageData(train_size=.75, gen_new_images=True)
    cn5 = ConvNet(cross_validating=True, model_num=204, conv_layers=(16, 32, 64, 128),
                  dense_layers=(256, 128), epochs=500, learning_rate=0.0001, dropout=0, patience=10)
    cn5.fit(dat1)
    sub5 = cn5.evaluate(dat1)

    dat1 = ImageData(train_size=.75, gen_new_images=True)
    cn6 = ConvNet(cross_validating=True, model_num=205, conv_layers=(16, 32, 64, 128),
                  dense_layers=(512, 256), epochs=500, learning_rate=0.0001, dropout=0, patience=10)
    cn6.fit(dat1)
    sub6 = cn6.evaluate(dat1)

    dat1 = ImageData(train_size=.75, gen_new_images=True)
    cn8 = ConvNet(cross_validating=True, model_num=206, conv_layers=(32, 64, 128, 256),
                  dense_layers=(256, 128), epochs=500, learning_rate=0.0001, dropout=0, patience=10)
    cn8.fit(dat1)
    sub8 = cn8.evaluate(dat1)

    dat1 = ImageData(train_size=.75, gen_new_images=True)
    cn9 = ConvNet(cross_validating=True, model_num=207, conv_layers=(32, 64, 128, 256),
                  dense_layers=(512, 256), epochs=500, learning_rate=0.0001, dropout=0, patience=10)
    cn9.fit(dat1)
    sub9 = cn9.evaluate(dat1)

    
    dat1 = ImageData(train_size=.75, gen_new_images=True)
    cn10 = ConvNet(cross_validating=True, model_num=208, conv_layers=(32, 64, 128, 128),
                    dense_layers=(256, 128), epochs=500, learning_rate=0.0001, dropout=0, patience=10)
Code example #16
def createImagesFor(customTestingDataFileLocations, label):
    images = []
    for filename in customTestingDataFileLocations:
        im = Image.open(filename).resize((28, 28)).convert("1")
        images.append(ImageData(im.getdata(), label))
    return images