import tensorflow as tf

import target_gen


def get_mynet_input(key):
    """Build a single training record from a (label_type, value) key."""

    class mynetRecord(object):
        pass

    result = mynetRecord()
    label_bytes = 4
    result.height = 64
    result.width = 64
    result.depth = 4
    print(key)

    # Generate the requested target image either by shape or by letter.
    if key[0] == 'shape':
        target = target_gen.generate_image(requested_shape=key[1],
                                           requested_label=key[0])
    elif key[0] == 'letter':
        target = target_gen.generate_image(requested_letter=key[1],
                                           requested_label=key[0])

    result.label = target.label
    print("label: " + str(result.label))

    # Decode the raw bytes, reshape to depth-major [depth, height, width],
    # then transpose to channels-last [height, width, depth].
    record_bytes = tf.decode_raw(target.image, tf.uint8)
    depth_major = tf.reshape(record_bytes,
                             [result.depth, result.height, result.width])
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])
    return result
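# --- Illustrative sketch, not part of the original pipeline ----------------
# A numpy equivalent of the decode/reshape/transpose above, handy for
# checking the [depth, height, width] -> [height, width, depth] layout
# change by hand. demo_layout is a hypothetical helper added only for
# illustration; it is never called by the real code.
def demo_layout():
    import numpy as np
    raw = np.arange(4 * 64 * 64, dtype=np.uint8)  # stand-in for target.image bytes
    depth_major = raw.reshape(4, 64, 64)          # [depth, height, width]
    hwc = depth_major.transpose(1, 2, 0)          # [height, width, depth]
    assert hwc.shape == (64, 64, 4)
    return hwc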
import target_gen
import sys
import argparse

parser = argparse.ArgumentParser(
    description='request specific shape, letter, colors')
parser.add_argument('-s', help='generate a specific shape')
parser.add_argument('-l', help='generate a specific letter')
parser.add_argument('-sc', help='generate a specific shape color')
parser.add_argument('-lc', help='generate a specific letter color')
args = parser.parse_args()

# Any option not supplied on the command line stays None, which lets
# target_gen pick a random value for it.
s = l = sc = lc = None
if args.s:
    s = args.s
if args.l:
    l = args.l
if args.sc:
    sc = args.sc
if args.lc:
    lc = args.lc

print(target_gen.generate_image(requested_shape=s,
                                requested_letter=l,
                                requested_shape_color=sc,
                                requested_letter_color=lc))
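# Example invocation (the script name is assumed here; use the actual
# filename from this repository):
#
#   python request_target.py -s Circle -l A -sc Red -lc White
#
# Omitting any flag leaves that attribute as None, so the generator
# chooses it at random.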
images = [None] * num_training_images
labels = [None] * num_training_images
images_test = [None] * num_testing_images
labels_test = [None] * num_testing_images

letter_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
               'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
shape_list = ['Circle', 'Semicircle', 'Quartercircle', 'Triangle', 'Square',
              'Rectangle', 'Trapezoid', 'Pentagon', 'Hexagon', 'Heptagon',
              'Octagon', 'Star', 'Cross']

counter = 0
for i in range(0, 26):
    for a in range(0, 13):
        # Generate one white-on-black target for every letter/shape pair.
        tmp_img, tmp_label = target_gen.generate_image(
            requested_letter=letter_list[i],
            requested_shape=shape_list[a],
            requested_letter_color="White",
            requested_shape_color="Black",
            return_type="set")
        for q in range(0, num_variations):
            # Edge-detect, smooth, and re-enhance to reduce the target to a
            # grayscale outline before storing it in the training set.
            tmp_img_2 = tmp_img
            tmp_img_2 = tmp_img_2.filter(ImageFilter.FIND_EDGES)
            tmp_img_2 = tmp_img_2.filter(ImageFilter.SMOOTH)
            tmp_img_2 = tmp_img_2.filter(ImageFilter.SMOOTH_MORE)
            tmp_img_2 = tmp_img_2.filter(ImageFilter.FIND_EDGES)
            tmp_img_2 = tmp_img_2.convert('L')
            tmp_img_2 = tmp_img_2.filter(ImageFilter.EDGE_ENHANCE_MORE)
            images[counter] = np.reshape(tmp_img_2.getdata(), (64, 64, -1))
            # labels[counter] = np.reshape(tmp_label, (-1))
            # labels[counter] = tflearn.data_utils.to_categorical(tmp_label, 338)
            labels[counter] = tmp_label
            counter += 1
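# --- Illustrative sketch, not part of the original file --------------------
# One way the collected lists could be turned into arrays tflearn accepts,
# following the commented-out to_categorical line above. The 338-class
# figure comes from that comment; prepare_arrays is a hypothetical helper
# and assumes labels are integer class indices.
def prepare_arrays(images, labels, num_classes=338):
    import numpy as np
    from tflearn.data_utils import to_categorical
    X = np.asarray(images, dtype=np.float32) / 255.0   # scale pixels to [0, 1]
    Y = to_categorical(labels, num_classes)             # one-hot encode labels
    return X, Y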
import tflearn
import numpy as np
from tflearn.data_utils import shuffle
from tflearn.data_utils import *
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_augmentation import ImageAugmentation
import pickle
import target_gen
from PIL import Image
from PIL import ImageFilter
import sys

np.set_printoptions(threshold=np.inf)

# Load a dataset of AUVSI targets, or generate them on demand here.
num_testing_images = 100
if len(sys.argv) > 1:
    num_testing_images = int(sys.argv[1])

for i in range(0, num_testing_images):
    tmp_img, tmp_label = target_gen.generate_image(return_type="shape")
    sys.stdout.write("Generating image %d/%d \r" % (i, num_testing_images))
    sys.stdout.flush()
sys.stdout.write("Generating image %d/%d \n" % (i + 1, num_testing_images))
sys.stdout.write("Finished generating images\n")
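# --- Illustrative sketch, not part of the original file --------------------
# The imports above (conv_2d, max_pool_2d, fully_connected, regression)
# suggest a small convolutional network, but the excerpt does not show it.
# This is one plausible layout for 64x64 single-channel targets, not the
# author's actual architecture; layer sizes and the 338-class output are
# assumptions.
def build_network():
    net = input_data(shape=[None, 64, 64, 1])
    net = conv_2d(net, 32, 3, activation='relu')
    net = max_pool_2d(net, 2)
    net = conv_2d(net, 64, 3, activation='relu')
    net = max_pool_2d(net, 2)
    net = fully_connected(net, 512, activation='relu')
    net = dropout(net, 0.5)
    net = fully_connected(net, 338, activation='softmax')
    net = regression(net, optimizer='adam',
                     loss='categorical_crossentropy', learning_rate=0.001)
    return tflearn.DNN(net)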