Code example #1
    def __init__(self):

        signdata = SignData()
        self.imageProcessClass = ImageProces()

        self.X_train, self.y_train = signdata.getTrainFeatures()
        self.X_test, self.y_test = signdata.getTestFeatures()
        self.X_valid, self.y_valid = signdata.getValidFeatures()
        self.X_train_aug, self.y_train_aug = signdata.getTrainAugmentsFeatures()
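
The constructor above (an excerpt of SignImageClass, shown in full in code example #7) depends on a SignData helper that is not included. A minimal sketch of the assumed interface, under the assumption that the splits live in pickled files with "features"/"labels" keys; the file paths and key names are guesses, not confirmed by the source:

import pickle

class SignData:
    def _load(self, path):
        # hypothetical helper: load one pickled split
        with open(path, "rb") as f:
            data = pickle.load(f)
        return data["features"], data["labels"]

    def getTrainFeatures(self):
        return self._load("./data/train.p")

    def getTestFeatures(self):
        return self._load("./data/test.p")

    def getValidFeatures(self):
        return self._load("./data/valid.p")

    def getTrainAugmentsFeatures(self):
        return self._load("./data/train_aug.p")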
Code example #2
def readtfrecord():

    tfRecordCls = tfRecordHandlerClass()
    imgProcCls = ImageProces()



    FILE = "./tfRecords/trafficSign_aug.tfRecords"
    FILE = "./tfRecords/trafficSign_train.tfRecords"    
    filename_queue = tf.train.string_input_producer([ FILE ], num_epochs=4)

    # get image and label batches from the tfRecords file; use
    # NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN=124465 when reading the augmented records
    images, labels = tfRecordCls.read_and_decode(filename_queue, BATCH_SIZE=100, NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN=34799)


    init = tf.global_variables_initializer()
    init2 = tf.local_variables_initializer()
    with tf.Session() as sess:
        sess.run( [init,init2]  )
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        try:
            local_steps = 0
            while not coord.should_stop():
                img,label = sess.run([images,labels])
                local_steps += 1
                # optionally convert each image in the batch to grayscale
                img = np.array([imgProcCls.getGrayScale(im) for im in img])
                #print(img.shape)

        except tf.errors.OutOfRangeError:
            # raised once string_input_producer has delivered num_epochs
            # epochs and the queue has no more elements
            print("REACHED. OutOfRangeError from epoch limit (string_input_producer)")
            print("  local steps --> ", local_steps)

        # Perform any end-of-epoch computation here.
        print('Done training, epoch reached')

        coord.request_stop()
        coord.join(threads)
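
readtfrecord() leans on tfRecordHandlerClass.read_and_decode, which is not shown. A hedged sketch in the same TF1 queue-runner style; the feature keys ("image_raw", "label"), the 32x32x3 record shape, and the shuffle-buffer sizing are all assumptions:

import tensorflow as tf

class tfRecordHandlerClass:
    def read_and_decode(self, filename_queue, BATCH_SIZE=100,
                        NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN=34799):
        # read one serialized Example from the filename queue
        reader = tf.TFRecordReader()
        _, serialized = reader.read(filename_queue)
        features = tf.parse_single_example(
            serialized,
            features={
                "image_raw": tf.FixedLenFeature([], tf.string),
                "label": tf.FixedLenFeature([], tf.int64),
            })

        # decode the raw bytes back into a 32x32x3 image tensor
        image = tf.decode_raw(features["image_raw"], tf.uint8)
        image = tf.cast(tf.reshape(image, [32, 32, 3]), tf.float32)
        label = tf.cast(features["label"], tf.int64)

        # batch with a shuffle buffer sized relative to the epoch length
        min_after_dequeue = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN // 10
        return tf.train.shuffle_batch(
            [image, label], batch_size=BATCH_SIZE,
            capacity=min_after_dequeue + 3 * BATCH_SIZE,
            min_after_dequeue=min_after_dequeue)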
Code example #3
def loadDownloadImage():

    imageProcCls = ImageProces()
    download_images = {}

    downloadDir = "./DownloadsSign"
    files = os.listdir(downloadDir)
    for name in files:
        filename = name.split(".")
        if filename[-1] not in ("jpg", "png"):
            continue
        print(name)
        image = cv2.imread( os.path.join(downloadDir,name) )
        image = cv2.resize(image,(32,32))
        
        image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)    
        image = imageProcCls.getGrayScale(image) / 255.

        download_images[filename[0]] = image

    return download_images 
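
Several examples call ImageProces.getGrayScale (the class name is spelled that way in the source). Because the feed placeholders everywhere are (None, 32, 32, 1) and preprocessImages in code example #7 does no reshaping afterwards, the method presumably keeps an explicit channel axis; a sketch under that assumption:

import cv2

class ImageProces:
    def getGrayScale(self, image):
        # collapse RGB to a single channel and keep a trailing channel
        # axis so the output matches the (32, 32, 1) feed placeholders
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        return gray.reshape(gray.shape + (1,))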
Code example #4
def main(argv):

    print("Preparing training data.")

    print("-" * 30)
    print("      initialize resource")
    imgProcCls = ImageProces()
    sign_image = SignImageClass()
    sign_image.imagePreprocessNormalize()

    vgg19 = Vgg19()
    
    print("-" * 40)
    print("      defined hyper parameters")
    print("dropout keep prob:",  DEFINE.dropout_keep_prob)
    print("epochs :",  DEFINE.epochs)
    print("BATCH_SIZE :",  DEFINE.train_batch_size)

    NUM_CLASS = 43
    BATCH_SIZE = DEFINE.train_batch_size

    x = tf.placeholder(tf.float32, [None, 32, 32, 1])
    resized = tf.image.resize_images(x, (227, 227))

    #y_ = tf.placeholder(tf.float32, [None, NUM_CLASS])    
    y_ = tf.placeholder(tf.int64, [None])
    y_one_hot = tf.one_hot(y_, depth=NUM_CLASS, dtype=tf.float32)

    # dropout keep-probability placeholder
    dropout_keep_prob = tf.placeholder(tf.float32)
        
    logits = vgg19.model(resized, NUM_CLASS , dropout_keep_prob)
    prob_img = tf.nn.softmax(logits)

    cost = vgg19.cross_entropy(y_one_hot,logits)
    train_op, global_step = vgg19.train(cost)
    accuracy = vgg19.accuracy(logits, y_one_hot)

    init = tf.global_variables_initializer()
    init2 = tf.local_variables_initializer()
    with tf.Session() as sess:
        sess.run( [init,init2]  )
        saver = tf.train.Saver()

        length_train_data = sign_image.train_aug_data_length()
        length_valid_data = sign_image.valid_data_length()

            
        for it in range(DEFINE.epochs):

            sign_image.shuffle_train_aug()

            for offset in range(0,length_train_data,BATCH_SIZE):
                features_batch,labels_batch = sign_image.batch_train_aug(offset,batch_size=BATCH_SIZE)

                #features_batch = list( map( lambda im : imgProcCls.getGrayScale(im)  , features_batch[:] ) )
                #features_batch = np.array(features_batch) / 255.

                feeds = {x:features_batch, y_:labels_batch , dropout_keep_prob:DEFINE.dropout_keep_prob   }
                train_op.run(feed_dict=feeds)

                # fetch the current global step inside the loop so the
                # printed value is not stale
                g_step = tf.train.global_step(sess, global_step)
                print('global_step: %s' % g_step)


            total_acc = []
            for offset in range(0,length_valid_data,BATCH_SIZE):
                features_batch,labels_batch = sign_image.batch_valid(offset,batch_size=BATCH_SIZE)

                #features_batch = list( map( lambda im : imgProcCls.getGrayScale(im)  , features_batch[:] ) )
                #features_batch = np.array(features_batch) / 255.

                feeds = {x: features_batch, y_: labels_batch, dropout_keep_prob: 1.0}  # no dropout at evaluation time
                acc_, cost_ = sess.run([accuracy,cost],feed_dict=feeds)
                total_acc.append( acc_ * BATCH_SIZE )
            accuracy_ = np.sum(total_acc) / float(length_valid_data)
            print("EPOCH:%d validation - total accuracy : %.4f" % (it, accuracy_) )
Code example #5
def training_testing():

    tfRecordCls = tfRecordHandlerClass()
    imgProcCls = ImageProces()

    # the tfRecords queue feeds only the training loop; validation batches
    # come from SignImageClass, so set it up here as well
    sign_image = SignImageClass()
    sign_image.imagePreprocessNormalize()
    length_valid_data = sign_image.valid_data_length()
    BATCH_SIZE = 100
    FILE_train = "./tfRecords/trafficSign_aug.tfRecords"
    FILE_test = "./tfRecords/trafficSign_test.tfRecords"
    filename_queue_train = tf.train.string_input_producer([ FILE_train ], num_epochs=4)
    filename_queue_test = tf.train.string_input_producer([ FILE_test ], num_epochs=1)

    # get image and label batches from the tfRecords files
    images_train, labels_train = tfRecordCls.read_and_decode(filename_queue_train,BATCH_SIZE=100,NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN=124465)
    images_test, labels_test = tfRecordCls.read_and_decode(filename_queue_test,BATCH_SIZE=100,NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN=34799)

    #
    #  define LeNet Model   
    #
    x = tf.placeholder(tf.float32, (None, 32, 32, 1))
    y_ = tf.placeholder(tf.int64, [None])
    y_one_hot = tf.one_hot(y_, depth=43, dtype=tf.float32)

    logits = LeNet(x,43)

    with tf.variable_scope("cost") as scope:
        #cost = tf.reduce_sum(tf.pow(pred_y - y_, 2))/(2*n_samples)
        softmax = tf.nn.softmax_cross_entropy_with_logits(labels=y_one_hot, logits=logits)
        cost = tf.reduce_mean(softmax)

    with tf.variable_scope("train") as scope:

        #train_op = tf.train.AdamOptimizer(1e-4).minimize(cost)
        optimizer = tf.train.AdamOptimizer(learning_rate = 0.001)
        train_op = optimizer.minimize( cost )

    with tf.variable_scope("acc") as scope:
        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_one_hot, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    init = tf.global_variables_initializer()
    init2 = tf.local_variables_initializer()
    with tf.Session() as sess:
        sess.run( [init,init2]  )
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        try:
            local_steps = 0
            while not coord.should_stop():
                img,label = sess.run([images_train,labels_train])
                local_steps += 1
                # optionally convert each image in the batch to grayscale;
                # dividing by 255 keeps the scale consistent with the
                # normalized validation batches from SignImageClass
                img = np.array([imgProcCls.getGrayScale(im) for im in img]) / 255.
    
                feeds = {x:img, y_:label}
                train_op.run(feed_dict=feeds)

                if local_steps % 1244 == 0:  # roughly one epoch of the 124465-image augmented set at batch size 100
                    total_acc = []

                    for offset in range(0,length_valid_data,BATCH_SIZE):
                        features_batch,labels_batch = sign_image.batch_valid(offset,batch_size=BATCH_SIZE)
                        feeds = {x:features_batch, y_:labels_batch}
                        acc_, cost_ = sess.run([accuracy,cost],feed_dict=feeds)
                        total_acc.append( acc_ * BATCH_SIZE )
                    accuracy_ = np.sum(total_acc) / float(length_valid_data)
                    print("STEP:%d validation - total accuracy : %.4f" % (local_steps, accuracy_))




        except tf.errors.OutOfRangeError:
            # raised once string_input_producer has delivered num_epochs
            # epochs and the queue has no more elements
            print("REACHED. OutOfRangeError from epoch limit (string_input_producer)")
            print("  local steps --> ", local_steps)

        # Perform any end-of-epoch computation here.
        print('Done training, epoch reached')

        coord.request_stop()
        coord.join(threads)
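
Both training functions call LeNet(x, 43), which is not defined in these excerpts. A minimal TF1 sketch of a LeNet-style network for 32x32x1 inputs; the filter counts and fully-connected sizes follow classic LeNet-5 and are assumptions:

def LeNet(x, n_classes):
    # two conv/pool stages followed by three fully-connected layers
    conv1 = tf.layers.conv2d(x, 6, 5, activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(conv1, 2, 2)
    conv2 = tf.layers.conv2d(pool1, 16, 5, activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(conv2, 2, 2)

    flat = tf.layers.flatten(pool2)
    fc1 = tf.layers.dense(flat, 120, activation=tf.nn.relu)
    fc2 = tf.layers.dense(fc1, 84, activation=tf.nn.relu)
    return tf.layers.dense(fc2, n_classes)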
Code example #6
def training_testing():

    imgProcCls = ImageProces()
    sign_image = SignImageClass()
    sign_image.imagePreprocessNormalize()

    BATCH_SIZE = 64

    #
    #  define LeNet Model   
    #
    x = tf.placeholder(tf.float32, (None, 32, 32, 1))
    y_ = tf.placeholder(tf.int64, [None])
    y_one_hot = tf.one_hot(y_, depth=43, dtype=tf.float32)

    logits = LeNet(x,43)

    with tf.variable_scope("cost") as scope:
        #cost = tf.reduce_sum(tf.pow(pred_y - y_, 2))/(2*n_samples)
        softmax = tf.nn.softmax_cross_entropy_with_logits(labels=y_one_hot, logits=logits)
        cost = tf.reduce_mean(softmax)

    with tf.variable_scope("train") as scope:
        global_step = tf.Variable(0, name='global_step',trainable=False)
        #train_op = tf.train.AdamOptimizer(1e-4).minimize(cost)
        optimizer = tf.train.AdamOptimizer(learning_rate = 0.001)
        train_op = optimizer.minimize( cost , global_step=global_step )

    with tf.variable_scope("acc") as scope:
        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_one_hot, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    print("Start Valuation of Augmentation Image train data ....")

    init = tf.global_variables_initializer()
    init2 = tf.local_variables_initializer()
    with tf.Session() as sess:
        sess.run( [init,init2]  )

        saver = tf.train.Saver()

        EPOCH = 32
        length_train_data = sign_image.train_aug_data_length()
        length_valid_data = sign_image.valid_data_length()

        for it in range(EPOCH):

            #print("EPOCH", it)
            
            #
            # Augmentation image data shuffling 
            #

            sign_image.shuffle_train_aug()


            for offset in range(0,length_train_data,BATCH_SIZE):
                features_batch,labels_batch = sign_image.batch_train_aug(offset,batch_size=BATCH_SIZE)

                #features_batch = list( map( lambda im : imgProcCls.getGrayScale(im)  , features_batch[:] ) )
                #features_batch = np.array(features_batch) / 255.

                feeds = {x:features_batch, y_:labels_batch}
                train_op.run(feed_dict=feeds)

            g_step=tf.train.global_step(sess, global_step)
            #print('global_step: %s' % g_step)


            total_acc = []
            for offset in range(0,length_valid_data,BATCH_SIZE):
                features_batch,labels_batch = sign_image.batch_valid(offset,batch_size=BATCH_SIZE)

                #features_batch = list( map( lambda im : imgProcCls.getGrayScale(im)  , features_batch[:] ) )
                #features_batch = np.array(features_batch) / 255.

                feeds = {x:features_batch, y_:labels_batch}
                acc_, cost_ = sess.run([accuracy,cost],feed_dict=feeds)
                total_acc.append( acc_ * BATCH_SIZE )
            accuracy_ = np.sum(total_acc) / float(length_valid_data)
            print("EPOCH:%d validation - total accuracy : %.4f" % (it, accuracy_) )

        save_path = saver.save(sess,"./lenet_aug_model/lenet2_aug.ckpt", global_step=g_step)
        print("-" * 30 )
        print("-- Model saved in file: ", save_path )
Code example #7
class SignImageClass():
    def __init__(self):

        signdata = SignData()
        self.imageProcessClass = ImageProces()

        self.X_train, self.y_train = signdata.getTrainFeatures()
        self.X_test, self.y_test = signdata.getTestFeatures()
        self.X_valid, self.y_valid = signdata.getValidFeatures()
        self.X_train_aug, self.y_train_aug = signdata.getTrainAugmentsFeatures()

    def imagePreprocessNormalize(self):

        print("<SignImageClass> Image Preprocess...Normalize  ")
        print("     train test valid and augmentation train ...")
        self.X_train = self.preprocessImages(self.X_train)
        self.X_test = self.preprocessImages(self.X_test)
        self.X_valid = self.preprocessImages(self.X_valid)
        self.X_train_aug = self.preprocessImages(self.X_train_aug)

    def train_aug_data_length(self):
        return self.X_train_aug.shape[0]

    def train_data_length(self):
        return self.X_train.shape[0]

    def test_data_length(self):
        return self.X_test.shape[0]

    def valid_data_length(self):
        return self.X_valid.shape[0]

    def getValidPreprocess(self):
        return self.X_valid, self.y_valid

    def label_one_hot(self):

        pass

    def shuffle_train_aug(self):

        num_images = self.X_train_aug.shape[0]
        r = np.random.permutation(num_images)
        self.X_train_aug = self.X_train_aug[r]
        self.y_train_aug = self.y_train_aug[r]

    def shuffle_train(self):

        # note: self.X_prep_train is assumed to be produced elsewhere (e.g.
        # by preprocess_image); it is not initialized in __init__ as shown
        num_images = self.X_prep_train.shape[0]
        r = np.random.permutation(num_images)
        self.X_prep_train = self.X_prep_train[r]
        self.y_train = self.y_train[r]

    def batch_train_aug(self, offset=0, batch_size=64):

        features_batch = self.X_train_aug[offset:offset + batch_size]
        labels_batch = self.y_train_aug[offset:offset + batch_size]

        return features_batch, labels_batch

    def batch_train(self, offset=0, batch_size=64):

        features_batch = self.X_train[offset:offset + batch_size]
        labels_batch = self.y_train[offset:offset + batch_size]

        return features_batch, labels_batch

    def batch_valid(self, offset=0, batch_size=64):

        features_batch = self.X_valid[offset:offset + batch_size]
        labels_batch = self.y_valid[offset:offset + batch_size]

        return features_batch, labels_batch

    def batch_test(self, offset=0, batch_size=64):

        features_batch = self.X_test[offset:offset + batch_size]
        labels_batch = self.y_test[offset:offset + batch_size]

        return features_batch, labels_batch

    def preprocessImages(self, images):

        # grayscale each image, then normalize to [0, 1]
        gray_images = np.array(
            [self.imageProcessClass.getGrayScale(im) for im in images]) / 255.0

        return gray_images

    def preprocess_image(self, X):

        # convert to grayscale with luminosity weights; rgb2gray is not
        # defined in this class as shown, so compute it inline with numpy
        X = np.dot(X[..., :3], [0.299, 0.587, 0.114])

        # normalize to [0, 1] and restore the channel axis
        X_norm = (X / 255.).astype(np.float32)
        X_norm = X_norm.reshape(X_norm.shape + (1, ))

        return X_norm

    def convert_to_full_records_prep(self):

        tf_filenames = [
            "signtraffic_train.tfrecords", "signtraffic_test.tfrecords",
            "signtraffic_valid.tfrecords"
        ]
        # note: the X_prep_* arrays are assumed to be produced beforehand
        # (e.g. via preprocess_image); they are not set in this class as shown
        image_list = [self.X_prep_train, self.X_prep_test, self.X_prep_valid]
        label_list = [self.y_train, self.y_test, self.y_valid]

        tfRecordCls = tfRecordHandlerClass()
        for idx, (images, labels) in enumerate(zip(image_list, label_list)):

            tfRecordCls.convert_to_records(images, labels, tf_filenames[idx])

    def convert_to_full_records(self):

        tf_filenames = [
            "trafficSign_train.tfRecords", "trafficSign_test.tfRecords",
            "trafficSign_valid.tfRecords"
        ]
        image_list = [self.X_train, self.X_test, self.X_valid]
        label_list = [self.y_train, self.y_test, self.y_valid]

        tfRecordCls = tfRecordHandlerClass()
        for idx, (images, labels) in enumerate(zip(image_list, label_list)):

            tfRecordCls.convert_to_records(images, labels, tf_filenames[idx])
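
Both convert_to_full_records* methods delegate to tfRecordHandlerClass.convert_to_records, which is not shown. A sketch consistent with the read_and_decode sketch after code example #2 (same assumed feature keys; the images are assumed to still be uint8 arrays at this point):

class tfRecordHandlerClass:
    def convert_to_records(self, images, labels, filename):
        writer = tf.python_io.TFRecordWriter(filename)
        for image, label in zip(images, labels):
            # serialize each image/label pair as one Example
            example = tf.train.Example(features=tf.train.Features(feature={
                "image_raw": tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[image.tostring()])),
                "label": tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[int(label)])),
            }))
            writer.write(example.SerializeToString())
        writer.close()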