Example 1
def img_size_run_cnn(size):
    print('\n\n\nstart with size: ' + str(size))
    folder_to_save = './out/alex_s' + str(size) + '_' + str(ITERATION)
    folder_to_load = './out/alex_s' + str(size) + '_' + str(ITERATION-1) + '/model/alex_model'
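    # image_preloader lazily reads the listed .png files from disk as the dataset is accessed.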
    x, y = image_preloader(TRAIN_DATA,
                           image_shape=(size, size),
                           mode='file',
                           files_extension=['.png'])
    x_val, y_val = image_preloader(VAL_DATA,
                                   image_shape=(size, size),
                                   mode='file',
                                   files_extension=['.png'])
    model = get_alex_model(
        filter_size=size,
        folder_to_save=folder_to_save,
        folder_to_load=folder_to_load,
        image_size=size,
        strides=4,
        learning_rate=0.0003,
    )
    print('\nStart training ...')
    model.fit(x,
              y,
              n_epoch=40,
              validation_set=(x_val, y_val),
              shuffle=True,
              batch_size=128,
              show_metric=True,
              snapshot_epoch=True,
              run_id='alex_TB5')
    print('\nStart saving ...')
    model.save(folder_to_save + '/model/model')
Example 2
def get_data(data_dir, hdf5):
    """This function loads in the data, either by loading images on the fly or by creating and
    loading from a hdf5 database.

    Args:
        data_dir: Root directory of the dataset.
        hdf5: Boolean. If true, (create and) load data from a hdf5 database.

    Returns:
        X: training images.
        Y: training labels.
        X_test: validation images.
        Y_test: validation labels."""

    # Get the filenames of the lists containing image paths and labels.
    train_file, val_file = build_dataset_index(data_dir)

    # Check if (creating and) loading from hdf5 database is desired.
    if hdf5:
        # Create folder to store dataset.
        if not os.path.exists('hdf5'):
            os.makedirs('hdf5')
        # Check if hdf5 databases already exist and create them if not.
        if not os.path.exists('hdf5/tiny-imagenet_train.h5'):
            from tflearn.data_utils import build_hdf5_image_dataset
            print(' Creating hdf5 train dataset.')
            build_hdf5_image_dataset(train_file, image_shape=(256, 256), mode='file',
                                     output_path='hdf5/tiny-imagenet_train.h5', categorical_labels=True, normalize=True)

        if not os.path.exists('hdf5/tiny-imagenet_val.h5'):
            from tflearn.data_utils import build_hdf5_image_dataset
            print(' Creating hdf5 val dataset.')
            build_hdf5_image_dataset(val_file, image_shape=(256, 256), mode='file',
                                     output_path='hdf5/tiny-imagenet_val.h5', categorical_labels=True, normalize=True)

        # Load training data from hdf5 dataset.
        h5f = h5py.File('hdf5/tiny-imagenet_train.h5', 'r')
        X = h5f['X']
        Y = h5f['Y']

        # Load validation data.
        h5f = h5py.File('hdf5/tiny-imagenet_val.h5', 'r')
        X_test = h5f['X']
        Y_test = h5f['Y']

    else:
        # Load images directly from disk when they are required.
        from tflearn.data_utils import image_preloader
        X, Y = image_preloader(train_file, image_shape=(64, 64), mode='file', categorical_labels=True, normalize=True,
                               filter_channel=True)
        X_test, Y_test = image_preloader(val_file, image_shape=(64, 64), mode='file', categorical_labels=True,
                                         normalize=True, filter_channel=True)

    # Randomly shuffle the dataset.
    # X, Y = shuffle(X, Y)

    return X, Y, X_test, Y_test
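
# Hypothetical usage sketch; 'tiny-imagenet-200' is an assumed dataset root, not from the source:
# X, Y, X_test, Y_test = get_data('tiny-imagenet-200', hdf5=False)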
Example 3
def main(args):
    experiment = 'dl-histograms'
    channels = 1
    width = 64
    height = 64
    num_class = 2
    epochs = 15
    folds = 5
    architecturescount = 10
    architectureid = args.architectureid
    fold = args.fold
    test_file = '../../images/sampling/lists/histograms/test-images.txt'

    #       for architectureid in range(1,architecturescount):
    accuracies = []
    #for fold in range(1,folds):
    runid = '{}-architecture{}-fold{}'.format(experiment, architectureid, fold)
    arch = architecture(architectureid, width, height, channels, num_class)
    train_file = '../../images/sampling/lists/histograms/splits/train-cv-{}.txt'.format(
        fold)
    validate_file = '../../images/sampling/lists/histograms/splits/validate-cv-{}.txt'.format(
        fold)

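    # Preload the train/validation/test splits listed above; images are read lazily from disk.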
    X, Y = image_preloader(train_file,
                           image_shape=(height, width),
                           mode='file',
                           categorical_labels=True,
                           normalize=True)
    valX, valY = image_preloader(validate_file,
                                 image_shape=(height, width),
                                 mode='file',
                                 categorical_labels=True,
                                 normalize=True)
    testX, testY = image_preloader(test_file,
                                   image_shape=(height, width),
                                   mode='file',
                                   categorical_labels=True,
                                   normalize=True)

    arch.fit(X,
             Y,
             n_epoch=epochs,
             validation_set=(valX, valY),
             snapshot_step=10,
             snapshot_epoch=False,
             show_metric=True,
             run_id=runid)
    arch.save('arch-id{}-fold{}.tfl'.format(architectureid, fold))
    #       accuracies.append(arch.evaluate(testX, testY)[0])
    #       accuracies=np.asarray(accuracies)
    accuracy = arch.evaluate(testX, testY)[0]
    append(experiment, architectureid, fold, accuracy)
Example 4
def filter_size_run_cnn(filter_size):
    image_size = 128
    strides = 4
    batch_size = 2
    if filter_size < 9:
        strides = 2

    print('\n\n\nstart with filter size: ' + str(filter_size) + '; and strides: ' + str(strides))
    folder_to_save = './out_grey/alex_f' + str(filter_size) + '_' + str(ITERATION)
    # folder_to_load = './out/alex_f' + str(filter_size) + '_' + str(ITERATION-1) + '/model/model'
    folder_to_load = ''

    x, y = image_preloader(TRAIN_DATA,
                           image_shape=(image_size, image_size),
                           # grayscale=True,
                           filter_channel=False,
                           mode='file',
                           files_extension=['.png'])
    x_val, y_val = image_preloader(VAL_DATA,
                                   image_shape=(image_size, image_size),
                                   # grayscale=True,
                                   filter_channel=False,
                                   mode='file',
                                   files_extension=['.png'])
    print('loaded_data')
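    # Convert the preloaded lists into numpy tensors; this dataset uses 5-channel images.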
    x = np.reshape(x, (len(x), 128, 128, 5))
    x_val = np.reshape(x_val, (len(x_val), 128, 128, 5))

    model = get_alex_model(
        filter_size=filter_size,
        folder_to_save=folder_to_save,
        folder_to_load=folder_to_load,
        image_size=image_size,
        strides=strides,
        learning_rate=0.0003,
        depth=5,
    )
    print('\nStart training ...')
    model.fit(x,
              y,
              n_epoch=50,
              validation_set=(x_val, y_val),
              shuffle=True,
              batch_size=batch_size,
              show_metric=True,
              snapshot_epoch=True,
              run_id='alex_TB5')
    print('\nStart saving ...')
    model.save(folder_to_save + '/model/model')
Example 5
def get_data(data_dir, hdf5):
    train_file, val_file = build_dataset_index(data_dir)

    if hdf5:
        if not os.path.exists('hdf5'):
            os.makedirs('hdf5')
        if not os.path.exists('hdf5/tiny-imagenet_train.h5'):
            from tflearn.data_utils import build_hdf5_image_dataset
            build_hdf5_image_dataset(train_file,
                                     image_shape=(64, 64),
                                     mode='file',
                                     output_path='hdf5/tiny-imagenet_train.h5',
                                     categorical_labels=True,
                                     normalize=True)
        if not os.path.exists('hdf5/tiny-imagenet_val.h5'):
            from tflearn.data_utils import build_hdf5_image_dataset
            build_hdf5_image_dataset(val_file,
                                     image_shape=(64, 64),
                                     mode='file',
                                     output_path='hdf5/tiny-imagenet_val.h5',
                                     categorical_labels=True,
                                     normalize=True)

        h5f = h5py.File('hdf5/tiny-imagenet_train.h5', 'r')
        X = h5f['X']
        Y = h5f['Y']

        h5f = h5py.File('hdf5/tiny-imagenet_val.h5', 'r')
        X_test = h5f['X']
        Y_test = h5f['Y']
    else:
        from tflearn.data_utils import image_preloader
        X, Y = image_preloader(train_file,
                               image_shape=(64, 64),
                               mode='file',
                               categorical_labels=True,
                               normalize=True,
                               filter_channel=True)
        X_test, Y_test = image_preloader(val_file,
                                         image_shape=(64, 64),
                                         mode='file',
                                         categorical_labels=True,
                                         normalize=True,
                                         filter_channel=True)

    # Randomly shuffle the dataset.
    X, Y = shuffle(X, Y)
    return X, Y, X_test, Y_test
Example 6
def make_prediction(MODEL_ARCHITECTURE, MODEL_WEIGHTS, PATH_FACES, N_PLOT):
    # Load model
    m = importlib.import_module(MODEL_ARCHITECTURE)

    # Load neural net architecture
    model = tflearn.DNN(
        tflearn.regression(m.network(),
                           optimizer=m.optimizer(),
                           loss='mean_square'))

    # Load neural net weights
    model.load(MODEL_WEIGHTS, weights_only=True)

    # Load images
    X, y = image_preloader(PATH_FACES,
                           image_shape=(96, 96),
                           mode='folder',
                           normalize=True)
    X = np.reshape(X, (-1, 96, 96, 1))  # Add 1 color channel

    # Predict keypoints
    Y = np.array(model.predict(X))

    # Plot scatter points and save to .png
    plot_samples(X, Y, N_PLOT, PATH_FACES)

    # Write results to .csv
    write_prediction(Y, PATH_FACES)
Example 7
def transfer(prjname, basenet, numclasses, dims):
    # load data
    tf.reset_default_graph()
    X1, Y1 = image_preloader(".\\data\\train\\classes",
                             image_shape=tuple(dims)[0:2],
                             mode="folder",
                             categorical_labels=True,
                             normalize=True,
                             files_extension=[".jpg", ".png"],
                             filter_channel=True)

    # retrain basenet on new dataset
    img_prep = ImagePreprocessing()
    # 2FIX: normalization should be done per class and per channel
    img_prep.add_featurewise_zero_center()
    #img_prep.add_featurewise_stdnorm()

    x1 = tflearn.input_data(shape=np.insert(np.asarray(dims, dtype=object), 0,
                                            None).tolist(),
                            data_preprocessing=img_prep,
                            name="input")
    mt1 = templates.fetch("vgg16")

    numepochs = 2
    validpct = 0.1
    batchsize = 3
    model1 = nets.train(mt1, x1, X1, Y1, numclasses, numepochs, validpct,
                        batchsize, "vgg16" + prjname)

    model1.save(".\\logs\\ckp\\" + "vgg16goteborg\\" + "vgg16" + prjname +
                ".ckpt")
Example 8
def getXY():
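    # 'train' is a dataset folder defined elsewhere; mode='folder' derives one class per subfolder.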
    X, Y = image_preloader(train,
                           image_shape=(80, 80),
                           mode='folder',
                           categorical_labels=True,
                           normalize=True)
    return X, Y
Example 9
def load_data():
    logging.info('preparing data, can take a while')
    return image_preloader(TRAINING_DATA_FILE,
                           image_shape=IMAGE_INPUT_SIZE,
                           mode='file',
                           filter_channel=True,
                           normalize=True)
Example 10
def alexnet_rocauc(
    image_size=128,
    strides=4,
    filter_size=11,
    folder_to_load='./out/alex_s128_2/model/model',
    title='alex net 160 11',
):
    x_val, y_val = image_preloader(VAL_DATA,
                                   image_shape=(image_size, image_size),
                                   mode='file',
                                   files_extension=['.png'])

    print('loaded_data')
    x_val = np.reshape(x_val, (len(x_val), 128, 128, 1))

    model = get_alex_model(
        filter_size=filter_size,
        folder_to_save='not_meter',
        folder_to_load=folder_to_load,
        image_size=image_size,
        strides=strides,
        learning_rate=0.0003,
        depth=1,
    )

    arr_pred, arr_val = predict_all(model, x_val, y_val)
    calculate_and_print_roc(arr_val, arr_pred, title=title)
Example 11
def zfnet_rocauc(
    image_size=128,
    strides=2,
    folder_to_load='./out_grey/zfnet_2/checkpoints-296040',
    title='ZF Net',
):
    x_val, y_val = image_preloader(VAL_DATA,
                                   image_shape=(image_size, image_size),
                                   mode='file',
                                   files_extension=['.png'])

    print('loaded_data')
    x_val = np.reshape(x_val, (len(x_val), 128, 128, 1))

    model = get_zf_model(
        filter_size=7,
        folder_to_save='not_meter',
        folder_to_load=folder_to_load,
        image_size=image_size,
        strides=strides,
        learning_rate=0.0003,
        channels=1,
    )

    arr_pred, arr_val = predict_all(model, x_val, y_val, step=200)
    calculate_and_print_roc(arr_val, arr_pred, title=title)
Example 12
def filter_size_run_cnn():
    image_size = 128
    strides = 2
    batch_size = 16

    print('\n\n\nstart with filter size: ')
    # folder_to_save = './out/zfnet_' + str(ITERATION)
    # folder_to_load = './out/zfnet_1/checkpoints-96226'
    folder_to_save = './out_grey/zfnet_' + str(ITERATION)
    # folder_to_load = './out_grey/zfnet_' + str(ITERATION-1) + '/model/model'
    folder_to_load = ''

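    # Load the train/val images as grayscale; they are reshaped into single-channel tensors below.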
    x, y = image_preloader(TRAIN_DATA,
                           grayscale=True,
                           image_shape=(image_size, image_size),
                           mode='file',
                           files_extension=['.png'])
    x_val, y_val = image_preloader(VAL_DATA,
                                   grayscale=True,
                                   image_shape=(image_size, image_size),
                                   mode='file',
                                   files_extension=['.png'])
    x = np.reshape(x, (len(x), 128, 128, 1))
    x_val = np.reshape(x_val, (len(x_val), 128, 128, 1))

    model = get_zf_model(
        filter_size=7,
        folder_to_save=folder_to_save,
        folder_to_load=folder_to_load,
        image_size=image_size,
        strides=strides,
        learning_rate=0.0003,
        channels=1,
    )
    print('\nStart training ...')
    model.fit(x,
              y,
              n_epoch=50,
              validation_set=(x_val, y_val),
              shuffle=True,
              batch_size=batch_size,
              show_metric=True,
              snapshot_epoch=True,
              run_id='alex_TB5')
    print('\nStart saving ...')
    model.save(folder_to_save + '/model/model')
Example 13
def get_data(data_dir):
    train_file, val_file, conf_file = build_dataset_index(data_dir)
    print(conf_file)
    from tflearn.data_utils import image_preloader
    # X_test, Y_test = image_preloader(val_file, image_shape=(64, 64), mode='file', categorical_labels=True, normalize=True, filter_channel=True)
    X_conf, Y_conf = image_preloader(conf_file, image_shape=(64, 64), mode='file', categorical_labels=True, normalize=True, filter_channel=True)
    # return X_test, Y_test, X_conf, Y_conf
    return X_conf, Y_conf
Example 14
def get_data_preloader():
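    # DATASET_FILE and input_width are module-level constants defined elsewhere in the source project.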
    X, Y = image_preloader(DATASET_FILE,
                           image_shape=(input_width, input_width),
                           mode='file',
                           categorical_labels=True,
                           normalize=True,
                           grayscale=False)
    return X, Y
Example 15
def predict(prjname, basenet, predictclass, perclass, numclasses, dims, withtransfer):
    
    # load data
    tf.reset_default_graph()
    X1, Y1 = image_preloader(".\\data\\test\\classes",
                            image_shape=tuple(dims)[0:2],
                            mode="folder",
                            categorical_labels=True,
                            normalize=True,
                            files_extension=[".jpg",".png"],
                            filter_channel=True)
    
    # retrain vgg16 on goteborg
    img_prep = ImagePreprocessing()
    # 2FIX: normalization should be done per channel
    img_prep.add_featurewise_zero_center()
    #img_prep.add_featurewise_stdnorm()
    
    x1 = tflearn.input_data(shape=np.insert(np.asarray(dims, dtype=object),0,None).tolist(),
                            data_preprocessing=img_prep,
                            name="input")
    
    if withtransfer:
        checkpoint = "vgg16goteborg"
    else:
        checkpoint = "vgg16"
        
    mt1 = templates.fetch(checkpoint)
    #numclasses = 5
    model1, _, codelist = nets.getcodes(mt1, x1, numclasses)
    
    # prepare input for second stage
    # 2DO: should X2 be normalized? is ReLUing sufficient?
    pixclasses = numclasses + 1
    X2 = codes.formatcodes(codelist,model1,x1,X1)
    Y2 = utils.OHpixlabels(predictclass, perclass, pixclasses, dims[0], "test")
    
    # use second net on codes
    tf.reset_default_graph()

    x2 = tflearn.input_data(shape=np.insert(np.asarray(list(X2.shape)[1:], dtype=object),0,None).tolist(),
                            name='mlp_input')
    
    mt2 = templates.fetch("pixelnet2.2")
    model2, _, _ = nets.getcodes(mt2, x2, pixclasses)
    model2.load(".\\logs\\ckp\\" + "pixelnetgoteborg\\" + "pixelnet" + prjname + ".ckpt",weights_only=True)

    outv = model2.predict(X2)
    outv = np.asarray(outv)
    pixclass = np.argmax(outv[0,:,:,:], axis=2)
    
    preddir = ".\\data\\test\\preds"
    os.makedirs(preddir, exist_ok=True)
    np.save(preddir + "\\pred_0.npy", pixclass)
    print("bien!")
Example 16
	def predict_type(self,bookdir):
		# Import dataset
		X, Y = image_preloader(bookdir, image_shape=(128, 128), mode='file',
			                   categorical_labels=False, normalize=False)
		Y = to_categorical(Y, 3)
		print("-- Runbook Import Complete.")

		# Predict label
		prediction = self.model.predict(X)
		
		return prediction
Example 17
def read_image(self):
    x, y = image_preloader(self.files_txt,
                           image_shape=(224, 224),
                           mode='file',
                           categorical_labels=True,
                           normalize=False,
                           files_extension=['.jpg', '.png'],
                           filter_channel=True)
    self.x_matrix = np.array(x)
    self.y_one_hot = np.array(y)
    return self.x_matrix, self.y_one_hot
Example 18
def prediction(img):
    dataset = "rgb_faces.txt"

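    # Note: the images/labels preloaded here are not used by the prediction at the end of this function.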
    images, labels = image_preloader(dataset,
                                     image_shape=(128, 128),
                                     mode='file',
                                     categorical_labels=True,
                                     normalize=True)

    preProc = tflearn.DataPreprocessing()
    preProc.add_custom_preprocessing(resizeimg)

    acc = tflearn.metrics.Accuracy()

    network = input_data(shape=[None, 128, 128, 3])
    # optional: data_preprocessing=preProc
    # or: data_preprocessing=img_prep, data_augmentation=img_aug

    network = conv_2d(network, 64, 3, activation='relu')
    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2, name='maxpool')

    network = conv_2d(network, 128, 3, activation='relu')
    network = conv_2d(network, 128, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2, name='maxpool')

    network = conv_2d(network, 256, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2, name='maxpool')

    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = conv_2d(network, 512, 3, activation='relu')
    network = max_pool_2d(network, 2, strides=2, name='maxpool')

    network = fully_connected(network, 1024, activation='relu')
    network = dropout(network, 0.5)

    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.5)

    network = fully_connected(network, 5, activation='softmax')

    network = regression(network,
                         optimizer='sgd',
                         loss='categorical_crossentropy')

    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.load('./rgb_faces.tflearn')

    return model.predict(img)
Example 19
def vgg_rocauc(
    folder_to_load='./out_grey/vgg_i1/checkpoints-185050',
    title='VGG Net',
):
    x_val, y_val = image_preloader(VAL_DATA,
                                   image_shape=(128, 128),
                                   mode='file',
                                   files_extension=['.png'])

    print('loaded_data')
    x_val = np.reshape(x_val, (len(x_val), 128, 128, 1))

    model = get_vgg_model(foler_to_save='not_meter', folder_to_load=folder_to_load, depth=1)

    arr_pred, arr_val = predict_all(model, x_val, y_val, step=200)
    calculate_and_print_roc(arr_val, arr_pred, title=title)
Example 20
def train():
    model_path = '.'
    file_list = './train_fvgg_emo.txt'
    X, Y = image_preloader(file_list, image_shape=(224,224), mode='file', categorical_labels=True,
                           normalize=False, files_extension=['.jpg', '.png'], filter_channel=True)

    classes = 7

    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center(mean=MEAN_VALUE, per_channel=True)

    x = tflearn.input_data(shape=[None, 224, 224, 3], name='input', data_preprocessing=img_prep)
    softmax = vgg16(x, classes)
    # default optimizer='adam', loss='categorical_crossentropy'
    regression = tflearn.regression(softmax, learning_rate=0.0001, restore=False)
    # tensorboard_verbose=3: Loss, Accuracy, Gradients, Weights, Activations, Sparsity.(Best visualization)
    model = tflearn.DNN(regression, checkpoint_path='./logs/vgg-finetuning/checkpoints/', max_checkpoints=3, tensorboard_verbose=2, tensorboard_dir='./logs/vgg-finetuning/summaries/')
    model_file = os.path.join(model_path, 'vgg16.tflearn')
    model.load(model_file, weights_only=True)

    # start finetuning
    model.fit(X, Y, n_epoch=20, validation_set=0.1, shuffle=True, show_metric=True, batch_size=64, snapshot_epoch=False, snapshot_step=200, run_id='vgg-finetuning')
    model.save('./logs/vgg-finetuning/vgg_finetune_emo.tfmodel')
Example 21
    x = tflearn.fully_connected(x, num_class, activation='softmax', scope='fc8',
                                restore=False)

    return x


data_dir = "/path/to/your/data"
model_path = "/path/to/your/vgg_model"
# the file list generated by gen_files_list.py
files_list = "/path/to/your/file/with/images"

from tflearn.data_utils import image_preloader

X, Y = image_preloader(files_list, image_shape=(224, 224), mode='file',
                       categorical_labels=True, normalize=False,
                       files_extension=['.jpg', '.png'], filter_channel=True)
# or use mode 'folder'
# X, Y = image_preloader(data_dir, image_shape=(224, 224), mode='folder',
#                        categorical_labels=True, normalize=True,
#                        files_extension=['.jpg', '.png'], filter_channel=True)

num_classes = 10 # num of your dataset

# VGG preprocessing
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center(mean=[123.68, 116.779, 103.939],
                                     per_channel=True)
# VGG Network
x = tflearn.input_data(shape=[None, 224, 224, 3], name='input',
                       data_preprocessing=img_prep)
Example 22
                                restore=False)

    return x


data_dir = "/path/to/your/data"
model_path = "/path/to/your/vgg_model"
# the file list generated by gen_files_list.py
files_list = "/path/to/your/file/with/images"

from tflearn.data_utils import image_preloader

X, Y = image_preloader(files_list,
                       image_shape=(224, 224),
                       mode='file',
                       categorical_labels=True,
                       normalize=True,
                       files_extension=['.jpg', '.png'],
                       filter_channel=True)
# or use mode 'folder'
# X, Y = image_preloader(data_dir, image_shape=(224, 224), mode='folder', categorical_labels=True, normalize=True,
#                        files_extension=['.jpg', '.png'], filter_channel=True)
num_classes = 10  # num of your dataset

softmax = vgg16(num_classes)
regression = regression(softmax,
                        optimizer='adam',
                        loss='categorical_crossentropy',
                        learning_rate=0.001,
                        restore=False)
Example 23
network = regression(network, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.01)
# Define model
## To Do
    ## Define model and assign network. Same as training.
model = tflearn.DNN(network, tensorboard_verbose=0)
# Load Model into model object
## To Do.
    ## Use the model.load() function
model.load('a2.tfl')

# load test images
from tflearn.data_utils import image_preloader
import numpy as np
# Load path/class_id image file:
dataset_file = 'my_dataset.txt'

X_test, Y_test = image_preloader(dataset_file, image_shape=(32, 32), mode='file',
                                 categorical_labels=True, normalize=True,
                                 files_extension=['.jpg', '.png'], filter_channel=True)
X_test = np.array(X_test)
Y_test = np.array(Y_test)


# predict test images label
y_pred = model.predict(X_test)

# Compute accuracy of trained model on test images
print ("Accuracy: ",np.sum(np.argmax(y_pred, axis=1) == np.argmax(Y_test, axis=1))*100/Y_test.shape[0],"%")
Example 24
from tflearn.data_utils import image_preloader
import tensorflow as tf

import sys
from pathlib import Path

import numpy as np
from PIL import Image
from tflearn.data_utils import resize_image

tf.reset_default_graph()

#Load Datasets
X, Y = image_preloader('C:\\Users\\lukas\\Documents\\Uni\\KI\\kat\\ROOT',
                       image_shape=(100, 125),
                       mode='folder',
                       categorical_labels=True,
                       normalize=True)

#Convolutional Neural Network
network = input_data(shape=[None, 125, 100, 4])
network = conv_2d(network, 100, 10, activation='relu')
network = max_pool_2d(network, 2)
network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.5)
network = fully_connected(network, 2, activation='softmax')
network = regression(network,
                     optimizer='adam',
Example 25
NUM_PERSON = 1000
NUM_CLASS = 3755
NUM_TEST = int((NUM_CLASS * NUM_PERSON) * TEST_RATIO)
NUM_TRAIN = NUM_CLASS * NUM_PERSON - NUM_TEST
BATCH_SIZE = 128
TRAIN_DIR = '/data0/jiahao/hcl/train'
TEST_DIR = '/data0/jiahao/hcl/test'
EPOCH = 1
DROPOUT_PROB = 0.6

from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_utils import image_preloader
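# Preload the grayscale character images; mode='folder' assigns one class per subdirectory.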
train_data, train_labels = image_preloader(TRAIN_DIR,
                                           image_shape=IMG_SIZE,
                                           mode='folder',
                                           normalize=True,
                                           grayscale=True,
                                           categorical_labels=True,
                                           files_extension=None,
                                           filter_channel=False)
test_data, test_labels = tflearn.data_utils.image_preloader(
    TEST_DIR,
    image_shape=IMG_SIZE,
    mode='folder',
    normalize=True,
    grayscale=True,
    categorical_labels=True,
    files_extension=None,
    filter_channel=False)

img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center(mean=0)
Example 26
	tower_mixed = merge([tower_conv,tower_conv1_2], mode='concat', axis=3)
	tower_out = relu(batch_normalization(conv_2d(tower_mixed, net.get_shape()[3], 1, bias=False, activation=None, name='Conv2d_1x1')))
	net += scale * tower_out
	if activation:
		if isinstance(activation, str):
			net = activations.get(activation)(net)
		elif hasattr(activation, '__call__'):
			net = activation(net)
		else:
			raise ValueError("Invalid Activation.")
	return net

if __name__ == '__main__':
	generate_dataset_path('dataset.txt', True)

	X, Y = image_preloader(os.path.join(USER_ROOT, 'dataset.txt'), image_shape=(150, 150),
						   mode='file', categorical_labels=True, normalize=False)

	X, Y = shuffle(X, Y)
	X = 2 * (X / 255) - 1
	# X_train, y_train, X_test, y_test = split_training_testing(X, Y)
	# del X, Y

	num_classes = 3
	dropout_keep_prob = 0.8

	network = input_data(shape=[None, 150, 150, 3])
	conv1a_3_3 = relu(batch_normalization(conv_2d(network, 32, 3, strides=2, bias=False, padding='VALID', activation=None, name='Conv2d_1a_3x3')))
	del network
	conv2a_3_3 = relu(batch_normalization(conv_2d(conv1a_3_3, 32, 3, bias=False, padding='VALID', activation=None, name='Conv2d_2a_3x3')))
	del conv1a_3_3
	conv2b_3_3 = relu(batch_normalization(conv_2d(conv2a_3_3, 64, 3, bias=False, activation=None, name='Conv2d_2b_3x3')))
Example 27
    videCapture.release()
    cv.destroyAllWindows()


def ResizeImg(arr):
    arr = np.reshape(arr, (-1, 128, 128, 3))
    return arr


dataset = "rgb_faces.txt"

images, labels = image_preloader(dataset, image_shape=(128, 128), mode='file', categorical_labels=True, normalize=True)

'''
test_img = np.reshape(test_img, (-1, 128, 128, 1))
test_img_2 = np.reshape(test_img, (-1, 128, 128, 1))
test_img_3 = np.reshape(test_img, (-1, 128, 128, 1))
'''

preProc = tflearn.DataPreprocessing()
preProc.add_custom_preprocessing(ResizeImg)
#images = np.reshape(images, (-1, 128, 128, 1))
#labels = np.reshape(labels, (-1, 2))

'''
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
'''
Example 28
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization, batch_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import image_preloader

train_file = '../../images/sampling/train.txt'
test_file = '../../images/sampling/test.txt'

channels = 1
width = 64
height = 50

X, Y = image_preloader(train_file,
                       image_shape=(height, width),
                       mode='file',
                       categorical_labels=True,
                       normalize=True)
testX, testY = image_preloader(test_file,
                               image_shape=(height, width),
                               mode='file',
                               categorical_labels=True,
                               normalize=True)

network = input_data(shape=[None, width, height], name='input')
network = tflearn.layers.core.reshape(network, [-1, width, height, 1],
                                      name='Reshape')

network = conv_2d(network, 64, 1, activation='relu', regularizer="L2")
network = batch_normalization(network)
network = conv_2d(network, 64, 1, activation='relu', regularizer="L2")
Example 29
import deepneuralnet as net
import numpy as np
from tflearn.data_utils import image_preloader
model = net.model
path_to_model = './ZtrainedNet/final-model.tfl'
model.load(path_to_model)
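# Preload validation images by folder and compare each prediction against its folder label.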
X, Y = image_preloader(target_path='./validate', image_shape=(100, 100), mode='folder',
                       grayscale=False, categorical_labels=True, normalize=True)
X = np.reshape(X, (-1, 100, 100, 3))
for i in range(0, len(X)):
    iimage = X[i]
    icateg = Y[i]
    result = model.predict([iimage])[0]
    prediction = result.tolist().index(max(result))
    reality = icateg.tolist().index(max(icateg))
    if prediction == reality:
        print("image %d CORRECT " % i + "\n", end='')
    else:
        print("image %d WRONG " % i + "\n", end='')
        print(result)
Example 30
    # Data loading
    if os.path.exists('Inceptionmean.pkl'):
        # load the previously computed mean from the pickle file
        pkl_file = open('Inceptionmean.pkl', 'rb')
        mean = pickle.load(pkl_file)
        pkl_file.close()
        
        print('mean pickle is here!')
    else:
        # Load mean from original image files:
        dataset_file = 'trainfilelist.txt'

        # Build the preloader array, resize images to sample size
        from tflearn.data_utils import image_preloader
        X, Y = image_preloader(dataset_file, image_shape=(FLAGS.sample_size, FLAGS.sample_size),
                               mode='file', categorical_labels=True,
                               normalize=True)

        #Reshape X
        X = np.array(X)
        X = X.reshape([-1, FLAGS.sample_size, FLAGS.sample_size, 1])
        X, mean = du.featurewise_zero_center(X)

        #write to pickle file
        pkl_file = open('Inceptionmean.pkl', 'wb')
        pickle.dump(mean, pkl_file)
        pkl_file.close()

    # load the Inception model
    network = create_Inception(3)
    model = tflearn.DNN(network)
Example 31
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import image_preloader

# import tflearn.datasets.oxflower17 as oxflower17
# X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

import win_unicode_console
win_unicode_console.enable()

X, Y = image_preloader(
    target_path=r'C:/Users/Administrator/Desktop/all_test/lungall',
    image_shape=(227, 227),
    mode='folder',
    normalize=True,
    grayscale=False,
    categorical_labels=True)


def my_func(x):
    x_list_to_array = np.array(x)
    x_s = x_list_to_array.reshape((-1, 227, 227, 1))

    # replicate the single channel to make a 3-channel image
    a = x_s[:, :, :, 0]
    a = a.reshape((-1, 227, 227, 1))
    x = np.concatenate((x_s, a), axis=3)
    x = np.concatenate((x, a), axis=3)
    return x
Example 32
'''
Coding Just for Fun
Created by burness on 16/9/10.
'''
from __future__ import division, print_function, absolute_import

import tflearn
from tflearn.data_utils import image_preloader

# Residual blocks
# 32 layers: n=5, 56 layers: n=9, 110 layers: n=18
n = 5

# Data loading
X, Y = image_preloader('files_list', image_shape=(224, 224), mode='file',
                       categorical_labels=True, normalize=True,
                       files_extension=['.jpg', '.png'])


# Building Residual Network
net = tflearn.input_data(shape=[None, 224, 224, 3], name='input',)

net = tflearn.conv_2d(net, 64, 7, strides=2, regularizer='L2', weight_decay=0.0001)
net = tflearn.residual_block(net, n, 16)
net = tflearn.residual_block(net, 1, 32, downsample=True)
net = tflearn.residual_block(net, n-1, 32)
net = tflearn.residual_block(net, 1, 64, downsample=True)
net = tflearn.residual_block(net, n-1, 64)
net = tflearn.batch_normalization(net)
net = tflearn.activation(net, 'relu')
net = tflearn.global_avg_pool(net)
# Regression
net = tflearn.fully_connected(net, 10, activation='softmax')