Example #1
import numpy as np
import os
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import load_image

SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
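# Load 20 "miku" and 20 "no-miku" training images from the mini dataset.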
num = 20
imgs = []
for i in range(1, num + 1):
    imgs.append(
        np.asarray(
            load_image("%s/cnn_dataset_mini/miku/%s.jpg" % (SCRIPT_PATH, i))))
for i in range(1, num + 1):
    imgs.append(
        np.asarray(
            load_image("%s/cnn_dataset_mini/no-miku/%s.jpg" %
                       (SCRIPT_PATH, i))))
imgs = np.array(imgs)
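# One-hot labels: the first num rows are the "miku" class, the remaining num rows "no-miku".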
y_data = np.r_[np.c_[np.ones(num), np.zeros(num)],
               np.c_[np.zeros(num), np.ones(num)]]
print(imgs.shape)
print(y_data.shape)

x_test = []
for i in range(1, 11):
    # NOTE: the original example is truncated here; the test-image path below
    # is an assumption modelled on the training paths above.
    x_test.append(
        np.asarray(
            load_image("%s/cnn_dataset_mini/test-set/%s.jpg" %
                       (SCRIPT_PATH, i))))
x_test = np.array(x_test)
Example #2
    # This example begins partway through an evaluation routine; the model and
    # session setup above this point is not shown. The nested-list
    # initialisation below is a hypothetical reconstruction of its truncated
    # first line.
    per_class_counts = [[0 for col in range(num_of_class)]
                        for row in range(num_of_class)]

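    # Collect per-image results in a CSV file ("wb" is the Python 2 convention for csv.writer).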
    csv_dir = "/home/goerlab/Bilinear-CNN-TensorFlow/core/model/20180110/record/"
    csvfile = open(csv_dir + "test_detail.csv", "wb")
    writer = csv.writer(csvfile)
    writer.writerow(["labels", "prediction", "confidence_score", "file_name"])
    image_dir = "/media/goerlab/My Passport/Welder_detection/dataset/20180109/Data/val/"
    cnt = 0
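    # Each sub-directory of image_dir is one class; its name is used as the integer label.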
    for dirc in os.listdir(image_dir):
        subdir = image_dir + dirc + '/'
        if os.path.isdir(subdir):
            for subdirc in os.listdir(subdir):

                file_name = subdir + subdirc
                print(file_name)
                file_image = data_utils.load_image(file_name)
                file_image = data_utils.resize_image(file_image, 448, 448)
                #file_image=cv2.imread(file_name)
                #file_image=cv2.resize(file_image,(448,448))

                #file_image=file_image*(1./255)-0.5
                trans_image = np.asarray(file_image).reshape((1, 448, 448, 3))
                #trans_image=trans_image(1./255)-0.5

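                # Run the graph: predicted class, raw fc3l activations and
                # confidence score for this single image.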
                real_predict, real_fc3l, real_confidence_score = sess.run(
                    [prediction, vgg.fc3l, confidence_score],
                    feed_dict={imgs: trans_image})
                #writer.writerow([str(dirc),real_predict[0],real_proba[0],file_name])
                # print(trans_image)
                real_label = int(dirc)
                print("label:%d, prediction:%d, confidence score:%f" %
Example #3
import numpy as np
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import load_image

# 'network' continues from an input layer (and possibly earlier conv blocks)
# defined in the original script but not shown in this snippet.
network = conv_2d(network, 64, 5, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 128, 5, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = fully_connected(network, 512, activation='relu')
#network = dropout(network, 0.8)
network = fully_connected(network, 1024, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 2, activation='softmax')
network = regression(network,
                     optimizer='adam',
                     learning_rate=0.00001,
                     loss='categorical_crossentropy',
                     name='target')

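# Build the model object and restore the trained weights from miku_model.tflearn.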
model = tflearn.DNN(network)
model.load('miku_model.tflearn')

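# Load four test images from /tmp, resize them to 100x100, and run the classifier.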
imgs = []
num = 4
for i in range(1, num + 1):
    img = load_image("/tmp/t%s.jpg" % (i))
    img = img.resize((100, 100))
    img_arr = np.asarray(img)
    imgs.append(img_arr)
imgs = np.array(imgs)
print(imgs.shape)
print(np.round(model.predict(imgs)))
Example #4
import numpy as np
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import load_image

# 'network' continues from the input and convolution layers defined earlier in
# the original script; that part of the snippet is not shown here.
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = fully_connected(network, 512, activation='relu')
network = fully_connected(network, 1024, activation='relu')
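# tflearn's dropout takes a keep probability (here 0.8) and is only active during training.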
network = dropout(network, 0.8)
network = fully_connected(network, 2, activation='softmax')
network = regression(network,
                     optimizer='adam',
                     learning_rate=0.00001,
                     loss='categorical_crossentropy',
                     name='target')

model = tflearn.DNN(network)
model.load('miku_model.tflearn')

#Load test data

imgs = []
num = 1
for i in range(1, num + 1):
    img = load_image("test/test_chuyin%s.jpg" % (i))
    img = img.resize((100, 100))
    img_arr = np.asarray(img)
    imgs.append(img_arr)
imgs = np.array(imgs)

#predict

print(np.round(model.predict(imgs)))

#output
Example #5
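    # Like Example #2, this example starts partway through an evaluation
    # function; the model and session setup is not shown.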
    image_dir = "/media/goerlab/My Passport/20180211_HistoryImage/HistoryImage/Need_result_5/"
    cnt = 0
    print("here")
    for root, dirs, files in os.walk(image_dir):
        print(root)
        print("files len:%d" %(len(files)))
        for i in files:
            print("file:%s" % (i))
            filename = os.path.splitext(i)
            # Only .bmp files are scored; skip everything else so that
            # 'image_file' below is always defined.
            if filename[1] != '.bmp':
                continue
            image_file = root + "/" + i

            real_label = 0
            # file_name=image_dir+dirc
            print(image_file)
            file_image = data_utils.load_image(image_file)
            #croped = crop_image(file_image)
            file_image = data_utils.resize_image(file_image, 448, 448)
            # file_image=cv2.imread(file_name)
            # file_image=cv2.resize(file_image,(448,448))



            # file_image=file_image*(1./255)-0.5
            trans_image = np.asarray(file_image).reshape((1, 448, 448, 3))
            # trans_image=trans_image(1./255)-0.5

            real_predict, real_fc3l, real_confidence_score, top1_out, top2_out = sess.run(
                [prediction, vgg.fc3l, confidence_score, top1, top2], feed_dict={imgs: trans_image})
            # writer.writerow(...)
Example #6
import tensorflow as tf
import numpy as np
import os
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import load_image

SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
num = 548
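# Training data: 548 "endo" and 548 "noen" TIFF images.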
imgs = []
for i in range(1, num + 1):
    imgs.append(np.asarray(load_image("%s/endo/%s.tif" % (SCRIPT_PATH, i))))
for i in range(1, num + 1):
    imgs.append(np.asarray(load_image("%s/noen/%s.tif" % (SCRIPT_PATH, i))))
imgs = np.array(imgs)
y_data = np.r_[np.c_[np.ones(num), np.zeros(num)],
               np.c_[np.zeros(num), np.ones(num)]]
print(imgs.shape)
print(y_data.shape)

x_test = []
for i in range(1, 11):
    x_test.append(
        np.asarray(load_image("%s/TestSet/%s.tif" % (SCRIPT_PATH, i))))
x_test = np.array(x_test)
y_test = np.r_[np.c_[np.ones(5), np.zeros(5)], np.c_[np.zeros(5), np.ones(5)]]
print(x_test.shape)
print(y_test.shape)
Example #7
import numpy as np
import os
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import load_image

# Enable Tensorboard
# tensorboard --logdir='/tmp/tflearn_logs'

SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
num = 20
imgs = []
for i in range(1, num + 1):
    imgs.append(np.asarray(load_image("%s/Apple/%s.jpeg" % (SCRIPT_PATH, i))))
for i in range(1, num + 1):
    imgs.append(np.asarray(load_image("%s/Banana/%s.jpeg" % (SCRIPT_PATH, i))))
imgs = np.array(imgs)
y_data = np.r_[np.c_[np.ones(num), np.zeros(num)],
               np.c_[np.zeros(num), np.ones(num)]]
print(imgs.shape)
print(y_data.shape)

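# Held-out test set: according to y_test, images 1-5 are labelled Apple and 6-10 Banana.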
test = []
for i in range(1, 11):
    test.append(np.asarray(load_image("%s/Test/%s.jpeg" % (SCRIPT_PATH, i))))
x_test = np.array(test)
y_test = np.r_[np.c_[np.ones(5), np.zeros(5)], np.c_[np.zeros(5), np.ones(5)]]
print(x_test.shape)
print(y_test.shape)
Example #8
import os
import numpy as np
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import load_image

# 'network' continues from the input and earlier convolution layers defined in
# the original script; that part of the snippet is not shown here.
network = conv_2d(network, 128, 5, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = fully_connected(network, 512, activation='relu')
network = fully_connected(network, 1024, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 2, activation='softmax')
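# Training configuration baked into the graph: Adam optimiser, learning rate 1e-5,
# categorical cross-entropy over the two classes.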
network = regression(network,
                     optimizer='adam',
                     learning_rate=0.00001,
                     loss='categorical_crossentropy',
                     name='target')

model = tflearn.DNN(network)
model.load('apple_model.tflearn')

#Load test data

SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
imgs = []
num = 4
for i in range(1, num + 1):
    img = load_image("%s/Testing/File%s.jpeg" % (SCRIPT_PATH, i))
    img = img.resize((100, 100))
    img_arr = np.asarray(img)
    imgs.append(img_arr)
imgs = np.array(imgs)

#predict

print(np.round(model.predict(imgs)))
Example #9
import numpy as np
import os
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import load_image


SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))

num = 20
imgs = []
for i in range(1, num + 1):
    imgs.append(np.asarray(load_image("%s/miku/%s.jpg" % (SCRIPT_PATH, i))))
for i in range(1, num + 1):
    imgs.append(np.asarray(load_image("%s/no-miku/%s.jpg" % (SCRIPT_PATH, i))))
imgs = np.array(imgs)
y_data = np.r_[np.c_[np.ones(num), np.zeros(num)],
               np.c_[np.zeros(num), np.ones(num)]]
print(imgs.shape)
print(y_data.shape)

x_test = []
for i in range(1, 11):
    x_test.append(
        np.asarray(load_image("%s/test-set/%s.jpg" % (SCRIPT_PATH, i))))
x_test = np.array(x_test)
y_test = np.r_[np.c_[np.ones(5), np.zeros(5)], np.c_[np.zeros(5), np.ones(5)]]
print(x_test.shape)
print(y_test.shape)

# Building convolutional network