Code example #1
import os
import time

import cv2
from alcon_utils import AlconUtils
from user_function import MyAlgorithm


def main(datasetdir, lv):
    """
    Main entry point of the evaluation code.
    :param datasetdir: path to the directory containing the dataset
    :param lv: level string used in the annotation file names
    """
    start = time.time()

    # Initialization
    print("Now evaluating...")

    alcon = AlconUtils(datasetdir)
    myalgorithm = MyAlgorithm(datasetdir)

    # Load the targets
    fn = "target_lv" + lv + "_test_5.csv"
    alcon.load_annotations_target(fn)

    results = {}
    prev = None
    # Loop that recognizes one target at a time
    for bb_id, target in alcon.targets.items():
        img_file_id, *bb = target
        img_filename = os.path.join(datasetdir, "images", img_file_id + ".jpg")

        if prev != img_filename:
            # Image of the whole page
            full_img = cv2.imread(img_filename)
            prev = img_filename

        # Recognition (call your own algorithm here)
        result = myalgorithm.predict(full_img, bb)

        # Store the result keyed by the bounding-box id
        results[bb_id] = result

    # Evaluation
    fnGround = "groundtruth_lv" + lv + "_test_5.csv"
    alcon.load_annotations_ground(fnGround)
    alcon.evaluation(results)

    # Write the results to a file
    alcon.write_results(results)

    elapsed_time = time.time() - start
    print("elapsed_time:", elapsed_time, "[sec]")
Code example #2
def read_data(datasetdir, lv):
    alcon = AlconUtils(datasetdir)

    # Load the annotations
    fn = "target_lv" + lv + ".csv"
    alcon.load_annotations_target(fn)

    fn = "groundtruth_lv" + lv + ".csv"
    alcon.load_annotations_ground(fn)

    # Build the dataset for the KNN model
    dataset = {}
    for bb_id, target in alcon.targets.items():
        img_filename = alcon.get_filename_char(bb_id)
        code = alcon.ground_truth[bb_id][0]
        if code not in dataset:
            dataset[code] = []
        #if len(dataset[code]) == 100:
        #    continue
        img = cv2.imread(img_filename)
        feature = MyAlgorithm.feature_extraction(img)
        dataset[code].append(feature)

    labels = []
    data = []
    classes = sorted(dataset.keys())
    for label, values in dataset.items():
        labels += [classes.index(label)] * len(values)
        data += values

    data = np.asarray(data, dtype=float)
    labels = np.asarray(labels, dtype=int)

    return data, labels, classes
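MyAlgorithm.feature_extraction is not shown in these excerpts. A purely illustrative sketch, consistent with code examples #6 and #8 below (which reshape each feature back into a 32x32 single-channel image), could look like this; the class name and method signature come from the calls above, the body is an assumption:

import cv2
import numpy as np


class MyAlgorithm:
    @staticmethod
    def feature_extraction(img):
        # Hypothetical: grayscale, resize to 32x32, scale to [0, 1] and flatten
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        small = cv2.resize(gray, (32, 32))
        return (small.astype(np.float32) / 255.0).ravel()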
Code example #3
def main(datasetdir, lv):
    input_shape = (64, 64, 3)

    # Initialization
    alcon = AlconUtils(datasetdir)

    # Load the annotations
    fn = "target_lv" + lv + "_samp_0.9.csv"
    alcon.load_annotations_target(fn)

    fn = "groundtruth_lv" + lv + "_samp_0.9.csv"
    alcon.load_annotations_ground(fn)

    # Build the dataset for the CNN model
    dataset = {}
    for bb_id, target in alcon.targets.items():
        img_filename = alcon.get_filename_char(bb_id)
        code = alcon.ground_truth[bb_id][0]
        if code not in dataset:
            dataset[code] = []
        img = cv2.imread(img_filename)
        img = cv2.resize(img, (input_shape[0], input_shape[1]))
        img = np.asarray(img, dtype=float) / np.max(img)
        dataset[code].append(img)

    labels = []
    data = []
    classes = sorted(dataset.keys())
    for label, values in dataset.items():
        labels += [classes.index(label)] * len(values)
        data += values

    data = np.asarray(data, dtype=float)
    labels = np.asarray(labels, dtype=int)
    labels_c = keras.utils.to_categorical(labels)

    # Train the network
    model = Sequential()
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=(3, 3)))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(3, 3)))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(len(classes), activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(),
                  metrics=['accuracy'])

    model.fit(data, labels_c, batch_size=100, epochs=100, verbose=1)

    outputfile = os.path.join(datasetdir, "model.h5")
    classfile = os.path.join(datasetdir, "classes.pkl")
    model.save(outputfile)
    joblib.dump(classes, classfile)
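For reference, the saved network and class list could be loaded back for inference roughly as follows (a minimal sketch: the preprocessing mirrors the training loop above, while the paths and the test image are placeholders):

import os

import cv2
import joblib
import numpy as np
from keras.models import load_model

datasetdir = "../../dataset"                      # placeholder
model = load_model(os.path.join(datasetdir, "model.h5"))
classes = joblib.load(os.path.join(datasetdir, "classes.pkl"))

img = cv2.imread("character_crop.jpg")            # hypothetical cropped character image
img = cv2.resize(img, (64, 64))
img = np.asarray(img, dtype=float) / np.max(img)  # same normalization as training

probs = model.predict(img[np.newaxis, ...])       # shape: (1, num_classes)
print(classes[int(np.argmax(probs))])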
Code example #4
def main(datasetdir, lv):

    # Initialization
    alcon = AlconUtils(datasetdir)

    # Load the annotations
    fn = "target_lv" + lv + "_samp_5.csv"
    alcon.load_annotations_target(fn)

    fn = "groundtruth_lv" + lv + "_samp_5.csv"
    alcon.load_annotations_ground(fn)

    
    # Build the KNN model
    dataset = {}
    for bb_id, target in alcon.targets.items():
        img_filename = alcon.get_filename_char(bb_id)
        code = alcon.ground_truth[bb_id][0]
        if code not in dataset:
            dataset[code] = []
        # if len(dataset[code]) == 5:
        #     continue
        img = cv2.imread(img_filename)
        feature = MyAlgorithm.feature_extraction(img)
        dataset[code].append(feature)

    labels = []
    data = []
    classes = sorted(dataset.keys())
    for label, values in dataset.items():
        labels += [classes.index(label)] * len(values)
        data += values

    data = np.asarray(data, dtype=float)
    labels = np.asarray(labels, dtype=int)

    classifier = KNeighborsClassifier()  # edit here
    classifier.fit(data, labels)

    outputfile = "./model.pkl"
    joblib.dump((classes, classifier), outputfile)
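The saved (classes, classifier) tuple can be loaded back for prediction in the same spirit (a sketch; it assumes joblib is importable as a standalone package, that feature_extraction matches the training-time features, and the input image path is a placeholder):

import cv2
import joblib
import numpy as np
from user_function import MyAlgorithm

classes, classifier = joblib.load("./model.pkl")
img = cv2.imread("character_crop.jpg")            # hypothetical cropped character image
feature = MyAlgorithm.feature_extraction(img)
pred = classifier.predict(np.asarray([feature], dtype=float))[0]
print(classes[pred])                              # predicted character code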
Code example #5
from user_function import MyAlgorithm
from alcon_utils import AlconUtils
import cv2

import os.path
import pickle

datasetdir = "../../dataset/"
annotation_name = "iiyama_0.1"
"""
Main part of the evaluation code.
:param datasetdir: path to the directory containing the dataset
"""

# Initialization
alcon = AlconUtils(datasetdir)
myalgorithm = MyAlgorithm(datasetdir)

# Load the targets
file_name_last = "_lv1_" + annotation_name + ".csv"
alcon.load_annotations_target("target" + file_name_last)

imgs = {}
results_pre = {}
# Loop that recognizes one target at a time
print("coco")

def predict_preprocess(full_img, bbox):
    # Crop the target region out of the full page image
    x, y, w, h = bbox
    target_img = full_img[y:y+h, x:x+w, :]
    return target_img
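The loop being set up above would presumably call this helper once per target, along the lines of code example #1 (a sketch based on that example; caching the page images in imgs and storing the crops in results_pre is an assumption):

for bb_id, target in alcon.targets.items():
    img_file_id, *bb = target
    img_filename = os.path.join(datasetdir, "images", img_file_id + ".jpg")
    if img_filename not in imgs:
        imgs[img_filename] = cv2.imread(img_filename)

    # Crop of the character to be recognized, kept for a later prediction step
    results_pre[bb_id] = predict_preprocess(imgs[img_filename], bb)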
Code example #6
def main(datasetdir, lv):
    config = tensorflow.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.95
    backend.tensorflow_backend.set_session(tensorflow.Session(config=config))

    # Initialization
    alcon = AlconUtils(datasetdir)

    # Load the annotations
    alcon.load_annotations_target("target_lv" + lv + ".csv")
    alcon.load_annotations_ground("groundtruth_lv" + lv + ".csv")

    dataset = {}
    for bb_id, target in alcon.targets.items():
        code = alcon.ground_truth[bb_id][0]
        if code not in dataset:
            dataset[code] = []
        # if len(dataset[code]) == 10:
        #     continue
        img_filename = alcon.get_filename_char(bb_id)
        img = cv2.imread(img_filename)
        feature = MyAlgorithm.feature_extraction(img)
        dataset[code].append(feature)

    labels = []
    data = []
    classes = sorted(dataset.keys())

    for label, values in dataset.items():
        labels += [classes.index(label)] * len(values)
        data += values

    batch_size = 128
    num_classes = 46
    epochs = 12
    img_rows, img_cols = 32, 32
    channel = 1

    classifier = Sequential()
    classifier.add(
        keras.layers.normalization.BatchNormalization(input_shape=(img_rows,
                                                                   img_cols,
                                                                   channel)))
    classifier.add(Conv2D(32, (3, 3), activation='relu'))
    classifier.add(Conv2D(64, (3, 3), activation='relu'))
    classifier.add(MaxPooling2D(pool_size=(4, 4)))
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(Dropout(0.5))
    classifier.add(Flatten())
    classifier.add(Dense(128, activation='relu'))
    classifier.add(Dropout(0.5))
    classifier.add(Dense(num_classes, activation='softmax'))

    classifier.compile(loss=keras.losses.categorical_crossentropy,
                       optimizer=keras.optimizers.Nadam(),
                       metrics=['accuracy'])
    classifier.summary()

    numpy_data = numpy.asarray(data, dtype=float)
    float_data = numpy_data.reshape(numpy_data.shape[0], img_rows, img_cols, channel) \
                           .astype('float32')

    numpy_labels = numpy.asarray(labels, dtype=int)
    y_train = keras.utils.to_categorical(numpy_labels, num_classes)
    classifier.fit(float_data,
                   y_train,
                   batch_size=batch_size,
                   epochs=epochs,
                   verbose=1,
                   validation_data=None)

    outputfile = "./model.pkl"
    outputfile2 = "./model2.pkl"
    joblib.dump(classes, outputfile)
    classifier.save(outputfile2)
Code example #7
File: train.py  Project: IKKO-Ohta/ALCON16
def main(datasetdir, lv):
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 1.0
    K.tensorflow_backend.set_session(tf.Session(config=config))

    # Initialization
    alcon = AlconUtils(datasetdir)

    # Load the annotations
    fn = "target_lv" + lv + ".csv"
    alcon.load_annotations_target(fn)

    fn = "groundtruth_lv" + lv + ".csv"
    alcon.load_annotations_ground(fn)

    
    # Build the dataset (the KNN model is commented out below; a CNN is trained instead)
    dataset = {}
    print(len(alcon.targets.items()))
    for bb_id, target in alcon.targets.items():
        img_filename = alcon.get_filename_char(bb_id)
        code = alcon.ground_truth[bb_id][0]
        if code not in dataset:
            dataset[code] = []
        if len(dataset[code]) == 100:
            continue
        img = cv2.imread(img_filename)
        feature = MyAlgorithm.feature_extraction(img)
        dataset[code].append(feature)

    labels = []
    data = []
    classes = sorted(dataset.keys())
    print(len(dataset.items()))
    for label, values in dataset.items():
        labels += [classes.index(label)] * len(values)
        data += values

    data = np.asarray(data, dtype=float)
    labels = np.asarray(labels, dtype=int)

    print(data.shape)
    print(labels.shape)
#   classifier = KNeighborsClassifier()
#   classifier = RandomForestClassifier()
#   classifier.fit(data, labels)

    batch_size = 128
    num_classes = 46
    epochs = 12
    img_rows, img_cols = 32, 32
#    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = data
    y_train = labels
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)
#    x_test = x_test.reshape(x_test.shape[0],img_rows,img_cols,1)
    input_shape = (img_rows, img_cols, 3)
    x_train = x_train.astype('float32')
#    x_test = x_test.astype('float32')
  
#    x_test /= 255

    y_train = keras.utils.to_categorical(y_train, num_classes)
#    y_test = keras.utils.to_categorical(y_test, num_classes)

    classifier = Sequential()

    classifier.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                          input_shape=input_shape, padding='same'))
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2)))
    classifier.add(Dropout(0.25))
    classifier.add(Flatten())
    classifier.add(Dense(128, activation='relu'))
    classifier.add(Dropout(0.5))
    classifier.add(Dense(num_classes, activation='softmax'))

    classifier.compile(loss=keras.losses.categorical_crossentropy,
                       optimizer=keras.optimizers.Adadelta(),
                       metrics=['accuracy'])

    classifier.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
                   verbose=1, validation_data=None)

#    classifier.fit(data,labels)


    outputfile = "./model.pkl"
    outputfile2 = "./model2.pkl"
    joblib.dump(classes, outputfile)
    classifier.save(outputfile2)
Code example #8
File: NN_train3.py  Project: seigenji/alcon_src
def main(datasetdir, lv, length):
    pr = cProfile.Profile()
    pr.enable()
    config = tensorflow.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.93
    keras.backend.tensorflow_backend.set_session(
        tensorflow.Session(config=config))

    # Initialization
    alcon = AlconUtils(datasetdir)

    # Load the annotations
    alcon.load_annotations_target("target_lv1_samp_0.9.csv")
    alcon.load_annotations_ground("groundtruth_lv1_samp_0.9.csv")

    dataset = {}
    for bb_id, target in alcon.targets.items():
        code = alcon.ground_truth[bb_id][0]
        if code not in dataset:
            dataset[code] = []
        if len(dataset[code]) == int(length):
            continue
        img_filename = alcon.get_filename_char(bb_id)
        img = cv2.imread(img_filename)
        feature = MyAlgorithm.feature_extraction(img)
        dataset[code].append(feature)

    labels = []
    data = []
    classes = sorted(dataset.keys())

    for label, values in dataset.items():
        labels += [classes.index(label)] * len(values)
        data += values

    num_classes = 46
    input_shape = (32, 32, 1)  # (img_rows, img_cols, channel)

    classifier = keras.models.Sequential()
    classifier.add(
        keras.layers.normalization.BatchNormalization(input_shape=input_shape))
    classifier.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))  # 30*30
    classifier.add(Conv2D(64, (3, 3), activation='relu'))  # 28*28
    classifier.add(Dropout(0.5))
    classifier.add(MaxPooling2D(pool_size=(4, 4)))  # 7*7
    classifier.add(Flatten())
    classifier.add(Dense(128, activation='relu'))
    classifier.add(Dropout(0.5))
    classifier.add(Dense(num_classes, activation='softmax'))

    classifier.compile(loss=keras.losses.categorical_crossentropy,
                       optimizer=keras.optimizers.Nadam(),
                       metrics=['accuracy'])
    classifier.summary()

    x_data = np.asarray(data).reshape(len(data), *input_shape)
    y_train = keras.utils.to_categorical(labels, num_classes)
    classifier.fit(x_data, y_train, batch_size=84, epochs=12)

    joblib.dump(classes, "./model.pkl")
    classifier.save("./model2.pkl")
    pr.disable()
    pstats.Stats(pr).sort_stats('tottime').print_stats(5)
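The extra length argument caps how many crops are kept per character class (see the len(dataset[code]) == int(length) check above); a call keeping at most 100 samples per class could look like this (the dataset path is a placeholder):

main("/share/alcon/dataset/", "1", "100")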
Code example #9
if __name__ == "__main__":
    main(sys.argv[1], sys.argv[2])

# In[15]:

main_keras_NN("/share/alcon/dataset/", "1")

# In[ ]:

model = keras.models.load_model('./model.pkl')

# In[5]:

get_ipython().magic('pylab inline')
import matplotlib.pyplot as plt
alcon = AlconUtils("/share/alcon/dataset/")
# Load the annotations
fn = "target_lv1.csv"
alcon.load_annotations_target(fn)
fn = "groundtruth_lv1.csv"
alcon.load_annotations_ground(fn)

idx = '1200'
rect = alcon.targets[idx][1:5]
image = cv2.imread("/share/alcon/dataset/images/" + alcon.targets[idx][0] +
                   ".jpg")
#cv2.rectangle(image, rect[0:2], (rect[0]+rect[2],rect[1]+rect[3]),(255,0,0), 2)
#plt.imshow(image[:,:,(2,1,0)])
plt.imshow(image[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2], :])
#print ("/share/alcon/dataset/images"+alcon.targets[idx][0]+".jpg")
x = image[rect[1]:rect[1] + rect[3], rect[0]:rect[0] + rect[2], :].copy()
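To actually run the loaded network on the crop x, it has to be preprocessed the same way as at training time; a minimal sketch, assuming the 32x32 colour inputs of code example #7:

import numpy as np

crop = cv2.resize(x, (32, 32)).astype("float32")
probs = model.predict(crop[np.newaxis, ...])  # shape: (1, num_classes)
print(int(np.argmax(probs)))                  # index into the saved class list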
Code example #10
File: Alcon.py  Project: seigenji/alcon_src
# In[2]:

get_ipython().magic('pylab inline')
import matplotlib.pyplot as plt


# In[3]:

import numpy as np
import cv2
from alcon_utils import AlconUtils


# In[4]:

alcon = AlconUtils("/share/alcon/dataset/")


# In[5]:

alcon.load_annotations_target("target_lv2.csv")
alcon.load_annotations_ground("groundtruth_lv2.csv")


# In[6]:

print(alcon.targets['1000'])
print(alcon.ground_truth['1000'])


Code example #11
File: read_dataset.py  Project: anhtu95/prmu2017
    def load_data_target(self):
        #if os.path.exists(self.dump_name + '1'):
        #    self.load_dataset()
        if self.target is None:
            self.target = []
            self.data = []
            self.hash_target = []
            # Initialization
            alcon = AlconUtils(self.data_dir_path)

            # Load the annotations
            fn = "target_lv" + self.dump_name + ".csv"
            alcon.load_annotations_target(fn)

            fn = "groundtruth_lv" + self.dump_name + ".csv"
            alcon.load_annotations_ground(fn)

            for bb_id, target in alcon.targets.items():
                img_filename = alcon.get_filename_char(bb_id)
                #print(img_filename)
                code = alcon.ground_truth[bb_id]
                # Load the image as grayscale
                img = cv2.imread(img_filename, 0)
                #print(shape(img))
                height, width = img.shape
                WHITE = [255, 255, 255]
                if height > width:
                    img = cv2.copyMakeBorder(img,
                                             0,
                                             0, (height - width) // 2,
                                             int(np.ceil(
                                                 (height - width) / 2)),
                                             cv2.BORDER_CONSTANT,
                                             value=WHITE)
                else:
                    img = cv2.copyMakeBorder(img, (width - height) // 2,
                                             int(np.ceil(
                                                 (width - height) / 2)),
                                             0,
                                             0,
                                             cv2.BORDER_CONSTANT,
                                             value=WHITE)

                #print(img.shape)
                # Note: ndarray.resize reshapes in place (padding/truncating); it does not interpolate
                img.resize(self.image_size, self.image_size, 1)
                #img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                self.data.append(img)

                # Determine the class index of this sample
                index_target = -1
                try:
                    index_target = self.hash_target.index(code)
                except ValueError:
                    self.hash_target.append(code)
                    index_target = self.hash_target.index(code)
                self.target.append(index_target)

            self.data = np.array(self.data, np.float32)
            self.target = np.array(self.target, np.int32)
            self.categories = len(self.hash_target)
            print(self.data.shape)
            print(self.target.shape)
            print(self.categories)
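The enclosing class is not shown in this excerpt; assuming its constructor takes the dataset directory, the level suffix used in the annotation file names (dump_name) and the target image size (a hypothetical interface, named DatasetReader here purely for illustration), it would be used roughly like this:

reader = DatasetReader("/share/alcon/dataset/", dump_name="1", image_size=32)  # hypothetical class
reader.load_data_target()
print(reader.data.shape, reader.target.shape, reader.categories)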