Code example #1
def predict_preprocess(full_img, bbox):
    # Crop the target region from the full page image
    x, y, w, h = bbox
    target_img = full_img[y:y+h, x:x+w, :]

    # Extract features from the cropped image
    return MyAlgorithm.feature_extraction(target_img)
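Note: MyAlgorithm.feature_extraction is not shown in these examples. Since the training scripts below reshape the features back into 32×32 images (single-channel in examples #6 and #8, three-channel in #7), it is presumably a resize step. A minimal sketch under that assumption, illustrative only and not the actual implementation:

import cv2
import numpy as np

def feature_extraction(img):
    # Hypothetical sketch: grayscale, resize to 32x32, scale to [0, 1], flatten.
    # The CNN training scripts reshape this flat vector back to 32x32x1.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    resized = cv2.resize(gray, (32, 32))
    return (resized.astype(np.float32) / 255.0).flatten()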
Code example #2
def read_data(datasetdir, lv):
    alcon = AlconUtils(datasetdir)

    # Load the annotations
    fn = "target_lv" + lv + ".csv"
    alcon.load_annotations_target(fn)

    fn = "groundtruth_lv" + lv + ".csv"
    alcon.load_annotations_ground(fn)

    # Build the dataset for the KNN model
    dataset = {}
    for bb_id, target in alcon.targets.items():
        img_filename = alcon.get_filename_char(bb_id)
        code = alcon.ground_truth[bb_id][0]
        if code not in dataset:
            dataset[code] = []
        #if len(dataset[code]) == 100:
        #    continue
        img = cv2.imread(img_filename)
        feature = MyAlgorithm.feature_extraction(img)
        dataset[code].append(feature)

    labels = []
    data = []
    classes = sorted(dataset.keys())
    for label, values in dataset.items():
        labels += [classes.index(label)] * len(values)
        data += values

    # np.float / np.int were removed in recent NumPy; the builtin types are equivalent
    data = np.asarray(data, dtype=float)
    labels = np.asarray(labels, dtype=int)

    return data, labels, classes
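A minimal usage sketch for read_data, assuming the dataset layout used in the other examples (the directory path and level string are placeholders):

from sklearn.neighbors import KNeighborsClassifier

data, labels, classes = read_data("../../dataset/", "1")
classifier = KNeighborsClassifier()
classifier.fit(data, labels)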
Code example #3
def main(datasetdir, lv):
    """
    Main entry point of the evaluation code
    :param datasetdir: path to the directory containing the dataset
    """
    start = time.time()

    # Initialization
    print("Now evaluation...")

    alcon = AlconUtils(datasetdir)
    myalgorithm = MyAlgorithm(datasetdir)

    # Load the targets
    fn = "target_lv" + lv + "_test_5.csv"
    alcon.load_annotations_target(fn)

    results = {}
    prev = None
    # Loop that recognizes one target at a time
    for bb_id, target in alcon.targets.items():
        img_file_id, *bb = target
        img_filename = os.path.join(datasetdir, "images", img_file_id + ".jpg")

        if prev != img_filename:
            # Full page image (re-read only when the page changes)
            full_img = cv2.imread(img_filename)
            prev = img_filename

        # Recognition step (call your own algorithm here)
        result = myalgorithm.predict(full_img, bb)

        # Store the result keyed by bounding-box id
        results[bb_id] = result

    # Evaluation
    fnGround = "groundtruth_lv" + lv + "_test_5.csv"
    alcon.load_annotations_ground(fnGround)
    alcon.evaluation(results)

    # Write the results to a file
    alcon.write_results(results)

    elapsed_time = time.time() - start
    print("elapsed_time:", elapsed_time, "[sec]")
Code example #4
def main(datasetdir, lv):

    # Initialization
    alcon = AlconUtils(datasetdir)

    # Load the annotations
    fn = "target_lv" + lv + "_samp_5.csv"
    alcon.load_annotations_target(fn)

    fn = "groundtruth_lv" + lv + "_samp_5.csv"
    alcon.load_annotations_ground(fn)

    
    # Build the KNN model
    dataset = {}
    for bb_id, target in alcon.targets.items():
        img_filename = alcon.get_filename_char(bb_id)
        code = alcon.ground_truth[bb_id][0]
        if code not in dataset:
            dataset[code] = []
        # if len(dataset[code]) == 5:
        #     continue
        img = cv2.imread(img_filename)
        feature = MyAlgorithm.feature_extraction(img)
        dataset[code].append(feature)

    labels = []
    data = []
    classes = sorted(dataset.keys())
    for label, values in dataset.items():
        labels += [classes.index(label)] * len(values)
        data += values

    data = np.asarray(data, dtype=float)
    labels = np.asarray(labels, dtype=int)

    classifier = KNeighborsClassifier()  # edited here
    classifier.fit(data, labels)

    outputfile = "./model.pkl"
    joblib.dump((classes, classifier), outputfile)
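The saved tuple pairs the sorted class codes with the fitted classifier, so a predicted label index can be mapped back to its character code. A minimal inference sketch under that assumption (the image path is a placeholder, and the feature pipeline follows the examples above):

# Hypothetical inference: load the saved (classes, classifier) pair
# and classify one cropped character image.
classes, classifier = joblib.load("./model.pkl")
img = cv2.imread("some_character.jpg")  # placeholder path
feature = MyAlgorithm.feature_extraction(img)
label = classifier.predict(np.asarray([feature], dtype=float))[0]
print(classes[label])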
Code example #5
from alcon_utils import AlconUtils
import cv2

import os.path
import pickle

datasetdir = "../../dataset/"
annotation_name = "iiyama_0.1"
"""
評価コードのメイン
:param datasetdir データセットを格納したディレクトリへのパス
"""

# Initialization
alcon = AlconUtils(datasetdir)
myalgorithm = MyAlgorithm(datasetdir)  # NOTE: MyAlgorithm must be imported from its defining module (import not shown here)

# Load the targets
file_name_last = "_lv1_" + annotation_name + ".csv"
alcon.load_annotations_target("target" + file_name_last)

imgs = {}
results_pre = {}
# Loop that recognizes one target at a time
print("coco")  # leftover debug output

def predict_preprocess(full_img, bbox):
    # Crop the target region from the full page image
    x, y, w, h = bbox
    target_img = full_img[y:y+h, x:x+w, :]
Code example #6
def main(datasetdir, lv):
    config = tensorflow.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.95
    backend.tensorflow_backend.set_session(tensorflow.Session(config=config))

    # Initialization
    alcon = AlconUtils(datasetdir)

    # Load the annotations
    alcon.load_annotations_target("target_lv" + lv + ".csv")
    alcon.load_annotations_ground("groundtruth_lv" + lv + ".csv")

    dataset = {}
    for bb_id, target in alcon.targets.items():
        code = alcon.ground_truth[bb_id][0]
        if code not in dataset:
            dataset[code] = []
        # if len(dataset[code]) == 10:
        #     continue
        img_filename = alcon.get_filename_char(bb_id)
        img = cv2.imread(img_filename)
        feature = MyAlgorithm.feature_extraction(img)
        dataset[code].append(feature)

    labels = []
    data = []
    classes = sorted(dataset.keys())

    for label, values in dataset.items():
        labels += [classes.index(label)] * len(values)
        data += values

    batch_size = 128
    num_classes = 46
    epochs = 12
    img_rows, img_cols = 32, 32
    channel = 1

    classifier = Sequential()
    classifier.add(
        keras.layers.normalization.BatchNormalization(input_shape=(img_rows,
                                                                   img_cols,
                                                                   channel)))
    classifier.add(Conv2D(32, (3, 3), activation='relu'))
    classifier.add(Conv2D(64, (3, 3), activation='relu'))
    classifier.add(MaxPooling2D(pool_size=(4, 4)))
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(Dropout(0.5))
    classifier.add(Flatten())
    classifier.add(Dense(128, activation='relu'))
    classifier.add(Dropout(0.5))
    classifier.add(Dense(num_classes, activation='softmax'))

    classifier.compile(loss=keras.losses.categorical_crossentropy,
                       optimizer=keras.optimizers.Nadam(),
                       metrics=['accuracy'])
    classifier.summary()

    numpy_data = numpy.asarray(data, dtype=float)
    float_data = numpy_data.reshape(numpy_data.shape[0], img_rows, img_cols, channel) \
                           .astype('float32')

    numpy_labels = numpy.asarray(labels, dtype=int)
    y_train = keras.utils.to_categorical(numpy_labels, num_classes)
    classifier.fit(float_data,
                   y_train,
                   batch_size=batch_size,
                   epochs=epochs,
                   verbose=1,
                   validation_data=None)

    outputfile = "./model.pkl"
    outputfile2 = "./model2.pkl"
    joblib.dump(classes, outputfile)
    classifier.save(outputfile2)
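Because the network itself is saved with classifier.save() while the class list goes through joblib, inference would combine keras.models.load_model with joblib.load. A minimal sketch, assuming the same 32×32 single-channel features used in training (the image path is a placeholder):

from keras.models import load_model

classes = joblib.load("./model.pkl")
classifier = load_model("./model2.pkl")

img = cv2.imread("some_character.jpg")  # placeholder path
feature = MyAlgorithm.feature_extraction(img)
x = numpy.asarray(feature, dtype='float32').reshape(1, 32, 32, 1)
pred = classifier.predict(x)
print(classes[int(numpy.argmax(pred))])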
Code example #7
File: train.py  Project: IKKO-Ohta/ALCON16
def main(datasetdir, lv):

    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 1.0
    K.tensorflow_backend.set_session(tf.Session(config=config))
    
    # Initialization
    alcon = AlconUtils(datasetdir)

    # Load the annotations
    fn = "target_lv" + lv + ".csv"
    alcon.load_annotations_target(fn)

    fn = "groundtruth_lv" + lv + ".csv"
    alcon.load_annotations_ground(fn)

    
    # Build the dataset (the KNN classifier below was replaced by a CNN)
    dataset = {}
    print(len(alcon.targets.items()))
    for bb_id, target in alcon.targets.items():
        img_filename = alcon.get_filename_char(bb_id)
        code = alcon.ground_truth[bb_id][0]
        if code not in dataset:
            dataset[code] = []
        if len(dataset[code]) == 100:
            continue
        img = cv2.imread(img_filename)
        feature = MyAlgorithm.feature_extraction(img)
        dataset[code].append(feature)

    labels = []
    data = []
    classes = sorted(dataset.keys())
    print(len(dataset.items()))
    for label, values in dataset.items():
        labels += [classes.index(label)] * len(values)
        data += values

    data = np.asarray(data, dtype=float)
    labels = np.asarray(labels, dtype=int)

    print(data.shape)
    print(labels.shape)
#   classifier = KNeighborsClassifier()
#   classifier = RandomForestClassifier()
#   classifier.fit(data, labels)

    batch_size = 128
    num_classes = 46
    epochs = 12
    img_rows, img_cols = 32, 32
#    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = data
    y_train = labels
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)
#    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 3)
    x_train = x_train.astype('float32')
#    x_test = x_test.astype('float32')

#    x_test /= 255

    y_train = keras.utils.to_categorical(y_train, num_classes)
#    y_test = keras.utils.to_categorical(y_test, num_classes)

    classifier = Sequential()

    classifier.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                          input_shape=input_shape, padding='same'))
    classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    classifier.add(MaxPooling2D(pool_size=(2, 2)))
    classifier.add(Dropout(0.25))
    classifier.add(Flatten())
    classifier.add(Dense(128, activation='relu'))
    classifier.add(Dropout(0.5))
    classifier.add(Dense(num_classes, activation='softmax'))

    classifier.compile(loss=keras.losses.categorical_crossentropy,
                       optimizer=keras.optimizers.Adadelta(),
                       metrics=['accuracy'])

    classifier.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
                   verbose=1, validation_data=None)

#    classifier.fit(data,labels)


    outputfile = "./model.pkl"
    outputfile2 = "./model2.pkl"
    joblib.dump(classes, outputfile)
    classifier.save(outputfile2)
Code example #8
File: NN_train3.py  Project: seigenji/alcon_src
def main(datasetdir, lv, length):
    pr = cProfile.Profile()
    pr.enable()
    config = tensorflow.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.93
    keras.backend.tensorflow_backend.set_session(
        tensorflow.Session(config=config))

    # Initialization
    alcon = AlconUtils(datasetdir)

    # Load the annotations
    alcon.load_annotations_target("target_lv1_samp_0.9.csv")
    alcon.load_annotations_ground("groundtruth_lv1_samp_0.9.csv")

    dataset = {}
    for bb_id, target in alcon.targets.items():
        code = alcon.ground_truth[bb_id][0]
        if code not in dataset:
            dataset[code] = []
        if len(dataset[code]) == int(length):
            continue
        img_filename = alcon.get_filename_char(bb_id)
        img = cv2.imread(img_filename)
        feature = MyAlgorithm.feature_extraction(img)
        dataset[code].append(feature)

    labels = []
    data = []
    classes = sorted(dataset.keys())

    for label, values in dataset.items():
        labels += [classes.index(label)] * len(values)
        data += values

    num_classes = 46
    input_shape = (32, 32, 1)  # (img_rows, img_cols, channel)

    classifier = keras.models.Sequential()
    classifier.add(
        keras.layers.normalization.BatchNormalization(input_shape=input_shape))
    classifier.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))  # 30*30
    classifier.add(Conv2D(64, (3, 3), activation='relu'))  # 28*28
    classifier.add(Dropout(0.5))
    classifier.add(MaxPooling2D(pool_size=(4, 4)))  # 7*7
    classifier.add(Flatten())
    classifier.add(Dense(128, activation='relu'))
    classifier.add(Dropout(0.5))
    classifier.add(Dense(num_classes, activation='softmax'))

    classifier.compile(loss=keras.losses.categorical_crossentropy,
                       optimizer=keras.optimizers.Nadam(),
                       metrics=['accuracy'])
    classifier.summary()

    x_data = np.asarray(data).reshape(len(data), *input_shape)
    y_train = keras.utils.to_categorical(labels, num_classes)
    classifier.fit(x_data, y_train, batch_size=84, epochs=12)

    joblib.dump(classes, "./model.pkl")
    classifier.save("./model2.pkl")
    pr.disable()
    pstats.Stats(pr).sort_stats('tottime').print_stats(5)
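These main(datasetdir, lv, ...) entry points are presumably invoked from the command line; a minimal driver sketch (the argument handling is an assumption, not part of the original scripts):

import sys

if __name__ == "__main__":
    # Hypothetical driver: dataset directory, level, and per-class sample limit.
    main(sys.argv[1], sys.argv[2], sys.argv[3])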