Code Example #1
def main(unused_argv):
    # Load data
    train_x, train_y, val_x, val_y = load_training_validation_data(
        train_view=train_view,
        train_dir=train_dir,
        val_view=val_view,
        val_dir=val_dir)
    train_data = np.asarray([flatten(x) for x in train_x], dtype=np.float32)
    val_data = np.asarray([flatten(x) for x in val_x], dtype=np.float32)
    train_labels = np.asarray(train_y, dtype=np.int32)
    val_labels = np.asarray(val_y, dtype=np.int32)

    # Create the Estimator
    gei_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn,
                                            model_dir="./chpts")

    # Set up logging for predictions
    #tensors_to_log = {"probabilities": "softmax_tensor"}
    #logging_hook = tf.train.LoggingTensorHook(
    #	tensors=tensors_to_log, every_n_iter=5000)

    # Train the model
    train_input_fn = tf.estimator.inputs.numpy_input_fn(x={"x": train_data},
                                                        y=train_labels,
                                                        batch_size=BATCH_SIZE,
                                                        num_epochs=None,
                                                        shuffle=True)
    #gei_classifier.train(
    #	input_fn = train_input_fn,
    #	steps = 20000,
    #	hooks = [logging_hook])
    gei_classifier.train(input_fn=train_input_fn, steps=20000)

    # Evaluate the model and print results
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(x={"x": val_data},
                                                       y=val_labels,
                                                       num_epochs=1,
                                                       shuffle=False)
    eval_results = gei_classifier.evaluate(input_fn=eval_input_fn)
    print(eval_results)
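The Estimator above references a cnn_model_fn that is not shown in this snippet. A minimal TF 1.x-style sketch of such a model function is given below; the input size (a 64x64 GEI reshaped from the flattened feature vector) and NUM_CLASSES are illustrative assumptions, not values taken from the original project.

import tensorflow as tf

NUM_CLASSES = 11  # assumption: e.g. one class per viewing angle

def cnn_model_fn(features, labels, mode):
    # Reshape the flattened GEI back into an image; 64x64 is an assumed size.
    input_layer = tf.reshape(features["x"], [-1, 64, 64, 1])

    conv1 = tf.layers.conv2d(input_layer, filters=32, kernel_size=5,
                             padding="same", activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(conv1, pool_size=2, strides=2)
    conv2 = tf.layers.conv2d(pool1, filters=64, kernel_size=5,
                             padding="same", activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(conv2, pool_size=2, strides=2)

    flat = tf.layers.flatten(pool2)
    dense = tf.layers.dense(flat, units=1024, activation=tf.nn.relu)
    logits = tf.layers.dense(dense, units=NUM_CLASSES)

    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        # Named so a LoggingTensorHook (as in the commented-out code above) can find it.
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor"),
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
        train_op = optimizer.minimize(loss,
                                      global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(labels=labels,
                                        predictions=predictions["classes"])
    }
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                      eval_metric_ops=eval_metric_ops)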
Code Example #2
def load_angle_train_data(img_class, view_list, train_dir):
    human_id_list = ["%03d" % i for i in range(1, 51)]
    human_id_list.remove('005')
    human_id_list.remove('026')  #126
    human_id_list.remove('034')
    human_id_list.remove('037')  #144
    human_id_list.remove('046')
    human_id_list.remove('048')  #54

    if view_list is None:
        view_list = [
            "000", "018", "036", "054", "072", "090", "108", "126", "144",
            "162", "180"
        ]
    if train_dir is None:
        train_dir = ["nm-%02d" % i for i in range(1, 5)]

    training_x = []
    training_y = []

    # check dir exists
    for id in human_id_list:
        for dir in train_dir:
            for view in view_list:
                img_dir = "%s/%s/%s/%s" % (config.Project.casia_dataset_b_path,
                                           id, dir, view)
                if not os.path.exists(img_dir):
                    logger.error("%s do not exist" % img_dir)

    for id in human_id_list:
        logger.info("processing human %s" % id)
        for dir in train_dir:
            for view in view_list:
                img_dir = "%s/%s/%s/%s" % (config.Project.casia_dataset_b_path,
                                           id, dir, view)
                data = img_path_to_IMG(img_class, img_dir)
                if len(data.shape) > 0:
                    training_x.append(flatten(data))
                    training_y.append(view)
                else:
                    print("LOAD_ANGLE_TRAIN_DATA: fail to extract %s of %s" %
                          (img_dir, id))
                    sys.exit(0)

    return training_x, training_y
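For context, load_angle_train_data returns flattened images labeled with their viewing-angle string, so a caller would typically map those labels to integer class indices before training an angle classifier. A minimal sketch follows; the img_class value "GEI" is an assumption:

import numpy as np

# Hypothetical usage: build an angle-classification training set.
view_list = ["000", "018", "036", "054", "072", "090",
             "108", "126", "144", "162", "180"]
train_x, train_y = load_angle_train_data(img_class="GEI",
                                         view_list=view_list,
                                         train_dir=None)  # defaults to nm-01..nm-04
train_data = np.asarray(train_x, dtype=np.float32)
train_labels = np.asarray([view_list.index(v) for v in train_y], dtype=np.int32)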
Code Example #3
import os

# Project imports (module paths taken from Code Example #4)
from data_tool import load_training_validation_data
from model.models import RandomForestClassification
from feature.hog import flatten

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

train_view = "090"
val_view = "090"
train_dir = ["nm-01"]
val_dir = ["nm-02"]

if __name__ == '__main__':
    train_x, train_y, val_x, val_y = load_training_validation_data(
        train_view=train_view,
        train_dir=train_dir,
        val_view=val_view,
        val_dir=val_dir)
    train_feature_x = [flatten(x) for x in train_x]
    val_feature_x = [flatten(x) for x in val_x]

    model = RandomForestClassification()
    model.fit(x_train=train_feature_x, y_train=train_y)
    predict_y = model.predict(val_feature_x)

    print "predict_y: "
    print predict_y

    correct_count = sum(predict_y == val_y)
    accuracy = correct_count * 1.0 / len(val_y)

    print "train view: %s, val view: %s, accuracy: %d/%d=%.3f" % \
     (train_view, val_view, correct_count, len(val_y), accuracy)
Code Example #4
File: main.py  Project: estyvalankomah/AI-Project
import logging
import pickle

import config  # assumed: project configuration module providing Project.project_path

from data_tool import load_training_validation_data
from model.models import RandomForestClassification
from feature.hog import flatten


logger = logging.getLogger("main")

level = logging.INFO
log_filename = '%s.log' % __file__
format = '%(asctime)-12s[%(levelname)s] %(message)s'
datefmt = '%Y-%m-%d %H:%M:%S'
logging.basicConfig(level=level,
                    format=format,
                    filename=log_filename,
                    datefmt=datefmt)

if __name__ == '__main__':
    train_dir = "train_dir"
    val_dir = "test_dir"

    training_x, training_y, validation_x, validation_y = load_training_validation_data(
        train_dir=train_dir, val_dir=val_dir)
    training_feature_x = [flatten(x) for x in training_x]
    validation_feature_x = [flatten(x) for x in validation_x]
    
    logger.info("start")
    model = RandomForestClassification()
    model.fit(x_train=training_feature_x, y_train=training_y)
    predict_file = "%s\model\predict.pickle" % config.Project.project_path
    pickle.dump(model, open(predict_file, 'wb'))
    predict_y = model.predict(validation_feature_x)
    logger.info("done")
Code Example #5
def load_data(img_class, data_class, probe_view, probe_dir, gallery_dir):
    probe_type = probe_dir[0][0:2].upper()
    if data_class == "validation":
        human_id_list = ["%03d" % i for i in range(51, 75)]
        human_id_list.remove('067')
        human_id_list.remove('068')
    elif data_class == "testing":
        human_id_list = ["%03d" % i for i in range(75, 125)]
        human_id_list.remove('079')  #054
        human_id_list.remove('088')  #054
        human_id_list.remove('109')  #126
    else:
        print("Wrong data class")
        sys.exit(1)

    probe_x = []
    probe_y = []
    gallery_x = []
    gallery_y = []

    paired_x = []
    paired_y = []
    paired_data = []

    # check dir exists
    for id in human_id_list:
        for dir in probe_dir:
            img_dir = "%s/%s/%s/%s" % (config.Project.casia_dataset_b_path, id,
                                       dir, probe_view)
            if not os.path.exists(img_dir):
                logger.error("%s do not exist" % img_dir)
        for dir in gallery_dir:
            img_dir = "%s/%s/%s/%s" % (config.Project.casia_dataset_b_path, id,
                                       dir, probe_view)
            if not os.path.exists(img_dir):
                logger.error("%s do not exist" % img_dir)

    # get probe list
    for id in human_id_list:
        logger.info("processing human %s" % id)
        for dir in probe_dir:
            img_dir = "%s/%s/%s/%s" % (config.Project.casia_dataset_b_path, id,
                                       dir, probe_view)
            probe_x.append(img_dir)
            probe_y.append(id)

    view_list = [
        "000", "018", "036", "054", "072", "090", "108", "126", "144", "162",
        "180"
    ]

    # get gallery list
    for id in human_id_list:
        for dir in gallery_dir:
            for view in view_list:
                img_dir = "%s/%s/%s/%s" % (config.Project.casia_dataset_b_path,
                                           id, dir, view)
                gallery_x.append(img_dir)
                gallery_y.append(id)

    x_range = len(human_id_list) - 1
    view_coe = len(view_list)

    gallery_coe = len(view_list) * len(gallery_dir)
    gallery_dir_range = len(gallery_dir) - 1

    # get probe data
    probe_imgs = [flatten(img_path_to_IMG(img_class, x)) for x in probe_x]
    angles = get_angle(img_class, probe_type, np.asarray(probe_imgs))
    #angles = np.asarray(len(probe_imgs)*[probe_view])

    # test accuracy
    #print(angles)
    accuracy = sum(angles == [probe_view]) * 1.0 / len(angles)
    print(accuracy)

    for probe_idx, angle in enumerate(angles):
        i = random.randint(0, gallery_dir_range)
        v = view_list.index(angle)
        gallery_idx = gallery_coe * (probe_idx //
                                     len(probe_dir)) + view_coe * i + v
        gallery_img = flatten(
            img_path_to_IMG(img_class, gallery_x[gallery_idx]))
        probe_img = probe_imgs[probe_idx]

        if len(probe_img) > 0 and len(gallery_img) > 0:
            paired_data.append(
                [[np.asarray(probe_img),
                  np.asarray(gallery_img)], [0, 1]])
        else:
            print("LOAD_DATA: fail to extract %s of %s" % (img_dir, id))

        x = random.randint(0, x_range)
        gallery_idx = gallery_coe * (x) + view_coe * i + v
        while (gallery_y[gallery_idx] == probe_y[probe_idx]):
            x = random.randint(0, x_range)
            gallery_idx = gallery_coe * (x) + view_coe * i + v
        gallery_img = flatten(
            img_path_to_IMG(img_class, gallery_x[gallery_idx]))

        if len(probe_img) > 0 and len(gallery_img) > 0:
            paired_data.append(
                [[np.asarray(probe_img),
                  np.asarray(gallery_img)], [1, 0]])
        else:
            print("LOAD_DATA: fail to extract %s of %s" % (img_dir, id))

    random.shuffle(paired_data)

    paired_x = np.asarray([x.tolist() for x in np.asarray(paired_data)[:, 0]])
    paired_y = np.asarray(paired_data)[:, 1]

    return paired_x, paired_y
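Each entry of paired_x holds a (probe, gallery) pair of flattened images, and the matching paired_y entry is [0, 1] for a genuine pair or [1, 0] for an impostor pair. A hypothetical call and unpacking might look like the following; the probe/gallery directory choices are illustrative assumptions, not taken from the original code:

import numpy as np

# Hypothetical usage: build validation pairs for probe/gallery verification.
paired_x, paired_y = load_data(img_class="GEI",
                               data_class="validation",
                               probe_view="090",
                               probe_dir=["nm-05", "nm-06"],
                               gallery_dir=["nm-01", "nm-02", "nm-03", "nm-04"])

probe_batch = paired_x[:, 0]    # flattened probe images
gallery_batch = paired_x[:, 1]  # flattened gallery images
same_person = np.asarray([y == [0, 1] for y in paired_y])  # True for genuine pairs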
Code Example #6
File: test.py  Project: estyvalankomah/AI-Project
        # fragment of a GUI event loop; window, values, modell and validation_x
        # are presumably defined earlier in the file
        break

    if event == 'Show Image':
        window['-IMAGE-'].update(filename=values[0], size=(500, 200))

    if event == 'Submit':
        # Read the selected image, compute its GEI and flatten it into a feature vector
        img_path = values[0]
        values = []
        img_dir = []
        im = imread(img_path)
        img_path = ''
        img_dir.append(im)
        data = img_path_to_GEI(img_dir)
        img_dir = []
        validation_x.append(data)
        validation_feature_x = [flatten(x) for x in validation_x]
        validation_x = []

        # Predict the age-group label with the previously trained model
        predict_y = modell.predict(validation_feature_x)
        validation_feature_x = []
        age_group_ID = predict_y[0]
        predict_y = []
        age_group = ''

        # Map the predicted class label to a human-readable age range
        if age_group_ID == 'A':
            age_group = '0 - 5'
        elif age_group_ID == 'B':
            age_group = '6 - 10'
        elif age_group_ID == 'C':
            age_group = '11 - 15'
        elif age_group_ID == 'D':