def evaluating_fast(eval_data):
    """Run warm-up and timed fast predictions against a saved NN model.

    Performs two untimed predictions (indices 0 and 1 of *eval_data*),
    then two timed ones (indices 3 and 0), printing each result and the
    elapsed time for the timed runs.
    """
    print('Evaluating...')

    # Loading instances
    nn_model = ClassNN(model_dir='/tmp/model', classes=10, hidden_number=50)

    # Warm-up predictions: no timing, just print the raw result
    for sample_index in (0, 1):
        print(nn_model.predict_model_fast(eval_data[sample_index]))

    # Timed predictions: measure only the predict call itself
    for sample_index in (3, 0):
        start_time = time.time()
        prediction = nn_model.predict_model_fast(eval_data[sample_index])
        elapsed = time.time() - start_time
        print(prediction)
        print('Time elapsed: {0}'.format(elapsed))

    print('Done evaluating fast')
def evaluating_fast_nn():
    """Load one image, build a pose descriptor, and run a fast NN prediction.

    Loads the image/pose via ClassDescriptors, builds a descriptor from the
    pose angles followed by the flattened transformed points, then prints the
    predicted key pose and its probability.
    """
    print('Initializing evaluating fast nn')

    classes = 8
    hidden_layers = 40
    instance_nn = ClassNN(model_dir=ClassNN.model_dir_pose, classes=classes, hidden_number=hidden_layers)
    instance_pose = ClassOpenPose()

    info = ClassDescriptors.load_images_comparision_ext(instance_pose, min_score=0.05, load_one_img=True)
    pose1 = info['pose1']

    items = ClassDescriptors.get_person_descriptors(pose1, 0.05)

    # Valid pose for detection: angles first, then flattened transformed points
    data_to_add = list()
    data_to_add += items['angles']
    data_to_add += ClassUtils.get_flat_list(items['transformedPoints'])

    # np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float yields the same dtype (float64).
    data_np = np.asanyarray(data_to_add, dtype=float)
    result = instance_nn.predict_model_fast(data_np)
    print(result)
    key_pose = result['classes']
    probability = result['probabilities'][key_pose]

    print('Key pose: {0}'.format(key_pose))
    print('Probability: {0}'.format(probability))
    print('Done!')
# Example #3
def calculate_poses(option: Option, nn_classifier: ClassNN, svm_classifier: ClassSVM):
    """Reclassify every pose found in *_rawdata JSON files and persist results.

    Walks each folder listed in the module-level list_classes, reclassifies
    every pose with the NN or SVM classifier depending on *option*, stores
    the class and probabilities back into the JSON, and writes the result to
    a sibling *_posedata file.

    Args:
        option: Option.NN selects the NN classifier; anything else uses SVM.
        nn_classifier: trained ClassNN instance used when option == Option.NN.
        svm_classifier: trained ClassSVM instance used otherwise.
    """
    print('Calculating poses using nn')

    # Recalculate all poses and get confidence
    for class_info in list_classes:
        folder = class_info['folderPath']
        for root, _, files in os.walk(folder):
            for file in files:
                full_path = os.path.join(root, file)
                ext = ClassUtils.get_filename_extension(full_path)

                if '_rawdata' in file and ext == '.json':
                    print('Processing file: {0}'.format(full_path))

                    with open(full_path, 'r') as f:
                        file_txt = f.read()

                    file_json = json.loads(file_txt)
                    list_poses = file_json['listPoses']

                    for pose in list_poses:
                        transformed_points = pose['transformedPoints']

                        # Descriptor uses only the transformed points here.
                        # NOTE(review): unlike cnn_reprocess_images, the pose
                        # angles are NOT included -- confirm this is intentional.
                        list_desc = list()
                        list_desc += ClassUtils.get_flat_list(transformed_points)

                        # Convert to numpy -- np.float was removed in NumPy 1.24;
                        # builtin float gives the same float64 dtype.
                        list_desc_np = np.asanyarray(list_desc, float)

                        if option == Option.NN:
                            result = nn_classifier.predict_model_fast(list_desc_np)
                        else:
                            result = svm_classifier.predict_model(list_desc_np)

                        pose['class'] = int(result['classes'])
                        pose['probabilities'] = result['probabilities'].tolist()

                    # Writing again into file
                    file_txt = json.dumps(file_json, indent=4)
                    new_full_path = ClassUtils.change_ext_training(full_path,
                                                                   '{0}_posedata'.format(option.value))

                    with open(new_full_path, 'w') as f:
                        f.write(file_txt)

                    # Done

    print('Done processing elements')
def cnn_reprocess_images():
    """Re-run the pose NN over every JSON file in the CNN class folder.

    For each pose, builds a descriptor from angles plus flattened transformed
    points, predicts the key pose, stores it (with probability fixed at 1)
    back into the JSON, and rewrites the file in place.

    Relies on the module-level classes_number and hidden_layers globals.
    """
    print('Re processing images')
    list_folders = list()
    list_folders.append(ClassUtils.cnn_class_folder)

    # Loading instances
    instance_nn = ClassNN(ClassNN.model_dir_pose, classes_number, hidden_layers)

    # File walk
    count = 0
    for folder in list_folders:
        for root, _, files in os.walk(folder):
            for file in files:
                full_path = os.path.join(root, file)
                extension = ClassUtils.get_filename_extension(full_path)

                if extension == '.json':
                    print('Processing file: {0}'.format(full_path))

                    with open(full_path, 'r') as f:
                        json_txt = f.read()

                    json_data = json.loads(json_txt)
                    list_poses = json_data['listPoses']

                    for pose in list_poses:
                        angles = pose['angles']
                        transformed_points = pose['transformedPoints']

                        # Descriptor layout: angles first, then flat points
                        list_desc = list()
                        list_desc += angles
                        list_desc += ClassUtils.get_flat_list(transformed_points)

                        # np.float was removed in NumPy 1.24; builtin float
                        # produces the same float64 dtype.
                        list_desc_np = np.asanyarray(list_desc, dtype=float)

                        res = instance_nn.predict_model_fast(list_desc_np)
                        pose['keyPose'] = int(res['classes'])
                        # Probability deliberately forced to 1 (reprocess pass)
                        pose['probability'] = 1

                    # Writing data again (in place)
                    data_txt = json.dumps(json_data, indent=2)
                    with open(full_path, 'w') as f:
                        f.write(data_txt)

                    count += 1

    print('Done')
    print('Total files processed: {0}'.format(count))
# Example #5
def eval_model(eval_data_np: np.ndarray, eval_labels_np: np.ndarray,
               instance_nn_train: ClassNN):
    """Evaluate the trained NN and print its confusion matrix.

    Class count comes from the module-level list_folder_data; rows of the
    matrix are expected labels, columns are predicted classes.
    """
    num_classes = len(list_folder_data)

    # Evaluate
    instance_nn_train.eval_model(eval_data_np, eval_labels_np)

    # Getting confusion matrix
    print('Getting confusion matrix')

    confusion = np.zeros((num_classes, num_classes))
    for sample, expected_label in zip(eval_data_np, eval_labels_np):
        predicted_class = instance_nn_train.predict_model_fast(sample)['classes']
        confusion[expected_label, predicted_class] += 1

    print('Confusion matrix')
    print(confusion)

    print('Done!')
# Example #6
def classify_images(list_folder_data: list, type_desc: EnumDesc):
    """Interactive entry point to train/evaluate/predict/export a pose NN classifier.

    Prompts the user and dispatches on the selection:
    '1' train then evaluate, '2' evaluate only, '3' predict one sample,
    '4' dump descriptors to CSV, '5' print a confusion matrix.

    Relies on the module-level csv_dir, csv_dir_files, csv_train and csv_eval
    output paths and on ClassLoadDescriptors for descriptor loading.
    Raises Exception for any other selection.
    """
    classes_number = 0

    # Count the classes: keep incrementing while some entry's class id
    # (folder_data[2] -- presumably the class index; verify against the
    # list_folder_data producer) equals the current candidate. Net effect:
    # classes_number ends as the length of the contiguous id run 0..N-1.
    cont = True
    while cont:
        cont = False

        for folder_data in list_folder_data:
            if folder_data[2] == classes_number:
                classes_number += 1
                cont = True
                break

    # Fixed training hyper-parameters
    hidden_number = 60
    learning_rate = 0.005
    steps = 20000

    # Initialize classifier instance
    nn_classifier = ClassNN(model_dir=ClassNN.model_dir_pose,
                            classes=classes_number,
                            hidden_number=hidden_number,
                            learning_rate=learning_rate)

    # Load all descriptors (train/eval splits, file lists, label names)
    results = ClassLoadDescriptors.load_pose_descriptors(type_desc)

    training_data_np = results['trainingData']
    training_labels_np = results['trainingLabels']
    eval_data_np = results['evalData']
    eval_labels_np = results['evalLabels']
    training_files_np = results['trainingFiles']
    eval_files_np = results['evalFiles']
    label_names = results['labelNames']

    # Prompt for user input
    selection = input('Training selected {0}. '
                      'Press 1 to train, 2 to evaluate, 3 to predict, 4 to save csv, '
                      '5 to get confusion matrix: '.format(type_desc))

    if selection == '1':
        # Training
        nn_classifier.train_model(train_data=training_data_np,
                                  train_labels=training_labels_np,
                                  label_names=label_names,
                                  steps=steps)

        # Evaluate after training
        nn_classifier.eval_model(eval_data_np, eval_labels_np)
    elif selection == '2':
        # Evaluate
        nn_classifier.eval_model(eval_data_np, eval_labels_np)
    elif selection == '3':
        # Predict
        # Select data to eval (first eval sample only)
        data_eval = eval_data_np[0]
        label_eval = eval_labels_np[0]

        results = nn_classifier.predict_model(data_eval)
        print('Predict data np: {0}'.format(results))
        print('Expected data np: {0}'.format(label_eval))
    elif selection == '4':
        # Saving file in csv: full dataset = training + eval concatenated
        total_data = np.concatenate((training_data_np, eval_data_np), axis=0)
        total_labels = np.concatenate((training_labels_np, eval_labels_np), axis=0)
        total_files = np.concatenate((training_files_np, eval_files_np))

        # Add new axis to allow concatenation
        total_labels = total_labels[:, np.newaxis]
        total_files = total_files[:, np.newaxis]

        # Labels appended as the last column of the data matrix
        total_np = np.concatenate((total_data, total_labels), axis=1)

        print('Saving data to CSV in file {0}'.format(csv_dir))
        np.savetxt(csv_dir, total_np, delimiter=',', fmt='%10.10f')
        np.savetxt(csv_dir_files, total_files, delimiter=',', fmt='%s')

        print('Saving training data')
        # Concatenate with new axis
        total_np_train = np.concatenate((training_data_np, training_labels_np[:, np.newaxis]), axis=1)
        total_np_eval = np.concatenate((eval_data_np, eval_labels_np[:, np.newaxis]), axis=1)

        # Saving separate train/eval CSVs
        np.savetxt(csv_train, total_np_train, delimiter=',', fmt='%10.10f')
        np.savetxt(csv_eval, total_np_eval, delimiter=',', fmt='%10.10f')

        print('Done writing file in CSV')
    elif selection == '5':
        print('Getting confusion matrix')

        # Rows: expected labels; columns: predicted classes
        confusion_np = np.zeros((classes_number, classes_number))
        for i in range(eval_data_np.shape[0]):
            data = eval_data_np[i]
            expected = eval_labels_np[i]
            obtained = nn_classifier.predict_model_fast(data)
            class_prediction = obtained['classes']
            print('Class: {0}'.format(class_prediction))

            confusion_np[expected, class_prediction] += 1

        print('Confusion matrix')
        print(confusion_np)
        print('Labels: {0}'.format(label_names))
    else:
        raise Exception('Option not supported')
# Example #7
def load_descriptors(instance_nn_train: ClassNN, instance_nn_pose: ClassNN,
                     pose_type: Desc):
    """Build train/eval descriptor sets from JSON pose files, then train or eval.

    For each folder in the module-level list_folder_data, collects JSON files,
    shuffles them with the module-level seed, splits 80/20 into train/eval,
    and builds one fixed-length descriptor per file by sampling samples_size
    poses. Descriptor content depends on *pose_type*:
    Desc.POSES -> pose-NN class probabilities, Desc.ALL -> angles + flat
    points, Desc.POINTS -> flat points only. Finally prompts the user to
    either train or evaluate instance_nn_train.

    Raises:
        Exception: on an unrecognized pose_type, when no files are found,
            or when the user selects an unimplemented option.
    """
    training_data = list()
    training_labels = list()
    eval_data = list()
    eval_labels = list()
    training_files = list()
    eval_files = list()

    for item in list_folder_data:
        folder = item['folderPath']
        label = item['label']

        print('Processing folder path: {0}'.format(folder))

        num_file = 0
        list_paths = list()
        for root, _, files in os.walk(folder):
            for file in files:
                full_path = os.path.join(root, file)
                extension = ClassUtils.get_filename_extension(full_path)

                if extension == '.json':
                    list_paths.append(full_path)

        # 80/20 train/eval split per folder
        total_samples = len(list_paths)
        total_train = int(total_samples * 80 / 100)

        # Shuffle samples deterministically (module-level seed)
        random.Random(seed).shuffle(list_paths)

        for full_path in list_paths:
            # Reading data
            with open(full_path, 'r') as f:
                json_txt = f.read()

            json_data = json.loads(json_txt)

            list_poses = json_data['listPoses']

            # Sampling data: pick samples_size poses spread evenly over the list
            descriptor = list()
            for index_size in range(samples_size):
                index_pose = int(len(list_poses) * index_size / samples_size)
                pose = list_poses[index_pose]
                transformed_points = pose['transformedPoints']
                angles = pose['angles']

                list_desc = list()
                list_desc += angles
                list_desc += ClassUtils.get_flat_list(transformed_points)

                if pose_type == Desc.POSES:
                    # np.float was removed in NumPy 1.24; builtin float is the
                    # same float64 dtype.
                    list_desc_np = np.asanyarray(list_desc, dtype=float)
                    res = instance_nn_pose.predict_model_fast(list_desc_np)

                    # Add descriptor with probabilities
                    for elem in res['probabilities']:
                        descriptor.append(elem)
                elif pose_type == Desc.ALL:
                    for elem in list_desc:
                        descriptor.append(elem)
                elif pose_type == Desc.POINTS:
                    list_flat = ClassUtils.get_flat_list(transformed_points)
                    for elem in list_flat:
                        descriptor.append(elem)
                else:
                    raise Exception(
                        'Pose type not recognized: {0}'.format(pose_type))

            if num_file < total_train:
                training_data.append(descriptor)
                training_labels.append(label)
                training_files.append(full_path)
            else:
                eval_data.append(descriptor)
                eval_labels.append(label)
                eval_files.append(full_path)

            num_file += 1

    # Convert data to numpy array
    training_data_np = np.asanyarray(training_data, dtype=float)
    training_labels_np = np.asanyarray(training_labels, dtype=int)

    eval_data_np = np.asanyarray(eval_data, dtype=float)
    eval_labels_np = np.asanyarray(eval_labels, dtype=int)

    print('Shape images training: {0}'.format(training_data_np.shape))
    print('Shape labels training: {0}'.format(training_labels_np.shape))

    if training_data_np.shape[0] == 0:
        raise Exception('No files found!')

    res = input('Press 1 to train - 2 to eval: ')

    if res == '1':
        train_model(training_data_np,
                    training_labels_np,
                    eval_data_np,
                    eval_labels_np,
                    instance_nn_train,
                    steps=30000)
    elif res == '2':
        eval_model(eval_data_np, eval_labels_np, instance_nn_train)
    else:
        raise Exception('Option not implemented!')
def train_bow(training_list_cls, training_cls_labels, validate_list_cls,
              validate_cls_labels, eval_list_actions, eval_labels,
              option: Option, base_data_1, base_data_2):
    """Train, validate and evaluate a Bag-of-Words activity classifier.

    Builds BoW descriptors for the training, validation and evaluation action
    lists, trains a ClassNN on the training set, reports validation ("local")
    and evaluation ("real") accuracy plus a confusion matrix, and finally
    applies the trained classifier via apply_classifier.
    """
    print('Training BoW')

    # Generating BoW descriptors
    train_descriptors = list()
    for list_actions in training_list_cls:
        words = get_bow_descriptors(list_actions)
        train_descriptors.append(words)

    # np.float / np.int were removed in NumPy 1.24; the builtins give the
    # same dtypes (float64 / platform int).
    descriptors_np = np.asanyarray(train_descriptors, dtype=float)
    training_labels_np = np.asanyarray(training_cls_labels, dtype=int)

    # Generating instance_nn and train model
    cls_number = len(list_classes)
    hidden_neurons = 20
    instance_nn = ClassNN(ClassNN.model_dir_activity, cls_number,
                          hidden_neurons)
    instance_nn.train_model(descriptors_np, training_labels_np)

    # Validating model
    validate_descriptors = list()
    for list_actions in validate_list_cls:
        words = get_bow_descriptors(list_actions)
        validate_descriptors.append(words)

    validate_descriptors_np = np.asanyarray(validate_descriptors,
                                            dtype=float)
    validate_labels_np = np.asanyarray(validate_cls_labels, dtype=int)

    accuracy = instance_nn.eval_model(validate_descriptors_np,
                                      validate_labels_np)
    print('Local accuracy: {0}'.format(accuracy))

    # Evaluating
    eval_descriptors = list()
    for list_actions in eval_list_actions:
        words = get_bow_descriptors(list_actions)
        eval_descriptors.append(words)

    eval_descriptors_np = np.asanyarray(eval_descriptors, dtype=float)
    eval_labels_np = np.asanyarray(eval_labels, dtype=int)

    real_accuracy = instance_nn.eval_model(eval_descriptors_np, eval_labels_np)
    print('Real accuracy: {0}'.format(real_accuracy))

    # Confusion matrix: rows are expected labels, columns predicted classes
    classes_number = len(list_classes)
    confusion_np = np.zeros((classes_number, classes_number))
    for i in range(eval_descriptors_np.shape[0]):
        data = eval_descriptors_np[i]
        expected = eval_labels_np[i]
        obtained = instance_nn.predict_model_fast(data)
        class_prediction = obtained['classes']
        print('Class: {0}'.format(class_prediction))

        confusion_np[expected, class_prediction] += 1

    print('Confusion matrix')
    print(confusion_np)

    apply_classifier(option,
                     base_data_1,
                     base_data_2,
                     instance_nn=instance_nn,
                     accuracy=accuracy,
                     real_accuracy=real_accuracy)