def load_validation_set(
        data_source: DataSources = DataSources.VALIDATION_1) -> [Data]:
    """Load validation samples (image path, 2D landmarks, pose) from CSV.

    Reads ``./<root>/validation_set.csv``; for each non-empty row, loads the
    matching ``.pts`` landmark file from ``./<root>/images`` and builds a
    ``Data(image_path, landmarks_2d, pose)`` record.

    :param data_source: enum whose ``value`` is ``(root_folder, image_type)``.
    :return: list of ``Data`` records.
    """
    root_folder, image_type = data_source.value

    data: [Data] = []
    with open('./%s/validation_set.csv' % root_folder, newline='') as csv_file:
        validation_csv = csv.reader(csv_file, delimiter=',', quotechar='|')
        for row in validation_csv:
            if row[0]:
                image = row[1]
                # np.float was removed in NumPy 1.24 -- use builtin float.
                rx, ry, rz, tx, ty, tz = np.array(row[2:8]).astype(float)
                scale = tz  # tz doubles as the scale component of the pose
                image_ = './%s/images/%s' % (root_folder, image)
                pose = np.array([scale, rx, ry, rz, tx, ty])

                # Companion landmark file: same name, .pts extension.
                pts_path = './%s/images/%s' % (
                    root_folder, image.replace('.png', '.pts'))
                with open(pts_path) as pts_file:
                    lines = [line.strip() for line in pts_file]
                # Keep only the lines between the '{' and '}' delimiters of
                # the pts format; each remaining line is "x y".
                lines = lines[lines.index('{') + 1:lines.index('}')]
                landmarks_2d = np.array(
                    [line.split(' ') for line in lines]).astype(float)

                data.append(Data(image_, landmarks_2d, pose))

    return data
def load_validation_dataset2(
        data_source: DataSources = DataSources.VALIDATION_2,
        recalc_pose=True) -> [Data]:
    """Load the second validation set from ``<root>/valid_set2.csv``.

    Landmarks are stored as bracketed, space-padded strings in columns 8+ and
    are parsed into float32 arrays. The pose is either re-estimated from the
    landmarks via the face model (``recalc_pose=True``) or taken verbatim
    from columns 2..7.

    :param data_source: enum whose ``value`` is ``(root_folder, image_type)``.
    :param recalc_pose: recompute pose from landmarks instead of trusting CSV.
    :return: list of ``Data`` records.
    """
    root_folder, image_type = data_source.value

    log.info('load_validation_data2::%s:: started' % data_source.name)
    data = []

    from GenerateTrainingSet import get_face_model, calc_projection, estimate_pose_from_landmarks
    face_model = get_face_model()

    with open('%s/valid_set2.csv' % root_folder, newline='') as csv_file:
        validation_csv = csv.reader(csv_file, delimiter=',', quotechar='|')
        for row in validation_csv:
            if row[0]:
                image = row[1]
                # Raw strings: '[\[\]]' without the r-prefix is an invalid
                # escape sequence (SyntaxWarning since Python 3.12); the
                # pattern values themselves are unchanged.
                landmarks = np.array([
                    np.array(re.sub(r'[\[\]]', '',
                                    re.sub(r'[ ]+', ' ',
                                           landmark)).strip().split(' '),
                             dtype=np.float32) for landmark in row[8:]
                ])
                if recalc_pose:
                    proj_mat_t_land = calc_projection(landmarks,
                                                      face_model.model_TD,
                                                      face_model)
                    pose = estimate_pose_from_landmarks(
                        proj_mat_t_land, face_model)
                else:
                    pose = np.array(row[2:8], dtype=np.float32)
                data.append(
                    Data('%s/%s' % (root_folder, image), landmarks, pose))

    return data
Example #3
0
def detect_face_dlib(data: Data) -> Data:
    """Detect the face bounding box in ``data.image`` with the dlib detector.

    On success sets ``data.bbox`` to ``np.array([x, y, w, h])``; when several
    faces are found, the one with the highest detector score is kept. On
    failure the record is returned unchanged (bbox not set).

    :param data: record whose ``image`` attribute is the image file path.
    :return: the same ``Data`` record, possibly with ``bbox`` populated.
    """
    image_mat = dlib.load_rgb_image(data.image)
    faces, scores, idx = dlib_detector.run(image_mat, 1, -1)

    if len(faces) == 0:
        log.error(
            'detect_face_dlib:: ERROR - failed to find face bbox for %s' %
            data.image)
        return data

    # Pick the detection with the maximum score (first max wins on ties,
    # matching the original manual scan; index 0 for a single face).
    best = max(range(len(faces)), key=lambda i: scores[i])
    face = faces[best]
    x = face.left()
    y = face.top()
    w = face.right() - x
    h = face.bottom() - y

    data.bbox = np.array([x, y, w, h])
    return data
def load_test_set(data_source: DataSources = DataSources.TEST_SET) -> [Data]:
    """Collect every image of the configured type under the test-set root.

    :param data_source: enum whose ``value`` is ``(root_folder, image_type)``.
    :return: list of ``Data`` records with no landmarks/pose (image path only).
    """
    root_folder, image_type = data_source.value

    suffix = '.%s' % image_type
    collected: [Data] = []
    for _root, _dirs, file_names in os.walk("%s" % root_folder, topdown=False):
        collected.extend(
            Data('%s/%s' % (root_folder, file_name), None, None)
            for file_name in file_names
            if file_name.endswith(suffix))

    return collected
Example #5
0
def main():
    """Train a C4.5 (entropy) decision tree on the character set and report."""

    ### PDI ~ C4.5 (earlier cat/non-cat experiment, kept for reference)
    # train_x, train_y, test_x, test_y = Data().fetchFromH5('train_catvnoncat.h5', 'test_catvnoncat.h5')
    # criterion = "entropy"

    # training_data = dt.preProcess(train_x, train_y, normalize=True, gray=True)
    # decision_tree = dt.growTree(training_data, criterion)

    # dt.plotDiagram(decision_tree, extension=criterion)
    # dt.prune(decision_tree, criterion, 0.75)
    # dt.plotDiagram(decision_tree, extension=(criterion+"_pruned"))

    # test_data = dt.preProcess(test_x, normalize=True, gray=True)
    # result_text = dt.classify(decision_tree, test_data, test_y)
    # print("\n%s" % "\n".join(result_text))
    # Data().saveVariable(name="decision_tree", extension=(criterion+"_classify_result"), value=result_text)

    ### CI ~ C4.5
    x_train, y_train = Data().fetchFromPath('characters', 't0')
    x_test, y_test = Data().fetchFromPath('characters', 't1')
    split_criterion = "entropy"

    prepared_train = dt.preProcess(
        x_train, y_train, norm=True, gray=True, seg=True)
    tree = dt.growTree(prepared_train, split_criterion)

    dt.plotDiagram(tree, extension=split_criterion)
    # dt.prune(tree, split_criterion, 0.5)
    # dt.plotDiagram(tree, extension=(split_criterion + "_pruned"))

    prepared_test = dt.preProcess(x_test, norm=True, gray=True, seg=True)
    report_lines = dt.classify(tree, prepared_test, y_test)
    Data().saveVariable(
        name="decision_tree",
        extension=(split_criterion + "_classify_result"),
        value=report_lines)
    print("\n%s" % "\n".join(report_lines))
    def favEdit(self, itemName, itemWidget) -> None:
        """Promote *itemWidget* to the favorite (first) slot of the widget order.

        Behavior branches on how many lines the item spans
        (``self.lineNumber[itemName]`` is ``"one"``, ``"two"`` or ``"three"``);
        multi-line items also re-insert their "pattern"/"retopo" companion
        entries right after the favorite so the group stays contiguous, then
        the lines are redrawn.

        NOTE(review): ``self.orderWidget`` appears to be an ordered-dict-like
        container exposing ``insert(position, key, value)`` — confirm its type.
        """
        if self.lineNumber[itemName] == "one":

            #Test if the widget is already a favorite
            firstWidget =[elem[0] for elem in self.orderWidget.values()]
            if itemWidget == firstWidget[0]:
                return
            self.orderWidget.insert(0, itemName,[itemWidget])
            self.drawLines()

        elif self.lineNumber[itemName] == "two":
            # Test if the widget is already a favorite
            firstWidget = [elem[0] for elem in self.orderWidget.values()]
            if itemWidget == firstWidget[0]:
                return
            try:
                # Move the item to the front, then re-insert its "pattern"
                # companion right after it.
                self.orderWidget.insert(0, itemName ,[itemWidget])
                self.orderWidget.insert(1, dt.longNameEdit("garment",itemName,"garment","pattern"), self.orderWidget[dt.longNameEdit("garment",itemName,"garment","pattern")])
            except:
                # NOTE(review): bare except makes a silent best-effort
                # fallback (item inserted alone when the companion key is
                # missing) — consider narrowing to KeyError.
                self.orderWidget.insert(0, itemName, [itemWidget])

            self.drawLines()

        elif self.lineNumber[itemName] == "three":
            # Test if the widget is already a favorite
            firstWidget = [elem[0] for elem in self.orderWidget.values()]
            if itemWidget == firstWidget[0]:
                return


            try:
                # Same as the two-line case, plus the "retopo" companion.
                self.orderWidget.insert(0, itemName, [itemWidget])
                self.orderWidget.insert(1,dt.longNameEdit("garment",itemName,"garment","pattern"), self.orderWidget[dt.longNameEdit("garment",itemName,"garment","pattern")])
                self.orderWidget.insert(2,dt.longNameEdit("garment",itemName,"garment","retopo"), self.orderWidget[dt.longNameEdit("garment",itemName,"garment","retopo")])
            except:
                # NOTE(review): same silent fallback as above.
                self.orderWidget.insert(0, itemName, [itemWidget])

            self.drawLines()
Example #7
0
def ae_od(normal_data,
          all_data,
          ae_model,
          EPISODE_MAX=10000,
          BATCH_SIZE=64,
          verbose=False):
    """Train an autoencoder on normal data, then score every object.

    Training stops early once the loss over a 200-episode window stabilizes
    (std < 5% of mean, or mean < 1e-5). Each row of *all_data* is then scored
    by the model's reconstruction error.

    :param normal_data: training samples assumed to be inliers.
    :param all_data: 2-D array of all objects to score.
    :param ae_model: model exposing ``train_model`` and ``test_model``.
    :return: 1-D array of anomaly scores, one per row of *all_data*.
    """
    n_obj, n_f = all_data.shape
    batcher = Data(normal_data)
    window = np.zeros(200)
    for step in range(EPISODE_MAX):
        batch = batcher.next_batch(BATCH_SIZE)
        step_loss = ae_model.train_model(batch)
        window[step % 200] = step_loss
        mean_loss = 0.
        loss_std = 0.

        # Evaluate convergence once per full 200-episode window (skip step 0).
        if step % 200 == 0 and step // 200 != 0:
            loss_std = np.std(window)
            mean_loss = np.average(window)
            if loss_std < 0.05 * mean_loss or mean_loss < 1e-5:
                if verbose:
                    print(
                        '  DeepAE:{}, episode: {}, loss: {:.4}, avg,std: {:.4}, {:.4}'
                        .format(batch.shape, step, step_loss, mean_loss,
                                loss_std))
                break
            window = np.zeros(200)

        if step % 2000 == 0 and verbose:
            print(
                '  DeepAE:{}, episode: {}, loss: {:.4}, avg,std: {:.4}, {:.4}'.
                format(batch.shape, step, step_loss, mean_loss, loss_std))

    anomaly_scores = np.zeros([n_obj])
    for obj_idx, obj in enumerate(all_data):
        anomaly_scores[obj_idx] = ae_model.test_model(obj.reshape([1, n_f]))
    return anomaly_scores
def _300w_3d_parser(name: str,
                    root_folder: str,
                    image_type: str,
                    landmarks_fld_name='pt2d') -> Data:
    """Parse one 300W-3D sample into a ``Data`` record.

    Loads the ``.mat`` file next to the image, converts the stored Euler
    angles to a rotation vector and transposes the landmark matrix.

    :param name: image file name inside *root_folder*.
    :param root_folder: dataset root directory.
    :param image_type: image extension (e.g. ``png``) to swap for ``.mat``.
    :param landmarks_fld_name: mat-file field holding the 2D landmarks.
    :return: ``Data(image_path, landmarks_2d, pose)``.
    """
    image_path = '%s/%s' % (root_folder, name)
    mat_name = name.replace('.%s' % image_type, '.mat')
    meta = loadmat('%s/%s' % (root_folder, mat_name))

    pose_params = meta["Pose_Para"].reshape([-1]).astype(np.float32)
    rx, ry, rz, tx, ty, tz, scale = pose_params  # pitch yaw roll

    # Euler angles ('zxy' order, radians) -> rotation vector.
    rotation = R.from_euler('zxy', [rx, ry, rz], degrees=False)
    rx, ry, rz = rotation.as_rotvec()

    pose = np.array([rx, ry, rz, tx, ty, tz, scale])
    landmarks_2d = meta[landmarks_fld_name].astype(np.float32).transpose()
    return Data(image_path, landmarks_2d, pose)
Example #9
0
def detect_face_landmarks_dlib(data: Data) -> Data:
    """Predict 68 facial landmarks inside ``data.bbox`` with the dlib predictor.

    Reads the image in grayscale, runs the shape predictor on the bbox
    rectangle and stores the result in ``data.landmarks_2d`` as a
    ``(68, 2)`` float32 array. On any failure the record is returned
    unchanged with a warning logged (best-effort by design).

    :param data: record with ``image`` (path) and ``bbox`` (x, y, w, h).
    :return: the same ``Data`` record, possibly with ``landmarks_2d`` set.
    """
    try:
        image = cv2.imread(data.image)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        x, y, w, h = data.bbox
        dlib_rect = dlib.rectangle(x, y, x + w, y + h)
        predicted = predictor(image, dlib_rect)
        # 68-point model: collect (x, y) per landmark.
        data.landmarks_2d = np.array(
            [np.array([predicted.part(i).x, predicted.part(i).y],
                      dtype=np.float32) for i in range(68)],
            dtype=np.float32)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        log.warning('did not find landmarks to image %s ' % data.image)
    return data
Example #10
0
def load_naive_augmented_dataset(data_source: DataSources, limit=-1) -> [Data]:
    """Load naively-augmented samples ('*_aug.<ext>' files) with their metadata.

    Walks *root_folder*, and for every augmented image lazily loads the
    companion ``.meta`` file (named after the part of the file name before
    '--'), caching all of its entries in ``meta_data``. Each meta line is
    ``file_name|pose|landmarks`` with pose/landmarks as space-separated
    bracketed arrays that are rewritten into JSON and parsed.

    :param data_source: enum whose ``value`` is ``(root_folder, image_type)``.
    :param limit: stop after this many records; -1 means no limit.
    :return: list of ``Data(image_path, landmarks_2d, pose)`` records.
    """
    root_folder, image_type = data_source.value

    log.info('load_naive_augmented_validation_set::%s' % data_source.name)

    data: [Data] = []
    meta_data = {}

    for root, dirs, files in os.walk(root_folder, topdown=False):
        for name in files:
            # Chained comparison: true iff limit > -1 AND limit == len(data).
            # NOTE(review): this break only leaves the inner files loop; the
            # outer walk continues (re-breaking per directory) — confirm
            # intended.
            if -1 < limit == len(data):
                break

            if name.endswith('_aug.%s' % image_type):

                # Load the companion .meta file once per augmented image name.
                if name not in meta_data:

                    # '<base>--<suffix>' -> '<base>.meta'
                    meta_file_name = name.split('--')
                    meta_file_name = '%s.meta' % meta_file_name[0]

                    with open('%s/%s' % (root_folder, meta_file_name)) as f:
                        meta_lines = f.readlines()

                    # Each line: file_name|pose|landmarks. The arrays are
                    # space-separated; rewriting spaces to commas turns them
                    # into JSON lists before parsing.
                    for meta_line in meta_lines:
                        file_name, pose, landmarks_2d_ = meta_line.split('|')
                        meta_data[file_name] = (np.array(
                            json.loads(pose.replace(' ', ','))),
                                                np.array(
                                                    json.loads(
                                                        landmarks_2d_.replace(
                                                            '  ', ' ').replace(
                                                                ' ', ','))))

                rx, ry, rz, tx, ty, tz = meta_data[name][0]
                data.append(
                    Data('%s/%s' % (root_folder, name), meta_data[name][1],
                         np.array([rx, ry, rz, tx, ty, tz])))

    return data
Example #11
0
# Couture UI bootstrap: Maya-side imports, scene data-node setup and logging.
from Utils import Data as data
from functools import partial
import coutureCore as cc
# NOTE(review): bare reload() is the Python 2 builtin — under Python 3 this
# requires importlib.reload; confirm the Maya interpreter version in use.
reload(cc)

from maya import OpenMayaUI as omui
import pymel.core as pm
import os

# Directory containing this module (used to locate bundled resources).
folderPath = os.path.split(__file__)[0]

# Lazily-created main dialog instance.
dialog = None

# Reuse the scene's CoutureDataNode when present, otherwise create a new one.
if not pm.objExists('CoutureDataNode'):
    dataNode = data.coutureDataNode()
else:
    dataNode = data.coutureDataNode("CoutureDataNode")

from vendor.qtpy import Qt
from vendor.qtpy.Qt import QtWidgets
from vendor.qtpy.Qt import QtGui as qg
from vendor.qtpy.Qt import QtCore as qc

import logging
logging.basicConfig()
logger = logging.getLogger('CoutureUI')
logger.setLevel(logging.DEBUG)

#Defining Maya API version
MAYA2014 = 20140000
Example #12
0
def get_face_bb(data: Data) -> Data:
    """Attach the bounding box derived from the 2D landmarks to *data*."""
    bbox = get_face_bb2(data.landmarks_2d)
    data.bbox = bbox
    return data