Example #1
def run_model(which_set, classifier, batch_size, model_axes,
              data_axes=('b', 0, 1, 'c')):
    # SeqDataset, basic_7emotion_names and the metric helpers (accuracy,
    # framewise_accuracy, get_stats) come from the surrounding project.
    dset = SeqDataset(which_set=which_set)
    #indices = dset.get_filtered_indices(perturbations=['0'], flips=[False])
    indices = range(dset.numSamples)
    targets_ints = []
    stats = []
    fileName = []

    for n in indices:
        features, targets, fname = dset.get_clip(n)
        # note: these are reset on every clip, so only the last clip's
        # scores survive the loop; the aggregate reporting below is disabled
        misclass = []
        frame_misclass = []

        # one grayscale 48x48 frame per row, in ('b', 0, 1, 'c') order
        feature = features.reshape(len(features), 48, 48, 1)
        target = None
        if targets is not None:
            # the dataset labels say "angry", basic_7emotion_names uses "anger"
            target = basic_7emotion_names.index(
                targets.lower().replace("angry", "anger"))
        #feature = feature / 255.
        #feature = feature.astype('float32')
        if data_axes != model_axes:
            # reorder axes from the dataset layout to the model layout
            feature = feature.transpose(
                *[data_axes.index(axis) for axis in model_axes])

        # classify the clip in fixed-size batches of frames; the leading
        # [0] index assumes a single (grayscale) channel
        num_samples = feature.shape[3]
        predictions = []
        for i in range(num_samples // batch_size):
            predictions.append(classifier(
                feature[0, :, :, i * batch_size:(i + 1) * batch_size]
                [numpy.newaxis, :, :, :]))

        # the classifier needs full batches, so pad any leftover frames
        # with garbage up to batch_size and drop the padded predictions
        remainder = num_samples % batch_size
        if remainder != 0:
            pad = batch_size - remainder
            shape = [1, feature.shape[1], feature.shape[2], pad]
            padding = numpy.ones(shape, dtype='float32')
            feature = numpy.concatenate(
                (feature[0, :, :, (num_samples // batch_size) * batch_size:]
                 [numpy.newaxis, :, :, :], padding), axis=3)
            predictions.append(classifier(feature)[:remainder])

        targets_ints.append(target)
        predictions = numpy.concatenate(predictions, axis=0)
        misclass.append(accuracy(predictions, target))
        frame_misclass.append(framewise_accuracy(predictions, target))
        stats.append(get_stats(predictions, target))
        fileName.append(fname)

    # clip-wise / frame-wise error reporting, currently disabled:
    # error = numpy.sum(misclass) / float(len(misclass))
    # print "clip wise: ", error, 1 - error
    # frame_misclass = numpy.concatenate(frame_misclass)
    # error = frame_misclass.sum() / float(len(frame_misclass))
    # print "frame wise: ", error, 1 - error

    return numpy.vstack(stats), targets_ints, fileName
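
A minimal usage sketch for run_model. The classifier here is a hypothetical stand-in: any callable mapping a (1, 48, 48, batch_size) frame batch to one score row per frame would do (e.g. a compiled Theano function), and the dataset and metric helpers still have to exist in scope:

    import numpy

    # hypothetical stand-in: uniform scores over the 7 emotions for
    # every frame in the incoming batch
    def dummy_classifier(batch):
        return numpy.full((batch.shape[3], 7), 1.0 / 7, dtype='float32')

    stats, target_ints, file_names = run_model(which_set='valid',
                                               classifier=dummy_classifier,
                                               batch_size=128,
                                               model_axes=('c', 0, 1, 'b'))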
Example #2
    def __init__(self, emotion, size=(96, 96), num_channels=1, img_per_seq=3):
        # accept either an emotion name or its integer index
        if isinstance(emotion, str):
            emotion = basic_7emotion_names.index(emotion.lower())

        sets = glob.glob('/data/lisa/data/faces/EmotiW/preproc/static/*/*_'
                         + str(emotion) + '.npy')
        sets.sort()

        self.images = []
        for s in sets:
            # memory-map each flat float32 file and reshape it, without
            # copying, to (n_images, rows, cols, channels)
            img = numpy.memmap(s, mode='r', dtype='float32')
            img = img.view()
            img.shape = (len(img) // (size[0] * size[1] * num_channels),
                         size[0], size[1], num_channels)
            self.images.append(img)

        self.emotion = emotion
        self.ips = img_per_seq
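
The reshape above relies on each file being a flat float32 array whose length is an exact multiple of one image's size; a self-contained illustration of the same arithmetic on a fake buffer:

    import numpy

    rows, cols, channels = 96, 96, 1
    frame_size = rows * cols * channels                   # 9216 floats per image
    flat = numpy.zeros(5 * frame_size, dtype='float32')   # stands in for one file
    n_images = len(flat) // frame_size                    # 5
    images = flat.reshape(n_images, rows, cols, channels)
    print(images.shape)                                   # (5, 96, 96, 1)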
Example #3
    def load_data(which_set, source, preproc=None, size=None, prep=''):

        if source == 'original':
            # read facetubes straight from the AFEW2 image sequences
            dataset = AFEW2ImageSequenceDataset(preload_facetubes=False,
                                                preproc=preproc,
                                                size=size)
            train_idx, val_idx = dataset.get_standard_train_test_splits()[0]
            if which_set == 'train':
                data_idx = train_idx
            elif which_set == 'valid':
                data_idx = val_idx
            else:
                raise ValueError("which_set must be 'train' or 'valid', "
                                 "got %r" % which_set)

            features = []
            clip_ids = []
            targets = []

            # flatten the per-clip facetubes into parallel lists
            for idx in data_idx:
                fts = dataset.get_facetubes(idx)
                tgt = basic_7emotion_names.index(dataset.get_label(idx))
                for ft in fts:
                    features.append(ft)
                    clip_ids.append(idx)
                    targets.append(tgt)
            return features, clip_ids, targets

        elif source == 'samira':
            # preprocessed data, already pickled
            path = ("/data/lisa/data/faces/EmotiW/preproc/samira/KGL-AFEW/"
                    "afew2_{}{}.pkl".format(which_set, prep))
            data = serial.load(path)
            return data['data_x'], data['clip_ids'], data['data_y']
        elif source == 'samira_iso':
            path = ("/data/lisa/data/faces/EmotiW/preproc/samira/KGLIS-AFEWIS/"
                    "afew2_{}.pkl".format(which_set))
            data = serial.load(path)
            return data['data_x'], data['clip_ids'], data['data_y']
        else:
            raise ValueError("Unknown source")
Example #4
    def __init__(self, emotion, size=(48, 48), num_channels=3, img_per_seq=3):
        # accept either an emotion name or its integer index; the directory
        # names on disk are capitalized ("Happy", "Sad", ...)
        if isinstance(emotion, str):
            self.emotion = basic_7emotion_names.index(emotion)
            emotion = emotion[0].upper() + emotion[1:]
        else:
            self.emotion = emotion
            emotion = basic_7emotion_names[emotion]

        files = glob.glob('/data/lisa/data/faces/EmotiW/preproc/seq/*/'
                          + emotion + '/*.npy')
        files.sort()

        self.seq = []
        self.lgts = []
        for f in files:
            # memory-map each flat float32 file and reshape it to
            # (n_frames, rows, cols, channels)
            seq = numpy.memmap(f, mode='r', dtype='float32')
            lgt = len(seq) // (size[0] * size[1] * num_channels)
            seq.shape = (lgt, size[0], size[1], num_channels)
            self.seq.append(seq)
            # remember each sequence's frame count
            self.lgts.append(lgt)

        self.ips = img_per_seq
        self.size = size
        self.num_channels = num_channels
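
The snippet stops before any accessor methods, but given self.seq and self.ips a plausible access pattern is a random img_per_seq-frame window; a toy sketch (the array here stands in for one loaded sequence):

    import numpy

    ips = 3
    seq = numpy.zeros((10, 48, 48, 3), dtype='float32')     # one memmapped sequence
    start = numpy.random.randint(0, seq.shape[0] - ips + 1)
    window = seq[start:start + ips]                         # shape (3, 48, 48, 3)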
Example #5
    def __init__(self, which_set, sequence_length=3, preload_facetubes=True,
                 batch_size=None, preproc=[], size=(96, 96),
                 greyscale=False):

        # normalize the split name to the capitalization used on disk
        if which_set == 'train':
            which_set = 'Train'
        elif which_set == 'valid':
            which_set = 'Val'
        if which_set not in ['Train', 'Val']:
            raise ValueError(
                "Unrecognized value for 'which_set': %s. "
                "Valid values are 'Train' and 'Val'." % which_set)

        if not preload_facetubes:
            raise NotImplementedError(
                "For now, we need to preload all facetubes")

        dataset = AFEW2ImageSequenceDataset(preload_facetubes=False,
                                            preproc=preproc,
                                            size=size)

        self.dataset = dataset

        train_idx, val_idx = dataset.get_standard_train_test_splits()[0]
        if which_set == 'Train':
            data_idx = train_idx
        else:
            data_idx = val_idx

        if preload_facetubes:
            _features = []
            _clip_ids = []
            _targets = []

            for idx in data_idx:
                fts = dataset.get_facetubes(idx)
                tgt = basic_7emotion_names.index(dataset.get_label(idx))
                for ft in fts:
                    temp = []
                    for frame in ft:
                        if greyscale:
                            temp.append(cv2.cvtColor(frame,
                                                     cv2.COLOR_BGR2GRAY))
                        else:
                            temp.append(frame)
                    ft = numpy.array(temp)
                    _features.append(ft)
                    _clip_ids.append(idx)
                    _targets.append(tgt)

            features = []
            targets = []
            count = 0
            for feat, clip_id, target in zip(_features, _clip_ids, _targets):
                # slide a window of sequence_length frames over each
                # facetube; every overlapping window becomes one sample
                for i in xrange(feat.shape[0] - sequence_length + 1):
                    features.append(feat[i:i + sequence_length, :, :])
                    assert len(features[-1]) == sequence_length
                    count += 1
                    targets.append(target)

            self.n_samples = count
            feat_shape = features[0].shape
            features = numpy.concatenate(features)
            # one flat row per window: sequence_length * rows * cols * channels
            features = features.reshape(
                (self.n_samples,
                 sequence_length * numpy.prod(feat_shape[1:])))

        # one-hot encode the 7 emotion targets
        one_hot = numpy.zeros((self.n_samples, 7), dtype='float32')
        for i in xrange(self.n_samples):
            one_hot[i, targets[i]] = 1.
        targets = one_hot

        '''
        if batch_size is not None and self.n_samples % batch_size != 0:
            warnings.warn("truncating to a multiple of batch_size drops some "
                          "samples; be careful when comparing results. A "
                          "fixed batch size is usually needed for "
                          "convolutional networks")
            self.n_samples = self.n_samples - (self.n_samples % batch_size)
            features = features[:self.n_samples]
            targets = targets[:self.n_samples]
        '''

        super(AFEW2FaceTubes, self).__init__(X=features, y=targets,
                                             axes=('b', 'c', 0, 1))
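
The heart of the preload branch is the overlapping-window extraction followed by flattening; the same two steps in a self-contained toy version:

    import numpy

    sequence_length = 3
    feat = numpy.arange(5 * 2 * 2 * 1, dtype='float32').reshape(5, 2, 2, 1)

    # 5 frames with length-3 windows -> 3 overlapping samples
    windows = [feat[i:i + sequence_length]
               for i in range(feat.shape[0] - sequence_length + 1)]
    assert len(windows) == 3

    flat = numpy.concatenate(windows).reshape(
        len(windows), sequence_length * numpy.prod(feat.shape[1:]))
    print(flat.shape)   # (3, 12)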