Example 1
import numpy

# SeqDataset, basic_7emotion_names, accuracy, framewise_accuracy and
# get_stats are assumed to be provided by the surrounding module.
def run_model(which_set, classifier, batch_size, model_axes, data_axes=('b', 0, 1, 'c')):
    dset = SeqDataset(which_set=which_set)
    #indices = dset.get_filtered_indices(perturbations=['0'], flips=[False])
    indices = range(dset.numSamples)
    #indices = [2,3,4]
    #print indices
    targets_ints = []
    stats = []
    fileName = []

    for n in indices:
        features, targets, fname = dset.get_clip(n)
        misclass = []
        frame_misclass = []
        
        feature = features.reshape(len(features), 48, 48, 1)
        target = None
        if targets is not None:
            target = basic_7emotion_names.index(targets.lower().replace("angry", "anger"))
        #feature = feature / 255.
        #feature = feature.astype('float32')
        if data_axes != model_axes:
            feature = feature.transpose(*[data_axes.index(axis) for axis in model_axes])

        num_samples = feature.shape[3]
        predictions = []
        # classify the full batches of frames (frames live on axis 3)
        for i in range(num_samples // batch_size):
            # TODO FIX ME, after grayscale
            predictions.append(classifier(feature[0,:,:,i*batch_size:(i+1)*batch_size][numpy.newaxis,:,:,:])) #XXX:numpy.newaxis

        # number of garbage frames needed to pad the last partial batch up to batch_size
        modulo = (batch_size - num_samples % batch_size) % batch_size

        if modulo != 0:
            # TODO FIX ME, after grayscale
            shape = [1, feature.shape[1], feature.shape[2], modulo]
            padding = numpy.ones(shape, dtype='float32')
            # TODO FIX ME, after grayscale
            # append the leftover frames plus the garbage padding as one last full
            # batch, then keep only the predictions that belong to real frames
            feature = numpy.concatenate((feature[0,:,:,(num_samples // batch_size) * batch_size:][numpy.newaxis,:,:,:], padding), axis=3) #XXX:axis,numpy.newaxis
            predictions.append(classifier(feature)[:batch_size - modulo])
        targets_ints.append(target)
        predictions = numpy.concatenate(predictions, axis=0)
        misclass.append(accuracy(predictions, target))
        frame_misclass.append(framewise_accuracy(predictions, target))
        stats.append(get_stats(predictions, target))
        fileName.append(fname)

    # error = numpy.sum(misclass) / float(len(features))
    # print "clip wise: ", error, 1-error

    # frame_misclass = numpy.concatenate(frame_misclass)
    # error = frame_misclass.sum() / float(len(frame_misclass))
    # print "frame wise: ", error, 1-error

    return numpy.vstack(stats), targets_ints, fileName
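
The batching logic above is easy to get lost in: full batches of frames are classified first, then the leftover frames are padded with garbage up to batch_size and the predictions for the padding are discarded. Below is a minimal, self-contained sketch of that pattern; it batches along the leading axis instead of axis 3, and predict_in_batches and the dummy classifier are hypothetical names used only for illustration, not part of the code above.

import numpy

def predict_in_batches(classifier, frames, batch_size):
    # classify `frames` (frame axis first) in fixed-size batches, padding the
    # last partial batch with garbage and dropping the padded predictions
    num_samples = len(frames)
    predictions = []
    for i in range(num_samples // batch_size):
        predictions.append(classifier(frames[i * batch_size:(i + 1) * batch_size]))
    leftover = num_samples % batch_size
    if leftover:
        padding = numpy.ones((batch_size - leftover,) + frames.shape[1:], dtype='float32')
        last = numpy.concatenate((frames[num_samples - leftover:], padding), axis=0)
        predictions.append(classifier(last)[:leftover])
    return numpy.concatenate(predictions, axis=0)

# toy usage: 23 grayscale 48x48 frames, batches of 10, a dummy "classifier"
frames = numpy.random.rand(23, 48, 48, 1).astype('float32')
dummy = lambda batch: batch.reshape(len(batch), -1).mean(axis=1, keepdims=True)
print(predict_in_batches(dummy, frames, 10).shape)  # (23, 1)
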
Example 2
def get_data(which_set, clip):
    data = SeqDataset(which_set=which_set)

    # drop the time axis, if present, so only the per-frame axes remain
    data_axes = data.get_data_specs()[0].components[0].axes
    if 't' in data_axes:
        data_axes = [axis for axis in data_axes if axis != 't']

    #targets = numpy.argmax(data.targets, axis=1)
    return data.get_clip(clip) + (data_axes,)
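
The extra element returned here is the axis ordering of the clip, which is what lets a caller transpose the array into whatever layout the model expects, the same trick run_model uses in Example 1. A small sketch with made-up axis orders and a dummy clip (the shapes and layouts are assumptions, not taken from SeqDataset):

import numpy

data_axes = ('b', 0, 1, 'c')   # assumed layout of the returned clip
model_axes = ('c', 0, 1, 'b')  # assumed layout expected by the model

clip = numpy.zeros((17, 48, 48, 1), dtype='float32')  # 17 frames of 48x48x1

if data_axes != model_axes:
    # look up where each model axis lives in the data layout and transpose
    clip = clip.transpose(*[data_axes.index(axis) for axis in model_axes])

print(clip.shape)  # (1, 48, 48, 17)
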
Example 3
import numpy

# SeqDataset, basic_7emotion_names, accuracy, framewise_accuracy and
# get_stats are assumed to be provided by the surrounding module.
def run_model(which_set,
              classifier,
              batch_size,
              model_axes,
              data_axes=('b', 0, 1, 'c')):
    dset = SeqDataset(which_set=which_set)
    #indices = dset.get_filtered_indices(perturbations=['0'], flips=[False])
    indices = range(dset.numSamples)
    #indices = [2,3,4]
    #print indices
    targets_ints = []
    stats = []
    fileName = []

    for n in indices:
        features, targets, fname = dset.get_clip(n)
        misclass = []
        frame_misclass = []

        feature = features.reshape(len(features), 48, 48, 1)
        target = None
        if targets is not None:
            target = basic_7emotion_names.index(targets.lower().replace(
                "angry", "anger"))
        #feature = feature / 255.
        #feature = feature.astype('float32')
        if data_axes != model_axes:
            feature = feature.transpose(
                *[data_axes.index(axis) for axis in model_axes])

        num_samples = feature.shape[3]
        predictions = []
        # classify the full batches of frames (frames live on axis 3)
        for i in range(num_samples // batch_size):
            # TODO FIX ME, after grayscale
            predictions.append(
                classifier(feature[0, :, :,
                                   i * batch_size:(i + 1) * batch_size]
                           [numpy.newaxis, :, :, :]))  #XXX:numpy.newaxis

        # number of garbage frames needed to pad the last partial batch up to batch_size
        modulo = (batch_size - num_samples % batch_size) % batch_size

        if modulo != 0:
            # TODO FIX ME, after grayscale
            shape = [1, feature.shape[1], feature.shape[2], modulo]
            padding = numpy.ones(shape, dtype='float32')
            # TODO FIX ME, after grayscale
            # append the leftover frames plus the garbage padding as one last full
            # batch, then keep only the predictions that belong to real frames
            feature = numpy.concatenate(
                (feature[0, :, :, (num_samples // batch_size) *
                         batch_size:][numpy.newaxis, :, :, :], padding),
                axis=3)  #XXX:axis,numpy.newaxis
            predictions.append(classifier(feature)[:batch_size - modulo])
        targets_ints.append(target)
        predictions = numpy.concatenate(predictions, axis=0)
        misclass.append(accuracy(predictions, target))
        frame_misclass.append(framewise_accuracy(predictions, target))
        stats.append(get_stats(predictions, target))
        fileName.append(fname)

    # error = numpy.sum(misclass) / float(len(features))
    # print "clip wise: ", error, 1-error

    # frame_misclass = numpy.concatenate(frame_misclass)
    # error = frame_misclass.sum() / float(len(frame_misclass))
    # print "frame wise: ", error, 1-error

    return numpy.vstack(stats), targets_ints, fileName
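
The label handling inside run_model is easy to miss: clip labels arrive as strings, "angry" is normalized to "anger", and the position in the 7-emotion list becomes the integer target. A minimal sketch of just that step; the list contents below are an assumption about basic_7emotion_names, not taken from the code above.

# assumed ordering; the real basic_7emotion_names comes from the surrounding project
basic_7emotion_names = ['anger', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']

for raw_label in ('Angry', 'Happy', 'neutral'):
    target = basic_7emotion_names.index(raw_label.lower().replace("angry", "anger"))
    print('%s -> %d' % (raw_label, target))  # Angry -> 0, Happy -> 3, neutral -> 6
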