Example #1
def test_captures_distinct_features():

    caffe.set_device(0)  # if we have multiple GPUs, pick the first one
    caffe.set_mode_gpu()

    net = network.CaffeNet(
        'deep_introspection/test/VGG.prototxt',
        'deep_introspection/test/VGG_ILSVRC_16_layers.caffemodel')

    img, offset, resFac, newSize = utils.imgPreprocess(
        img_path='deep_introspection/test/starfish.jpg')
    net.set_new_size(newSize)
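    # Layer-wise Relevance Propagation (LRP) assigns a relevance score to each input pixel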
    relevances = lrp.calculate_lrp_heatmap(net, img)

    clusters = features.extract_features_from_relevances(relevances)

    # Check that every cluster contains more than 10 points
    assert all(len(cluster) > 10 for cluster in clusters)

    # Check that every point's relevance magnitude is at least the threshold
    threshold = 5 / relevances.flatten().shape[0]
    for cluster in clusters:
        assert all(abs(relevances[x]) >= threshold for x in cluster)
Example #2
def test_boundary_synthesis():

    net = network.CaffeNet('deep_introspection/test/VGG.prototxt', 'deep_introspection/test/VGG_ILSVRC_16_layers.caffemodel')

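    # Target output: probability mass split evenly between two classes (indices 285 and 7)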
    target = np.zeros(1000)
    target[285] = 0.5
    target[7] = 0.5
    solution, loss = synthesis.synthesise(net, target, 'prob')

    assert solution.shape == (224, 224, 3)
Example #3
def test_boundary():

    net = network.CaffeNet('deep_introspection/test/VGG.prototxt', 'deep_introspection/test/VGG_ILSVRC_16_layers.caffemodel')
    img, offset, resFac, newSize = utils.imgPreprocess(img_path='deep_introspection/test/cat.jpg')

    net.set_new_size(newSize)

    layer = net.get_layer_names()[-2]
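    # The last four arguments are presumably a bounding box (xmax, ymax, xmin, ymin), as in the Django view below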
    solution, loss = synthesis.synthesise_boundary(net, img, layer, 185, 120, 140, 95)

    assert solution.shape == (224, 224, 3)
Example #4
def synthesise(request, model, image, feature):
    if request.method == 'POST':
        features_path = 'features/model_'+ str(model) + '_image_' + str(image) + '.dat'

        clusters = read_clusters(features_path)

        cluster = np.array(clusters[feature])


        img_path = TestImage.objects.filter(id=image).first().image
        test_model = TestModel.objects.filter(id=model).first()

        architecture = str(test_model.architecture)
        weights = str(test_model.weights)
        labels = str(test_model.labels)

        if architecture.split(".")[-1].lower() == "meta":
            net = network.TensorFlowNet(architecture, './models/'+ str(test_model.user) +'_' + test_model.name + '/')
            img = imread(img_path, mode='RGB')
            img = imresize(img, (224, 224))

        else:
            net = network.CaffeNet(architecture, weights)
            img, offset, resFac, newSize = utils.imgPreprocess(img_path=img_path)
            net.set_new_size(newSize)

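        # Bounding box of the selected cluster: max/min of its point coordinates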
        xmax, ymax, xmin, ymin = np.max(cluster[:,0]), np.max(cluster[:,1]), np.min(cluster[:,0]), np.min(cluster[:,1])

        feature_img, _ = synthesis.synthesise_boundary(net, img, xmax, ymax, xmin, ymin)
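        # Add back the ImageNet channel means (stored in BGR order) to the RGB synthesised image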
        mean = np.array([103.939, 116.779, 123.68])
        feature_img[:,:,0] += mean[2]
        feature_img[:,:,1] += mean[1]
        feature_img[:,:,2] += mean[0]

        num = FeatureImage.objects.filter(model__id=model,image__id=image,feature=feature).count()

        # save synthesised image
        feature_img = Image.fromarray(np.uint8(feature_img))
        feature_path = 'synthesised_features/model_'+ str(model) + '_image_' + str(image) + '_' + str(feature) + '_' + str(num) + '.jpg'
        feature_img.save(feature_path)

        featureImage = FeatureImage(model = test_model, image=TestImage.objects.filter(id=image).first(), feature=feature, feature_image=feature_path)
        featureImage.save()
        return HttpResponse("{\"image\": " + feature_path +"}")
    else:
        return HttpResponse("{message: \"Invalid method.\"}", status=405)
Example #5
def test_synthesise_loss_is_low():
    net = network.CaffeNet('deep_introspection/test/VGG.prototxt', 'deep_introspection/test/VGG_ILSVRC_16_layers.caffemodel')
    img, offset, resFac, newSize = utils.imgPreprocess(img_path='deep_introspection/test/cat.jpg')

    net.set_new_size(newSize)
    print(np.argmax(np.mean(net.predict(img), axis=0)))

    layer = net.get_layer_names()[-1]
    print("Testing layer " + layer)

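    # Use the network's own activations at this layer as the synthesis target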
    target_rep = net.get_activations(layer)

    solution, loss = synthesis.synthesise(net, target_rep, layer)
    net.set_new_size(solution.shape[:2])
    net.predict(solution)

    assert solution.shape == (224, 224, 3)
    assert synthesis.loss(net.get_activations(layer), target_rep) < 0.1
Example #6
def evaluate(request, model, image):
    features_path = 'features/model_' + str(model) + '_image_' + str(
        image) + '.dat'

    body = json.loads(request.body.decode("utf-8"))
    inactive_indices = body['inactiveFeatures']
    clusters = read_clusters(features_path)
    inactive_features = [clusters[i] for i in inactive_indices]
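    # Expand each (row, col) point into one tuple per colour channel so all three channels are masked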
    inactive_features = list(
        map(
            lambda cluster: list(
                itertools.chain.from_iterable(
                    map(
                        lambda x:
                        [tuple(x + [0]),
                         tuple(x + [1]),
                         tuple(x + [2])], cluster))), inactive_features))

    img_path = TestImage.objects.filter(id=image).first().image
    test_model = TestModel.objects.filter(id=model).first()

    architecture = str(test_model.architecture)
    weights = str(test_model.weights)
    labels = str(test_model.labels)

    if architecture.split(".")[-1].lower() == "meta":
        net = network.TensorFlowNet(
            architecture,
            './models/' + str(test_model.user) + '_' + test_model.name + '/')
    else:
        net = network.CaffeNet(architecture, weights)

    predictions, img = predictions_from_features(net, img_path,
                                                 inactive_features)

    top_predictions = get_top_predictions(predictions, 5, labels)
    # save modified image
    img = Image.fromarray(np.uint8(img))
    modification_path = 'features/model_' + str(model) + '_image_' + str(
        image) + '_' + '_'.join(str(f) for f in inactive_indices) + '.jpg'
    img.save(modification_path)
    return HttpResponse("{\"predictions\":" + json.dumps(top_predictions) +
                        ", \"image\": \"" + 'media/' + modification_path +
                        "\"}")
Example #7
def analyse(request, model, image):

    features_path = 'features/model_' + str(model) + '_image_' + str(
        image) + '.dat'
    clusters = read_clusters(features_path)
    num_clusters = len(clusters)

    img_path = TestImage.objects.filter(id=image).first().image
    test_model = TestModel.objects.filter(id=model).first()

    architecture = str(test_model.architecture)
    weights = str(test_model.weights)
    labels = str(test_model.labels)

    if architecture.split(".")[-1].lower() == "meta":
        net = network.TensorFlowNet(
            architecture,
            './models/' + str(test_model.user) + '_' + test_model.name + '/')
    else:
        net = network.CaffeNet(architecture, weights)

    predictions, _ = predictions_from_features(net, img_path, [])
    basic_predictions = get_top_predictions(predictions, -1, labels)
    predicted = basic_predictions[0]

    # Find features for largest change
    largest_change = 0
    features = []

    for i in range(1):
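        # Interpret a random integer as a bit mask selecting which clusters to deactivate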
        num = np.random.randint(low=0, high=2**num_clusters)
        b = [num >> j & 1 for j in range(num.bit_length() - 1, -1, -1)]
        b = [0] * (num_clusters - len(b)) + b
        inactive_indices = list(itertools.compress(range(num_clusters), b))
        inactive_features = [clusters[i] for i in inactive_indices]
        inactive_features = list(
            map(
                lambda cluster: list(
                    itertools.chain.from_iterable(
                        map(
                            lambda x:
                            [tuple(x + [0]),
                             tuple(x + [1]),
                             tuple(x + [2])], cluster))), inactive_features))

        predictions, img = predictions_from_features(net, img_path,
                                                     inactive_features)

        change = predicted['value'] - predictions[predicted['index']]
        if change > largest_change:
            largest_change = change
            features = inactive_indices

    inactive_features = [clusters[i] for i in features]
    inactive_features = list(
        map(
            lambda cluster: list(
                itertools.chain.from_iterable(
                    map(
                        lambda x:
                        [tuple(x + [0]),
                         tuple(x + [1]),
                         tuple(x + [2])], cluster))), inactive_features))

    predictions, img = predictions_from_features(net, img_path,
                                                 inactive_features)

    img = Image.fromarray(np.uint8(img))
    modification_path = 'features/model_' + str(model) + '_image_' + str(
        image) + '_' + '_'.join(str(f) for f in features) + '.jpg'
    img.save(modification_path)
    top_predictions = get_top_predictions(predictions, 5, labels)
    lc = {'features': features, 'predictions': top_predictions}

    # Find most important feature
    biggest_feature = 0
    biggest_change = 0
    for i in range(num_clusters):
        cluster = clusters[i]
        inactive_features = list(
            itertools.chain.from_iterable(
                map(lambda x: [tuple(x + [0]),
                               tuple(x + [1]),
                               tuple(x + [2])], cluster)))
        predictions, _ = predictions_from_features(net, img_path,
                                                   [inactive_features])
        change = predicted['value'] - predictions[predicted['index']]
        if change > biggest_change:
            biggest_change = change
            biggest_feature = i

    inactive_features = list(
        itertools.chain.from_iterable(
            map(lambda x: [tuple(x + [0]),
                           tuple(x + [1]),
                           tuple(x + [2])], clusters[biggest_feature])))
    predictions, img = predictions_from_features(net, img_path,
                                                 [inactive_features])
    img = Image.fromarray(np.uint8(img))
    modification_path = 'features/model_' + str(model) + '_image_' + str(
        image) + '_' + str(biggest_feature) + '.jpg'
    img.save(modification_path)
    top_predictions = get_top_predictions(predictions, 5, labels)

    mi = {'feature': biggest_feature, 'predictions': top_predictions}

    # Find minimal features required
    selection = []
    features_found = False
    for num in range(num_clusters):
        if num == 0:
            inactive_features = list(itertools.chain.from_iterable(clusters))
            inactive_features = list(
                itertools.chain.from_iterable(
                    map(
                        lambda x:
                        [tuple(x + [0]),
                         tuple(x + [1]),
                         tuple(x + [2])], inactive_features)))
            predictions, img = predictions_from_features(
                net, img_path, [inactive_features])
            top_predictions = get_top_predictions(predictions, 5, labels)
            # No features are required
            if top_predictions[0]['index'] == predicted['index']:
                img = Image.fromarray(np.uint8(img))
                modification_path = 'features/model_' + str(
                    model) + '_image_' + str(image) + '_' + '_'.join(
                        str(f) for f in range(num_clusters)) + '.jpg'
                img.save(modification_path)
                features_found = True
        else:
            combinations = itertools.combinations(range(num_clusters), num)
            for selection in combinations:
                inactive_indices = list(
                    set(range(num_clusters)) - set(selection))
                inactive_features = [clusters[i] for i in inactive_indices]
                inactive_features = list(
                    map(
                        lambda cluster: list(
                            itertools.chain.from_iterable(
                                map(
                                    lambda x: [
                                        tuple(x + [0]),
                                        tuple(x + [1]),
                                        tuple(x + [2])
                                    ], cluster))), inactive_features))

                predictions, img = predictions_from_features(
                    net, img_path, inactive_features)
                top_predictions = get_top_predictions(predictions, 5, labels)

                if top_predictions[0]['index'] == predicted['index']:
                    img = Image.fromarray(np.uint8(img))
                    modification_path = 'features/model_' + str(
                        model) + '_image_' + str(image) + '_' + '_'.join(
                            str(f) for f in inactive_indices) + '.jpg'
                    img.save(modification_path)
                    features_found = True
                    break
        if features_found:
            break

    mfRequired = {'features': selection, 'predictions': top_predictions}

    # Find minimal to change
    selection = []
    features_found = False
    for num in range(num_clusters):
        if num == 0:
            continue

        combinations = itertools.combinations(range(num_clusters), num)
        for selection in combinations:
            inactive_indices = list(selection)
            inactive_features = [clusters[i] for i in inactive_indices]
            inactive_features = list(
                map(
                    lambda cluster: list(
                        itertools.chain.from_iterable(
                            map(
                                lambda x: [
                                    tuple(x + [0]),
                                    tuple(x + [1]),
                                    tuple(x + [2])
                                ], cluster))), inactive_features))

            predictions, img = predictions_from_features(
                net, img_path, inactive_features)
            top_predictions = get_top_predictions(predictions, 5, labels)
            if top_predictions[0]['index'] != predicted['index']:
                img = Image.fromarray(np.uint8(img))
                modification_path = 'features/model_' + str(
                    model) + '_image_' + str(image) + '_' + '_'.join(
                        str(f) for f in inactive_indices) + '.jpg'
                img.save(modification_path)
                features_found = True
                break

        if features_found:
            break

    if not features_found:
        selection = []
    mfPerturbation = {'features': selection, 'predictions': top_predictions}

    results = {
        'originalClass': predicted['label'],
        'lc': lc,
        'mi': mi,
        'mfRequired': mfRequired,
        'mfPerturbation': mfPerturbation
    }

    return HttpResponse("{\"results\":" + json.dumps(results) + "}")
Example #8
def index(request, model, image):
    features_path = 'features/model_' + str(model) + '_image_' + str(
        image) + '.dat'
    feature_set = FeatureSet.objects.filter(model__id=model,
                                            image__id=image).first()

    unmodified_path = 'features/model_' + str(model) + '_image_' + str(
        image) + '_.jpg'

    if feature_set is None:
        # carry out LRP and clustering and write to file

        img_path = TestImage.objects.filter(id=image).first().image
        test_model = TestModel.objects.filter(id=model).first()

        architecture = str(test_model.architecture)
        weights = str(test_model.weights)
        labels = str(test_model.labels)

        if architecture.split(".")[-1].lower() == "meta":
            net = network.TensorFlowNet(
                architecture, './models/' + str(test_model.user) + '_' +
                test_model.name + '/')
            img = imread(img_path, mode='RGB')
            img = imresize(img, (224, 224))
            im = Image.fromarray(np.uint8(img))
            im.save(unmodified_path)

        else:
            net = network.CaffeNet(architecture, weights)
            img = imread(img_path, mode='RGB')
            img = 256 * utils.imageResize(img)
            img = Image.fromarray(np.uint8(img))
            img.save(unmodified_path)
            img, offset, resFac, newSize = utils.imgPreprocess(
                img_path=img_path)
            net.set_new_size(newSize)

        relevances = lrp.calculate_lrp_heatmap(net, img)
        clusters = features.extract_features_from_relevances(relevances)

        predictions, _ = predictions_from_features(net, img_path, [])
        basic_predictions = get_top_predictions(predictions, -1, labels)
        predicted = basic_predictions[0]

        sorted_predictions = []
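        # Rank clusters by how much masking each one alone reduces the predicted class score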
        for i in range(len(clusters)):
            cluster = list(
                itertools.chain.from_iterable(
                    map(lambda x: [x + (0, ), x + (1, ), x + (2, )],
                        clusters[i])))
            predictions, _ = predictions_from_features(net, img_path,
                                                       [cluster])
            diff = predicted['value'] - predictions[predicted['index']]
            sorted_predictions.append({'index': i, 'difference': diff})

        sorted_predictions = sorted(sorted_predictions,
                                    key=functools.cmp_to_key(compare))
        sorted_predictions.reverse()

        clusters = list(map(lambda x: clusters[x['index']],
                            sorted_predictions))

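        # Render each cluster as a semi-transparent red RGBA overlay for display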
        overlay_shape = (img.shape[0], img.shape[1], 4)
        for i, cluster in enumerate(clusters):
            overlay_img = np.zeros(overlay_shape)
            for point in cluster:
                overlay_img[point[0], point[1], 0] = 255
                overlay_img[point[0], point[1], 3] = 128
            overlay_img = Image.fromarray(np.uint8(overlay_img))
            overlay_img.save('features/feature_model_' + str(model) +
                             '_image_' + str(image) + '_' + str(i) + '.png')

        write_clusters(features_path, clusters)
        feature_set = FeatureSet(
            model=test_model,
            image=TestImage.objects.filter(id=image).first(),
            features=features_path)
        feature_set.save()

    # Get number of features and return features
    with open(features_path) as f:
        num_features = sum(1 for _ in f)
        return HttpResponse(
            "{\"features\":" + json.dumps(list(range(num_features))) +
            ", \"image\": \"" + 'media/' + unmodified_path +
            "\",\"message\": \"features successfully retrieved.\"}")
Example #9
def mean_confidence_interval(a, confidence=0.95):
    # Function header and n are assumed; the original snippet begins mid-function
    n = len(a)
    m, se = np.mean(a), scipy.stats.sem(a)
    h = se * sp.stats.t._ppf((1+confidence)/2., n-1)
    return m, h


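# Sensitivity analysis: backpropagate from the output layer and average the absolute input gradient over channels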
def sa(net, img):
    net.predict(img)
    layer = 'prob'
    activations = net.get_activations(layer)

    grad = net.backward(layer, activations)
    return np.mean(np.abs(grad), axis=2)

path = 'deep_introspection\\test\\shape_model\\'
#caffenet = caffe.Net('shapes_test/shapes_net_test.prototxt',caffe.TEST)
net = network.CaffeNet('shapes_test/shapes_net_test.prototxt','shapes_test/weights.caffemodel')
#
# net = network.TensorFlowNet('deep_introspection/test/shape_model/model.ckpt-40000.meta', './deep_introspection/test/shape_model/')
# fc1_weights = net.sess.graph.get_tensor_by_name('fc1/kernel:0').eval(session=net.sess)
# fc1_biases = net.sess.graph.get_tensor_by_name('fc1/bias:0').eval(session=net.sess)
# fc2_weights = net.sess.graph.get_tensor_by_name('fc2/kernel:0').eval(session=net.sess)
# fc2_biases = net.sess.graph.get_tensor_by_name('fc2/bias:0').eval(session=net.sess)
# conv1_weights = net.sess.graph.get_tensor_by_name('conv1/kernel:0').eval(session=net.sess)
# conv1_biases = net.sess.graph.get_tensor_by_name('conv1/bias:0').eval(session=net.sess)
# conv2_weights = net.sess.graph.get_tensor_by_name('conv2/kernel:0').eval(session=net.sess)
#
# conv2_biases = net.sess.graph.get_tensor_by_name('conv2/bias:0').eval(session=net.sess)
# caffenet.params['conv1'][0].data[...] = conv1_weights.T
# caffenet.params['conv1'][1].data[...] = conv1_biases
# caffenet.params['conv2'][0].data[...] = conv2_weights.T
# caffenet.params['conv2'][1].data[...] = conv2_biases
Example #10
def test_caffe_no_activations_if_not_predicted():
    net = network.CaffeNet(
        'deep_introspection/test/VGG.prototxt',
        'deep_introspection/test/VGG_ILSVRC_16_layers.caffemodel')
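    # No forward pass has been run yet, so there should be no cached activations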
    assert net.get_activations('fc7') is None
Example #11
from deep_introspection import network, utils
from scipy.misc import imread, imresize

net = network.CaffeNet(
    'deep_introspection/test/VGG.prototxt',
    'deep_introspection/test/VGG_ILSVRC_16_layers.caffemodel')
img, offset, resFac, newSize = utils.imgPreprocess(
    img_path='deep_introspection/test/cat.jpg')
net.set_new_size(newSize)

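# The same VGG16 model, loaded through the TensorFlow backend from its checkpoint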
tfNet = network.TensorFlowNet('deep_introspection/test/vgg16.meta',
                              './deep_introspection/test/')

img1 = imread('deep_introspection/test/cat.jpg', mode='RGB')
img1 = imresize(img1, (224, 224))


def test_caffe_conv_weights_correct_shape():
    weights = net.get_weights('conv1_1')
    assert (weights.shape == (64, 3, 3, 3))


def test_caffe_fc_weights_correct_shape():
    weights = net.get_weights('fc8')
    assert (weights.shape == (1000, 4096))


def test_caffe_activations_correct_shape():
    net.predict(img)
    activations = net.get_activations('conv1_1')
    assert (activations.shape == (64, 224, 224))