Example #1
def visualize_layer(l):
    # Show the layer's parameters; layers without any (e.g. pooling) have nothing to draw.
    print(l.param())
    if len(l.param()) < 1:
        return None
    filters = l.param()[0].data()
    # Transpose so that each filter becomes one tile in the montage.
    return visualize.show_multiple(filters.T)
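A minimal usage sketch, not taken from the source: it assumes a model object whose `layers` dict holds layer objects exposing the `param()` interface used above, and simply collects one montage per parameterised layer.

def visualize_all_layers(model):
    # Hypothetical driver over the snippet above; the `model.layers` dict
    # interface is assumed, everything else is illustrative only.
    montages = {}
    for name, layer in model.layers.items():
        img = visualize_layer(layer)
        if img is not None:  # layers without parameters return None
            montages[name] = img
    return montages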
Example #2
def layer_overview_png(checkpoint, layername):
    # Look up the requested checkpoint and layer.
    model = get_models()[checkpoint]
    layer = model.layers[layername]
    (num_filters, ksize, num_channels) = get_layer_dimensions(layer)
    # RGB filters are combined into colour tiles; other channel counts stay separate.
    reshaped = reshape_layer_for_visualization(layer, combine_channels=(num_channels == 3))
    # Grayscale or RGB filters get a fixed 6-column grid; otherwise one column per channel.
    ncols = 6 if num_channels in (1, 3) else num_channels
    return show_multiple(normalize(reshaped), ncols=ncols)
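A hedged usage sketch: assuming `get_models()` returns a mapping keyed by checkpoint name, as implied above, the same layer can be rendered across every checkpoint to watch the filters evolve. The layer name "conv1" is made up for illustration.

# Hypothetical call; "conv1" is an illustrative layer name, not from the source.
overviews = {ckpt: layer_overview_png(ckpt, "conv1") for ckpt in get_models()}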
Example #3
def visualize_complex_layer(l):
    # Should probably just be checking whether the layer is a convolution.
    if len(l.param()) < 1 or 'num_kernels' not in l.spec:
        return None

    nfilters = l.spec['num_kernels']
    ksize = l.spec['ksize']
    # Each weight column holds ksize*ksize values per input channel, so integer
    # division recovers the channel count.
    channels = l.param()[0].data().shape[0] // (ksize * ksize)

    # Reshape the flat weight matrix into (filter, row, col, channel), move the
    # channel axis forward, and flatten to one ksize x ksize tile per
    # (filter, channel) pair, laid out with one column per channel.
    filters = l.param()[0].data()
    filters = filters.T.reshape(nfilters, ksize, ksize, channels)
    filters = filters.swapaxes(2, 3).swapaxes(1, 2).reshape(nfilters * channels, ksize, ksize)
    return visualize.show_multiple(filters, ncols=channels)
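The two swapaxes calls are just a transpose from (filter, row, col, channel) to (filter, channel, row, col). A small, self-contained NumPy sketch of the same reshaping, using made-up dimensions (the weight layout is an assumption for illustration):

import numpy as np

# Hypothetical convolution weights laid out the way the snippet expects:
# one column per filter, ksize*ksize values per input channel stacked in rows.
nfilters, ksize, channels = 4, 3, 2
weights = np.arange(ksize * ksize * channels * nfilters).reshape(
    ksize * ksize * channels, nfilters)

filters = weights.T.reshape(nfilters, ksize, ksize, channels)
# swapaxes(2, 3) followed by swapaxes(1, 2) equals one transpose to
# (filter, channel, row, col).
tiles = filters.swapaxes(2, 3).swapaxes(1, 2)
assert np.array_equal(tiles, filters.transpose(0, 3, 1, 2))

# One ksize x ksize tile per (filter, channel) pair, channels across the columns.
tiles = tiles.reshape(nfilters * channels, ksize, ksize)
print(tiles.shape)  # (8, 3, 3)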
Example #4
def select_region(model, times=ALL, layers=ALL, filters=ALL, channels=ALL, apply_prediction=None):
    # Treat the ALL sentinel as "select everything".
    if times is ALL:
        times = range(len(model))

    if layers is ALL:
        # We assume all models have the same structure.
        layers = list(model[0].layers.keys())

    if filters is ALL:
        # Choose all filters for each layer.
        filters = {l: list(range(get_layer_dimensions(model[0].layers[l])[0]))
                   for l in layers}

    if channels is ALL:
        # Choose all channels for each layer.
        channels = {l: list(range(get_layer_dimensions(model[0].layers[l])[2]))
                    for l in layers}
        
    # If filters is given as a flat list, restrict it per layer to valid filter indices.
    if isinstance(filters, list):
        newfilters = {}
        for l in layers:
            (nfilters, ksize, nchannels) = get_layer_dimensions(model[0].layers[l])
            newfilters[l] = sorted(set(filters) & set(range(nfilters)))
        filters = newfilters

    # Likewise if channels is given as a flat list.
    if isinstance(channels, list):
        newchannels = {}
        for l in layers:
            (nfilters, ksize, nchannels) = get_layer_dimensions(model[0].layers[l])
            newchannels[l] = sorted(set(channels) & set(range(nchannels)))
        channels = newchannels
    
    # We now have a list of layer names and two dicts mapping layer name to
    # filter indices and layer name to channel indices.

    # Request a point for each combination of layers, filters, and channels.
    print("Times: %s" % times)
    print("Layers: %s" % layers)
    print("Filters: %s" % filters)
    print("Channels: %s" % channels)

    # Initialize output.
    region = {t: {} for t in times}
    
    for t in times:
        # If the user asks for a prediction, apply it to the whole set of models
        # and keep the intermediate results.
        if apply_prediction is not None:
            # FIXME: This assumes output blobs are named after the user-selected
            # layers with "_cudanet_out" appended.
            blobs = ["%s_cudanet_out" % l for l in layers]
            predicted_model = model[t].predict(data=apply_prediction, output_blobs=blobs)
            print(predicted_model)
            
        for l in layers:
            region[t][l] = {}

            if apply_prediction is not None:
                # FIXME
                shaped_layer = predicted_model["%s_cudanet_out" % l]
                print(shaped_layer.shape)
                shaped_region = shaped_layer[:, :, :, filters[l]]
                flat_region = flatten_filters(shaped_region, len(filters[l]), 1, shaped_layer.shape[1])
                # So we know we're getting a 10x32x32 result; we probably want 10 32x32 images.
                # TODO: figure out the shape of a prediction and map it to our output image shape.
            else:
                shaped_layer = reshape_layer_for_visualization(model[t].layers[l], preserve_dims=True)

                # Two-step fancy indexing: select the requested filters along axis 0,
                # then the requested channels along axis 3.
                shaped_region = shaped_layer[filters[l], :, :, :][:, :, :, channels[l]]
                flat_region = flatten_filters(shaped_region, len(filters[l]), len(channels[l]), shaped_layer.shape[1])

            print(flat_region.shape)
            region[t][l] = show_multiple(flat_region, ncols=len(channels[l]))
            
            # This was here when we wanted one image per filter.
            # for f in filters[l]:
            #     region[t][l][f] = {}
            #     for c in channels[l]:
            #         region[t][l][f][c] = select_point(shaped_layer, f, c)

    # Region should be a dict of {time: {layer: image}}.
    return region
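A hedged usage sketch of select_region, not from the source: it assumes `model` is a time-ordered sequence of checkpoints, as the indexing above implies, and the layer name and filter indices are illustrative.

# Hypothetical call: montages of the first eight filters of "conv1" (all channels)
# at the first and last checkpoints. Names and indices are illustrative only.
models = list(get_models().values())   # assumed to be ordered by training time
region = select_region(models,
                       times=[0, len(models) - 1],
                       layers=["conv1"],
                       filters=list(range(8)),
                       channels=ALL)
latest_conv1 = region[len(models) - 1]["conv1"]   # one image per (time, layer)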