Code example #1
def main():
    #pdb.set_trace()
    parser = argparse.ArgumentParser(description='Finds images in a training set that cause max activation for a network; saves results in a pickled NetMaxTracker.')
    parser.add_argument('--N', type = int, default = 9, help = 'note and save top N activations')
    parser.add_argument('--gpu', action = 'store_true', default = settings.caffevis_mode_gpu, help = 'use gpu')
    parser.add_argument('--net_prototxt', type = str, default = settings.caffevis_deploy_prototxt, help = 'network prototxt to load')
    parser.add_argument('--net_weights', type = str, default = settings.caffevis_network_weights, help = 'network weights to load')
    parser.add_argument('--datadir', type = str, default = settings.static_files_dir, help = 'directory to look for files in')
    parser.add_argument('--outfile', type=str, default = os.path.join(settings.caffevis_outputs_dir, 'find_max_acts_output.pickled'), help='output filename for pkl')
    parser.add_argument('--outdir', type = str, default = settings.caffevis_outputs_dir, help = 'Which output directory to use. Files are output into outdir/layer/unit_%%04d/{max_histogram}.png')
    parser.add_argument('--do-histograms', action = 'store_true', default = settings.max_tracker_do_histograms, help = 'Output a histogram image file containing the histogram of max values per channel')
    parser.add_argument('--do-correlation', action = 'store_true', default = settings.max_tracker_do_correlation, help = 'Output a correlation image file containing the correlation of channels per layer')
    parser.add_argument('--search-min', action='store_true', default=True, help='Should we also search for minimal activations?')

    args = parser.parse_args()

    settings.caffevis_deploy_prototxt = args.net_prototxt
    settings.caffevis_network_weights = args.net_weights

    net, data_mean = load_network(settings)
    #data_mean = np.load('/home/mbm/Desktop/Aux/models/alexnet/places365CNN_mean.npy')

    # validate batch size
    if settings.is_siamese and settings._calculated_siamese_network_format == 'siamese_batch_pair':
        # currently, no batch support for siamese_batch_pair networks;
        # it could be added by simply handling the batch indexes properly, but it should be thoroughly tested
        assert (settings.max_tracker_batch_size == 1)

    # set network batch size
    current_input_shape = net.blobs[net.inputs[0]].shape
    current_input_shape[0] = settings.max_tracker_batch_size
    net.blobs[net.inputs[0]].reshape(*current_input_shape)
    net.reshape()

    with WithTimer('Scanning images'):
        if settings.is_siamese:
            net_max_tracker = scan_pairs_for_maxes(settings, net, args.datadir, args.N, args.outdir, args.search_min)
        else: # normal operation
            net_max_tracker = scan_images_for_maxes(settings, net, args.datadir, args.N, args.outdir, args.search_min)

    save_max_tracker_to_file(args.outfile, net_max_tracker)

    if args.do_correlation:
        net_max_tracker.calculate_correlation(args.outdir)

    if args.do_histograms:
        net_max_tracker.calculate_histograms(args.outdir)
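
A minimal usage sketch, assuming the script is saved as find_max_acts.py (the filename is an assumption suggested by the default output name; the flags come from the argparse definitions above):

    # Hypothetical invocation; the script filename is an assumption.
    python find_max_acts.py --N 9 --gpu \
        --datadir /path/to/images \
        --outfile outputs/find_max_acts_output.pickled \
        --do-histograms --do-correlation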
Code example #2
def calculate_weights_histograms():
    try:
        # if model in command line change to it
        if '--model' in sys.argv:
            change_model_to_load(sys.argv[sys.argv.index('--model') + 1])

        import settings

        parser = argparse.ArgumentParser(
            description='Generates weights histograms for each layer and its units.'
        )
        parser.add_argument(
            '--force',
            action='store_true',
            default=False,
            help='Activate force mode. Overwrites existing files')
        parser.add_argument(
            '--scale_by_layer',
            action='store_true',
            default=False,
            help='Every unit of a layer has the same scale for the histogram.')
        parser.add_argument(
            '--outdir',
            type=str,
            default=settings.caffevis_outputs_dir,
            help=
            'Which output directory to use. Files are output into outdir/layer/unit_%%04d/{weights_histogram}.png'
        )
        parser.add_argument(
            '--model',
            type=str,
            default=None,
            help=
            'Name of the model you want to change to. This overwrites the settings made in files.'
        )
        args = parser.parse_args()

        net, data_mean = load_network(settings)
        calculate_weight_bias_histograms_for_net(net,
                                                 settings,
                                                 args.outdir,
                                                 args.scale_by_layer,
                                                 force=args.force)

    finally:
        clean_temp_file()
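
A minimal usage sketch, assuming calculate_weights_histograms is wired up as the script's entry point (the filename and MODEL_NAME are placeholders, not taken from the source):

    # Hypothetical invocation; filename and MODEL_NAME are placeholders.
    python calculate_weights_histograms.py --model MODEL_NAME --scale_by_layer --force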
Code example #3
def main():
    parser = argparse.ArgumentParser(description='Loads a pickled NetMaxTracker and outputs one or more of {the patches of the image, a deconv patch, a backprop patch} associated with the maxes.')
    parser.add_argument('--N',            type = int, default = 9, help = 'Note and save top N activations.')
    parser.add_argument('--gpu',          action = 'store_true', default=settings.caffevis_mode_gpu, help = 'Use gpu.')
    parser.add_argument('--do-maxes',     action = 'store_true', default=settings.max_tracker_do_maxes, help = 'Output max patches.')
    parser.add_argument('--do-deconv',    action = 'store_true', default=settings.max_tracker_do_deconv, help = 'Output deconv patches.')
    parser.add_argument('--do-deconv-norm', action = 'store_true', default=settings.max_tracker_do_deconv_norm, help = 'Output deconv-norm patches.')
    parser.add_argument('--do-backprop',  action = 'store_true', default=settings.max_tracker_do_backprop, help = 'Output backprop patches.')
    parser.add_argument('--do-backprop-norm', action = 'store_true', default=settings.max_tracker_do_backprop_norm, help = 'Output backprop-norm patches.')
    parser.add_argument('--do-info',      action = 'store_true', default=settings.max_tracker_do_info, help = 'Output info file containing max filenames and labels.')
    parser.add_argument('--idx-begin',    type = int, default = None, help = 'Start at this unit (default: all units).')
    parser.add_argument('--idx-end',      type = int, default = None, help = 'End at this unit (default: all units).')
    
    parser.add_argument('--nmt_pkl',      type = str, default = os.path.join(settings.caffevis_outputs_dir, 'find_max_acts_output.pickled'), help = 'Which pickled NetMaxTracker to load.')
    parser.add_argument('--net_prototxt', type = str, default = settings.caffevis_deploy_prototxt, help = 'network prototxt to load')
    parser.add_argument('--net_weights',  type = str, default = settings.caffevis_network_weights, help = 'network weights to load')
    parser.add_argument('--datadir',      type = str, default = settings.static_files_dir, help = 'directory to look for files in')
    parser.add_argument('--filelist',     type = str, default = settings.static_files_input_file, help = 'List of image files to consider, one per line. Must be the same filelist used to produce the NetMaxTracker!')
    parser.add_argument('--outdir',       type = str, default = settings.caffevis_outputs_dir, help = 'Which output directory to use. Files are output into outdir/layer/unit_%%04d/{maxes,deconv,backprop}_%%03d.png')
    parser.add_argument('--search-min',    action='store_true', default=False, help='Should we also search for minimal activations?')
    args = parser.parse_args()

    settings.caffevis_deploy_prototxt = args.net_prototxt
    settings.caffevis_network_weights = args.net_weights

    net, data_mean = load_network(settings)

    # validate batch size
    if settings.is_siamese and settings._calculated_siamese_network_format == 'siamese_batch_pair':
        # currently, no batch support for siamese_batch_pair networks;
        # it could be added by simply handling the batch indexes properly, but it should be thoroughly tested
        assert (settings.max_tracker_batch_size == 1)

    # set network batch size
    current_input_shape = net.blobs[net.inputs[0]].shape
    current_input_shape[0] = settings.max_tracker_batch_size
    net.blobs[net.inputs[0]].reshape(*current_input_shape)
    net.reshape()

    assert args.do_maxes or args.do_deconv or args.do_deconv_norm or args.do_backprop or args.do_backprop_norm or args.do_info, 'Specify at least one do_* option to output.'

    siamese_helper = SiameseHelper(settings.layers_list)

    nmt = load_max_tracker_from_file(args.nmt_pkl)

    for layer_name in settings.layers_to_output_in_offline_scripts:

        print 'Started work on layer %s' % (layer_name)

        normalized_layer_name = siamese_helper.normalize_layer_name_for_max_tracker(layer_name)

        mt = nmt.max_trackers[normalized_layer_name]

        # fall back to the full unit range when --idx-begin / --idx-end are not given
        idx_begin = 0 if args.idx_begin is None else args.idx_begin
        idx_end = mt.max_vals.shape[0] if args.idx_end is None else args.idx_end

        with WithTimer('Saved %d images per unit for %s units %d:%d.' % (args.N, normalized_layer_name, idx_begin, idx_end)):

            output_max_patches(settings, mt, net, normalized_layer_name, idx_begin, idx_end,
                               args.N, args.datadir, args.filelist, args.outdir, False,
                               (args.do_maxes, args.do_deconv, args.do_deconv_norm, args.do_backprop, args.do_backprop_norm, args.do_info))

            if args.search_min:
                output_max_patches(settings, mt, net, normalized_layer_name, idx_begin, idx_end,
                                   args.N, args.datadir, args.filelist, args.outdir, True,
                                   (args.do_maxes, args.do_deconv, args.do_deconv_norm, args.do_backprop, args.do_backprop_norm, args.do_info))
Code example #4
def main():
    try:
        # if model in command line change to it
        if '--model' in sys.argv:
            change_model_to_load(sys.argv[sys.argv.index('--model') + 1])

        import settings

        parser = argparse.ArgumentParser(
            description=
            'Script to find, with or without regularization, images that cause high or low activations of specific neurons in a network via numerical optimization. Settings are read from settings.py, overridden in settings_MODEL.py and settings_user.py, and may be further overridden on the command line.',
            formatter_class=lambda prog: argparse.ArgumentDefaultsHelpFormatter(
                prog, width=100))

        # Network and data options
        parser.add_argument('--caffe-root',
                            type=str,
                            default=settings.caffevis_caffe_root,
                            help='Path to caffe root directory.')
        parser.add_argument('--deploy-proto',
                            type=str,
                            default=settings.caffevis_deploy_prototxt,
                            help='Path to caffe network prototxt.')
        parser.add_argument('--net-weights',
                            type=str,
                            default=settings.caffevis_network_weights,
                            help='Path to caffe network weights.')
        parser.add_argument(
            '--channel-swap-to-rgb',
            type=str,
            default='(2,1,0)',
            help=
            'Permutation to apply to channels to change to RGB space for plotting. Hint: (0,1,2) if your network is trained for RGB, (2,1,0) if it is trained for BGR.'
        )
        parser.add_argument('--data-size',
                            type=str,
                            default='(227,227)',
                            help='Size of network input.')

        #### FindParams

        # Where to start
        parser.add_argument(
            '--start-at',
            type=str,
            default='mean_plus_rand',
            choices=('mean_plus_rand', 'randu', 'mean'),
            help='How to generate x0, the initial point used in optimization.')
        parser.add_argument(
            '--rand-seed',
            type=int,
            default=settings.optimize_image_rand_seed,
            help=
            'Random seed used for generating the start-at image (use different seeds to generate different images).'
        )
        parser.add_argument(
            '--batch-size',
            type=int,
            default=settings.optimize_image_batch_size,
            help=
            'Batch size used for generating several images; each batch index is used as a random seed.'
        )

        # What to optimize
        parser.add_argument(
            '--push-layers',
            type=str,
            nargs='+',
            default=settings.layers_to_output_in_offline_scripts,
            help=
            'Names of the layers that contain the desired neurons whose values are optimized.'
        )
        parser.add_argument(
            '--push-channel',
            type=int,
            default=130,
            help=
            'Channel number for desired neuron whose value is optimized (channel for conv, neuron index for FC).'
        )
        parser.add_argument(
            '--push-spatial',
            type=str,
            default='None',
            help=
            'Which spatial location to push for conv layers. For FC layers, set this to None. For conv layers, set it to a tuple, e.g. when using `--push-layer conv5` on AlexNet, --push-spatial (6,6) will maximize the center unit of the 13x13 spatial grid.'
        )
        parser.add_argument(
            '--push-dir',
            type=float,
            default=1,
            help=
            'Which direction to push the activation of the selected neuron, that is, the value used to begin backprop. For example, use 1 to maximize the selected neuron activation and -1 to minimize it.'
        )

        # Use regularization?
        parser.add_argument('--decay',
                            type=float,
                            default=settings.optimize_image_decay,
                            help='Amount of L2 decay to use.')
        parser.add_argument(
            '--blur-radius',
            type=float,
            default=settings.optimize_image_blur_radius,
            help=
            'Radius in pixels of blur to apply after each BLUR_EVERY steps. If 0, perform no blurring. Blur sizes between 0 and 0.3 work poorly.'
        )
        parser.add_argument(
            '--blur-every',
            type=int,
            default=settings.optimize_image_blue_every,
            help='Blur every BLUR_EVERY steps. If 0, perform no blurring.')
        parser.add_argument(
            '--small-val-percentile',
            type=float,
            default=0,
            help=
            'Induce sparsity by setting pixels with absolute value under SMALL_VAL_PERCENTILE percentile to 0. Not discussed in paper. 0 to disable.'
        )
        parser.add_argument(
            '--small-norm-percentile',
            type=float,
            default=0,
            help=
            'Induce sparsity by setting pixels with norm under SMALL_NORM_PERCENTILE percentile to 0. \\theta_{n_pct} from the paper. 0 to disable.'
        )
        parser.add_argument(
            '--px-benefit-percentile',
            type=float,
            default=0,
            help=
            'Induce sparsity by setting pixels with contribution under PX_BENEFIT_PERCENTILE percentile to 0. Mentioned briefly in paper but not used. 0 to disable.'
        )
        parser.add_argument(
            '--px-abs-benefit-percentile',
            type=float,
            default=0,
            help=
            'Induce sparsity by setting pixels with absolute contribution under PX_ABS_BENEFIT_PERCENTILE percentile to 0. \\theta_{c_pct} from the paper. 0 to disable.'
        )

        # How much to optimize
        parser.add_argument(
            '--lr-policy',
            type=str,
            default=settings.optimize_image_lr_policy,
            choices=LR_POLICY_CHOICES,
            help='Learning rate policy. See description in lr-params.')
        parser.add_argument(
            '--lr-params',
            type=str,
            default=settings.optimize_image_lr_params,
            help=
            'Learning rate params, specified as a string that evaluates to a Python dict. Params that must be provided depend on which lr-policy is selected. The "constant" policy requires the "lr" key and uses the constant given learning rate. The "progress" policy requires the "max_lr" and "desired_prog" keys and scales the learning rate such that the objective function will change by an amount equal to DESIRED_PROG under a linear objective assumption, except the LR is limited to MAX_LR. The "progress01" policy requires the "max_lr", "early_prog", and "late_prog_mult" keys and is tuned for optimizing neurons with outputs in the [0,1] range, e.g. neurons on a softmax layer. Under this policy optimization slows down as the output approaches 1 (see code for details).'
        )
        parser.add_argument(
            '--max-iters',
            type=int,
            nargs='+',
            default=settings.optimize_image_max_iters,
            help='List of iteration counts for the optimization loop, cycled over the push layers.')

        # Where to save results
        parser.add_argument(
            '--output-prefix',
            type=str,
            default=settings.optimize_image_output_prefix,
            help=
            'Output path and filename prefix (default: outputs/%%(p.push_layer)s/unit_%%(p.push_channel)04d/opt_%%(r.batch_index)03d)'
        )
        parser.add_argument(
            '--brave',
            action='store_true',
            default=True,
            help=
            'Allow overwriting existing results files instead of cowardly refusing to overwrite them.'
        )
        parser.add_argument(
            '--skipbig',
            action='store_true',
            default=True,
            help=
            'Skip outputting large *info_big.pkl files (contains pickled version of x0, last x, best x, and the first x that attained the max on the specified layer).'
        )
        parser.add_argument(
            '--skipsmall',
            action='store_true',
            default=True,
            help=
            'Skip outputting small *info.pkl files (contains pickled version of ...).'
        )
        parser.add_argument(
            '--model',
            type=str,
            default=None,
            help=
            'Name of the model you want to change to. This overwrites the settings made in files.'
        )
        args = parser.parse_args()

        # Finish parsing args

        lr_params = parse_and_validate_lr_params(parser, args.lr_policy,
                                                 args.lr_params)
        push_spatial = parse_and_validate_push_spatial(parser,
                                                       args.push_spatial)

        settings.caffevis_deploy_prototxt = args.deploy_proto
        settings.caffevis_network_weights = args.net_weights

        net, data_mean = load_network(settings)

        # validate batch size
        if settings.is_siamese and settings.siamese_network_format == 'siamese_batch_pair':
            # currently, no batch support for siamese_batch_pair networks;
            # it could be added by simply handling the batch indexes properly, but it should be thoroughly tested
            assert (settings.max_tracker_batch_size == 1)

        current_data_shape = net.blobs['data'].shape
        net.blobs['data'].reshape(args.batch_size, current_data_shape[1],
                                  current_data_shape[2], current_data_shape[3])
        net.reshape()

        labels = None
        if settings.caffevis_labels:
            labels = read_label_file(settings.caffevis_labels)

        if data_mean is not None:
            if len(data_mean.shape) == 3:
                batched_data_mean = np.repeat(data_mean[np.newaxis, :, :, :],
                                              args.batch_size,
                                              axis=0)
            elif len(data_mean.shape) == 1:
                data_mean = data_mean[np.newaxis, :, np.newaxis, np.newaxis]
                batched_data_mean = np.tile(
                    data_mean, (args.batch_size, 1, current_data_shape[2],
                                current_data_shape[3]))
        else:
            batched_data_mean = data_mean

        optimizer = GradientOptimizer(
            settings,
            net,
            batched_data_mean,
            labels=labels,
            label_layers=settings.caffevis_label_layers,
            channel_swap_to_rgb=settings.caffe_net_channel_swap)

        if not args.push_layers:
            print "ERROR: No layers to work on, please set layers_to_output_in_offline_scripts to list of layers"
            return

        # go over push layers
        for count, push_layer in enumerate(args.push_layers):

            top_name = layer_name_to_top_name(net, push_layer)
            blob = net.blobs[top_name].data
            is_spatial = (len(blob.shape) == 4)
            channels = blob.shape[1]

            # get layer definition
            layer_def = settings._layer_name_to_record[push_layer]

            if is_spatial:
                push_spatial = (layer_def.filter[0] / 2,
                                layer_def.filter[1] / 2)
            else:
                push_spatial = (0, 0)

            # if channels defined in settings file, use them
            if settings.optimize_image_channels:
                channels_list = settings.optimize_image_channels
            else:
                channels_list = range(channels)

            # go over channels
            for current_channel in channels_list:
                params = FindParams(
                    start_at=args.start_at,
                    rand_seed=args.rand_seed,
                    batch_size=args.batch_size,
                    push_layer=push_layer,
                    push_channel=current_channel,
                    push_spatial=push_spatial,
                    push_dir=args.push_dir,
                    decay=args.decay,
                    blur_radius=args.blur_radius,
                    blur_every=args.blur_every,
                    small_val_percentile=args.small_val_percentile,
                    small_norm_percentile=args.small_norm_percentile,
                    px_benefit_percentile=args.px_benefit_percentile,
                    px_abs_benefit_percentile=args.px_abs_benefit_percentile,
                    lr_policy=args.lr_policy,
                    lr_params=lr_params,
                    max_iter=args.max_iters[count % len(args.max_iters)],
                    is_spatial=is_spatial,
                )

                optimizer.run_optimize(params,
                                       prefix_template=args.output_prefix,
                                       brave=args.brave,
                                       skipbig=args.skipbig,
                                       skipsmall=args.skipsmall)
    finally:
        clean_temp_file()
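
The data-mean batching above can be exercised in isolation. A standalone NumPy sketch (the per-channel values are illustrative, not taken from the source):

    import numpy as np

    batch_size, h, w = 4, 227, 227

    # Case 1: a full (C, H, W) mean image is repeated along a new batch axis.
    mean_chw = np.zeros((3, h, w), dtype=np.float32)
    batched = np.repeat(mean_chw[np.newaxis, :, :, :], batch_size, axis=0)
    assert batched.shape == (batch_size, 3, h, w)

    # Case 2: a per-channel (C,) mean vector is broadcast to (N, C, H, W).
    mean_c = np.array([104.0, 117.0, 123.0], dtype=np.float32)  # illustrative BGR means
    mean_c = mean_c[np.newaxis, :, np.newaxis, np.newaxis]      # shape (1, C, 1, 1)
    batched = np.tile(mean_c, (batch_size, 1, h, w))
    assert batched.shape == (batch_size, 3, h, w)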
Code example #5
def main():
    parser = get_parser()
    args = parser.parse_args()

    # Finish parsing args

    lr_params = parse_and_validate_lr_params(parser, args.lr_policy,
                                             args.lr_params)
    push_spatial = parse_and_validate_push_spatial(parser, args.push_spatial)

    settings.caffevis_deploy_prototxt = args.deploy_proto
    settings.caffevis_network_weights = args.net_weights

    net, data_mean = load_network(settings)

    # validate batch size
    if settings.is_siamese and settings.siamese_network_format == 'siamese_batch_pair':
    # currently, no batch support for siamese_batch_pair networks;
    # it could be added by simply handling the batch indexes properly, but it should be thoroughly tested
        assert (settings.max_tracker_batch_size == 1)

    current_data_shape = net.blobs['data'].shape
    net.blobs['data'].reshape(args.batch_size, current_data_shape[1],
                              current_data_shape[2], current_data_shape[3])
    net.reshape()

    labels = None
    if settings.caffevis_labels:
        labels = read_label_file(settings.caffevis_labels)

    if data_mean is not None:
        if len(data_mean.shape) == 3:
            batched_data_mean = np.repeat(data_mean[np.newaxis, :, :, :],
                                          args.batch_size,
                                          axis=0)
        elif len(data_mean.shape) == 1:
            data_mean = data_mean[np.newaxis, :, np.newaxis, np.newaxis]
            batched_data_mean = np.tile(
                data_mean, (args.batch_size, 1, current_data_shape[2],
                            current_data_shape[3]))
    else:
        batched_data_mean = data_mean

    optimizer = GradientOptimizer(
        settings,
        net,
        batched_data_mean,
        labels=labels,
        label_layers=settings.caffevis_label_layers,
        channel_swap_to_rgb=settings.caffe_net_channel_swap)

    if not args.push_layers:
        print "ERROR: No layers to work on, please set layers_to_output_in_offline_scripts to list of layers"
        return

    # go over push layers
    for count, push_layer in enumerate(args.push_layers):

        top_name = layer_name_to_top_name(net, push_layer)
        blob = net.blobs[top_name].data
        is_spatial = (len(blob.shape) == 4)
        channels = blob.shape[1]

        # get layer definition
        layer_def = settings._layer_name_to_record[push_layer]

        if is_spatial:
            push_spatial = (layer_def.filter[0] / 2, layer_def.filter[1] / 2)
        else:
            push_spatial = (0, 0)

        # if channels defined in settings file, use them
        if settings.optimize_image_channels:
            channels_list = settings.optimize_image_channels
        else:
            channels_list = range(channels)

        # go over channels
        for current_channel in channels_list:
            params = FindParams(
                start_at=args.start_at,
                rand_seed=args.rand_seed,
                batch_size=args.batch_size,
                push_layer=push_layer,
                push_channel=current_channel,
                push_spatial=push_spatial,
                push_dir=args.push_dir,
                decay=args.decay,
                blur_radius=args.blur_radius,
                blur_every=args.blur_every,
                small_val_percentile=args.small_val_percentile,
                small_norm_percentile=args.small_norm_percentile,
                px_benefit_percentile=args.px_benefit_percentile,
                px_abs_benefit_percentile=args.px_abs_benefit_percentile,
                lr_policy=args.lr_policy,
                lr_params=lr_params,
                max_iter=args.max_iters[count % len(args.max_iters)],
                is_spatial=is_spatial,
            )

            optimizer.run_optimize(params,
                                   prefix_template=args.output_prefix,
                                   brave=args.brave,
                                   skipbig=args.skipbig,
                                   skipsmall=args.skipsmall)
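
The --lr-params option in code examples #4 and #5 takes a string that evaluates to a Python dict; a sketch of plausible values for each policy (the keys come from the --lr-params help text, the numbers are illustrative assumptions):

    # Illustrative values only; keys are taken from the --lr-params help text.
    lr_params_constant   = "{'lr': 1.0}"
    lr_params_progress   = "{'max_lr': 100.0, 'desired_prog': 2.0}"
    lr_params_progress01 = "{'max_lr': 100.0, 'early_prog': 0.02, 'late_prog_mult': 0.1}"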