def __init__(self, pthdir, layers=None, device=None, dissectdir=None):
    os.makedirs(pthdir, exist_ok=True)
    self.device = device if device is not None else torch.device('cpu')
    self.dissectdir = dissectdir if dissectdir is not None else (
        os.path.join(pthdir, 'dissect'))
    self.modellock = threading.Lock()

    # Load the generator from the pth file. If the file is not there,
    # download it.
    path_gan_checkpoint = os.path.join(pthdir, 'generator.pth')
    if not os.path.isfile(path_gan_checkpoint):
        wget.download(
            'http://wednesday.csail.mit.edu/gaze/ganclevr/files/generator.pth',
            out=path_gan_checkpoint)
    model = proggan.from_pth_file(
        path_gan_checkpoint,
        map_location=lambda storage, location: storage)
    model.eval()
    self.model = model

    # Get the set of layers of interest.
    # Default: all shallow children except the last.
    if layers is None:
        layers = [name for name, module in model.named_children()][:-1]
    self.layers = layers

    # Modify the model to instrument the given layers.
    retain_layers(model, layers)
    edit_layers(model, layers)

    # Move it to CUDA if wanted.
    model.to(self.device)

    # Determine z dimension from the first convolution's input channels.
    self.z_dimension = [
        c for c in model.modules()
        if isinstance(c, torch.nn.Conv2d)][0].in_channels

    # Run the model on one sample input to determine the output image size
    # as well as the feature size of every instrumented layer.
    z = torch.randn(self.z_dimension)[None, :, None, None].to(self.device)
    output = model(z)
    self.image_shape = output.shape[2:]
    self.layer_shape = {
        layer: tuple(model.retained[layer].shape)
        for layer in layers}

    # Freeze the generator; it is only inspected and edited, never trained.
    for param in self.model.parameters():
        param.requires_grad = False
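# Usage sketch (illustrative only; the enclosing class's name is not shown
# in this excerpt, so `tester` below stands for any instance of it): draw a
# fresh z of the discovered dimension and generate under the model lock,
# mirroring the probe run at the end of __init__ above.
def _example_generate(tester, seed=1):
    rng = torch.Generator().manual_seed(seed)
    z = torch.randn(tester.z_dimension, generator=rng)[
        None, :, None, None].to(tester.device)
    with tester.modellock:
        # Parameters were frozen in __init__, so no grads accumulate.
        return tester.model(z)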
def create_instrumented_model(args, **kwargs):
    '''
    Creates an instrumented model out of a namespace of arguments that
    correspond to ArgumentParser command-line args:
      model: a string to evaluate as a constructor for the model.
      pthfile: (optional) filename of .pth file for the model.
      layers: a list of layers to instrument, defaulted if not provided.
      edit: True to instrument the layers for editing.
      gen: True for a generator model.  One-pixel input assumed.
      imgsize: For non-generator models, (y, x) dimensions for RGB input.
      cuda: True to use CUDA.
    The constructed model will be decorated with the following attributes:
      input_shape: (usually 4d) tensor shape for single-image input.
      output_shape: 4d tensor shape for output.
      feature_shape: map of layer names to 4d tensor shape for featuremaps.
      retained: map of layernames to tensors, filled after every evaluation.
      ablation: if editing, map of layernames to [0..1] alpha values to fill.
      replacement: if editing, map of layernames to values to fill.
    When editing, the feature value x will be replaced by:
      `x = (replacement * ablation) + (x * (1 - ablation))`
    '''
    args = EasyDict(vars(args), **kwargs)

    # Construct the network.
    if args.model is None:
        print_progress('No model specified')
        return None
    if isinstance(args.model, torch.nn.Module):
        model = args.model
    else:
        model = autoimport_eval(args.model)
    # Unwrap any DataParallel-wrapped model.
    if isinstance(model, torch.nn.DataParallel):
        model = next(model.children())

    # Load its state dict, collecting any scalar metadata stored with it.
    meta = {}
    if getattr(args, 'pthfile', None) is not None:
        data = torch.load(args.pthfile)
        if 'state_dict' in data:
            for key in data:
                if isinstance(data[key], numbers.Number):
                    meta[key] = data[key]
            data = data['state_dict']
        model.load_state_dict(data)
    model.meta = meta

    # Decide which layers to instrument.
    if getattr(args, 'layer', None) is not None:
        args.layers = [args.layer]
    if getattr(args, 'layers', None) is None:
        # Skip wrappers with only one named child module.
        container = model
        prefix = ''
        while len(list(container.named_children())) == 1:
            name, container = next(container.named_children())
            prefix += name + '.'
        # Default to all nontrivial top-level layers except the last.
        args.layers = [
            prefix + name
            for name, module in container.named_children()
            if type(module).__module__ not in [
                # Skip ReLU and other activations.
                'torch.nn.modules.activation',
                # Skip pooling layers.
                'torch.nn.modules.pooling']
        ][:-1]
        print_progress('Defaulting to layers: %s' % ' '.join(args.layers))

    # Instrument the layers.
    retain_layers(model, args.layers)
    if getattr(args, 'edit', False):
        edit_layers(model, args.layers)
    model.eval()
    if args.cuda:
        model.cuda()

    # Annotate input, output, and feature shapes.
    annotate_model_shapes(model,
                          gen=getattr(args, 'gen', False),
                          imgsize=getattr(args, 'imgsize', None))
    return model
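# A standalone sketch (added for illustration, not part of the library API)
# of the editing rule documented in the docstring above: with an alpha mask
# `ablation` in [0, 1], the instrumented featuremap becomes a per-channel
# blend of the original activation and a replacement value.  The tensor
# shapes below are arbitrary example choices.
def _edit_rule_demo():
    x = torch.randn(1, 4, 8, 8)           # original (N, C, H, W) featuremap
    replacement = torch.zeros_like(x)     # e.g. zeroing ablated units
    ablation = torch.zeros(1, 4, 1, 1)    # per-channel alpha values
    ablation[0, 2, 0, 0] = 1.0            # fully ablate channel 2
    edited = (replacement * ablation) + (x * (1 - ablation))
    assert torch.equal(edited[0, 2], torch.zeros(8, 8))  # channel 2 zeroed
    assert torch.equal(edited[0, 0], x[0, 0])            # others untouched
    return edited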
def main():
    parser = argparse.ArgumentParser(description='GAN sample making utility')
    parser.add_argument('--model', type=str, default=None,
                        help='constructor for the model to test')
    parser.add_argument('--pthfile', type=str, default=None,
                        help='filename of .pth file for the model')
    parser.add_argument('--outdir', type=str, default='images',
                        help='directory for image output')
    parser.add_argument('--size', type=int, default=100,
                        help='number of images to output')
    parser.add_argument('--test_size', type=int, default=None,
                        help='number of images to test')
    parser.add_argument('--layer', type=str, default=None,
                        help='layer to inspect')
    parser.add_argument('--seed', type=int, default=1,
                        help='seed')
    parser.add_argument('--maximize_units', type=int, nargs='+', default=None,
                        help='units to maximize')
    parser.add_argument('--ablate_units', type=int, nargs='+', default=None,
                        help='units to ablate')
    parser.add_argument('--quiet', action='store_true', default=False,
                        help='silences console output')
    if len(sys.argv) == 1:
        parser.print_usage(sys.stderr)
        sys.exit(1)
    args = parser.parse_args()
    verbose_progress(not args.quiet)

    # Instantiate the model.
    model = autoimport_eval(args.model)
    if args.pthfile is not None:
        data = torch.load(args.pthfile)
        if 'state_dict' in data:
            meta = {}
            for key in data:
                if isinstance(data[key], numbers.Number):
                    meta[key] = data[key]
            data = data['state_dict']
        model.load_state_dict(data)
    # Unwrap any DataParallel-wrapped model.
    if isinstance(model, torch.nn.DataParallel):
        model = next(model.children())

    # Examine first conv in model to determine input feature size.
    first_layer = [c for c in model.modules()
                   if isinstance(c, (torch.nn.Conv2d,
                                     torch.nn.ConvTranspose2d,
                                     torch.nn.Linear))][0]
    # 4d input if convolutional, 2d input if first layer is linear.
    if isinstance(first_layer, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
        z_channels = first_layer.in_channels
        spatialdims = (1, 1)
    else:
        z_channels = first_layer.in_features
        spatialdims = ()

    # Instrument the model if needed.
    if args.maximize_units is not None:
        retain_layers(model, [args.layer])
    model.cuda()

    # Get the sample of z vectors.
    if args.maximize_units is None:
        indexes = torch.arange(args.size)
        z_sample = standard_z_sample(args.size, z_channels, seed=args.seed)
        z_sample = z_sample.view(tuple(z_sample.shape) + spatialdims)
    else:
        # By default, if maximizing units, get a 'top 5%' sample.
        if args.test_size is None:
            args.test_size = args.size * 20
        z_universe = standard_z_sample(args.test_size, z_channels,
                                       seed=args.seed)
        z_universe = z_universe.view(tuple(z_universe.shape) + spatialdims)
        indexes = get_highest_znums(model, z_universe, args.maximize_units,
                                    args.size, seed=args.seed)
        z_sample = z_universe[indexes]

    if args.ablate_units:
        edit_layers(model, [args.layer])
        dims = max(2, max(args.ablate_units) + 1)  # >=2 to avoid broadcast
        model.ablation[args.layer] = torch.zeros(dims)
        model.ablation[args.layer][args.ablate_units] = 1

    save_znum_images(args.outdir, model, z_sample, indexes,
                     args.layer, args.ablate_units)
    copy_lightbox_to(args.outdir)
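# Example invocation (a sketch: the script filename, layer name, and unit
# numbers are hypothetical; --model takes any constructor expression
# accepted by autoimport_eval).  This would render 100 samples with units
# 10 and 20 of the named layer ablated:
#
#   python make_samples.py \
#       --model 'proggan.from_pth_file("generator.pth")' \
#       --layer layer4 --size 100 --ablate_units 10 20 --outdir images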
def main():
    # Training settings
    def strpair(arg):
        p = tuple(arg.split(':'))
        if len(p) == 1:
            p = p + p
        return p

    parser = argparse.ArgumentParser(
        description='Net dissect utility',
        epilog=textwrap.dedent(help_epilog),
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--model', type=str, default=None,
                        help='constructor for the model to test')
    parser.add_argument('--pthfile', type=str, default=None,
                        help='filename of .pth file for the model')
    parser.add_argument('--outdir', type=str, default='dissect',
                        help='directory for dissection output')
    parser.add_argument('--layers', type=strpair, nargs='+',
                        help='space-separated list of layer names to edit'
                        + ', in the form layername[:reportedname]')
    parser.add_argument('--classes', type=str, nargs='+',
                        help='space-separated list of class names to ablate')
    parser.add_argument('--metric', type=str, default='iou',
                        help='ordering metric for selecting units')
    parser.add_argument('--startcount', type=int, default=1,
                        help='first number of units to ablate')
    parser.add_argument('--unitcount', type=int, default=30,
                        help='maximum number of units to ablate')
    parser.add_argument('--segmenter', type=str, default='dataset/broden',
                        help='directory containing segmentation dataset')
    parser.add_argument('--netname', type=str, default=None,
                        help='name for network in generated reports')
    parser.add_argument('--batch_size', type=int, default=5,
                        help='batch size for forward pass')
    parser.add_argument('--size', type=int, default=1000,
                        help='number of images to test')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA usage')
    parser.add_argument('--quiet', action='store_true', default=False,
                        help='silences console output')
    if len(sys.argv) == 1:
        parser.print_usage(sys.stderr)
        sys.exit(1)
    args = parser.parse_args()

    # Set up console output.
    verbose_progress(not args.quiet)

    # Construct the network.
    if args.model is None:
        print_progress('No model specified')
        sys.exit(1)

    # Set up CUDA; cudnn.benchmark speeds up fixed-size forward passes.
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    if args.cuda:
        torch.backends.cudnn.benchmark = True

    model = autoimport_eval(args.model)
    # Unwrap any DataParallel-wrapped model.
    if isinstance(model, torch.nn.DataParallel):
        model = next(model.children())

    # Load its state dict, collecting any scalar metadata stored with it.
    meta = {}
    if args.pthfile is None:
        print_progress('Dissecting model without pth file.')
    else:
        data = torch.load(args.pthfile)
        if 'state_dict' in data:
            for key in data:
                if isinstance(data[key], numbers.Number):
                    meta[key] = data[key]
            data = data['state_dict']
        model.load_state_dict(data)

    # Instrument the model and prepare it for eval.
    if not args.layers:
        # Skip wrappers with only one named child module.
        container = model
        prefix = ''
        while len(list(container.named_children())) == 1:
            name, container = next(container.named_children())
            prefix += name + '.'
        # Default to all nontrivial top-level layers except the last.
        args.layers = [
            prefix + name
            for name, module in container.named_children()
            if type(module).__module__ not in [
                # Skip ReLU and other activations.
                'torch.nn.modules.activation',
                # Skip pooling layers.
                'torch.nn.modules.pooling']
        ][:-1]
        print_progress('Defaulting to layers: %s' % ' '.join(args.layers))
    edit_layers(model, args.layers)
    model.eval()
    if args.cuda:
        model.cuda()

    # Set up the output directory and verify write access.
    if args.outdir is None:
        args.outdir = os.path.join('dissect', type(model).__name__)
    print_progress('Writing output into %s.' % args.outdir)
    os.makedirs(args.outdir, exist_ok=True)
    train_dataset = None

    # Examine first conv in model to determine input feature size.
    first_layer = [
        c for c in model.modules()
        if isinstance(c, (torch.nn.Conv2d, torch.nn.ConvTranspose2d,
                          torch.nn.Linear))][0]
    # 4d input if convolutional, 2d input if first layer is linear.
    if isinstance(first_layer, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)):
        sample = standard_z_sample(
            args.size, first_layer.in_channels)[:, :, None, None]
        train_sample = standard_z_sample(
            args.size, first_layer.in_channels, seed=2)[:, :, None, None]
    else:
        sample = standard_z_sample(args.size, first_layer.in_features)
        train_sample = standard_z_sample(args.size, first_layer.in_features,
                                         seed=2)
    dataset = TensorDataset(sample)
    train_dataset = TensorDataset(train_sample)
    recovery = autoimport_eval(args.segmenter)

    # Now do the actual work.
    device = next(model.parameters()).device
    labelnames, catnames = recovery.get_label_and_category_names(dataset)
    label_category = [catnames.index(c) for l, c in labelnames]
    labelnum_from_name = {n[0]: i for i, n in enumerate(labelnames)}
    segloader = torch.utils.data.DataLoader(
        dataset, batch_size=args.batch_size, num_workers=10,
        pin_memory=(device.type == 'cuda'))

    # Load the dissection results that rank units for each class.
    with open(os.path.join(args.outdir, 'dissect.json'), 'r') as f:
        dissect = EasyDict(json.load(f))

    # Index the dissection layers by layer name.
    dissect_layer = {lrec.layer: lrec for lrec in dissect.layers}

    # First, collect a baseline with no units ablated.
    for l in model.ablation:
        model.ablation[l] = None
    baseline = count_segments(recovery, segloader, model)

    # For each class and layer, ablate the top-ranked units one by one.
    progress = default_progress()
    for classname in progress(args.classes):
        post_progress(c=classname)
        for layername in progress(model.ablation):
            post_progress(l=layername)
            rankname = '%s-%s' % (classname, args.metric)
            measurements = {}
            classnum = labelnum_from_name[classname]
            try:
                ranking = next(r for r in dissect_layer[layername].rankings
                               if r.name == rankname)
            except StopIteration:
                print('%s not found' % rankname)
                sys.exit(1)
            ordering = numpy.argsort(ranking.score)

            # Check if this (class, layer) pair is already done.
            ablationdir = os.path.join(args.outdir, layername, 'ablation')
            if os.path.isfile(
                    os.path.join(ablationdir, '%s.json' % rankname)):
                with open(os.path.join(ablationdir,
                                       '%s.json' % rankname)) as f:
                    data = EasyDict(json.load(f))
                # If the unit ordering is not the same, something is wrong.
                if not all(a == o for a, o in
                           zip(data.ablation_units, ordering)):
                    import pdb
                    pdb.set_trace()
                    continue
                if len(data.ablation_effects) >= args.unitcount:
                    continue  # file already done.
                measurements = data.ablation_effects

            for count in progress(
                    range(args.startcount,
                          min(args.unitcount, len(ordering)) + 1),
                    desc='units'):
                if str(count) in measurements:
                    continue
                # Ablate the top `count` units in this layer only.
                ablation = numpy.zeros(len(ranking.score), dtype='float32')
                ablation[ordering[:count]] = 1
                for l in model.ablation:
                    model.ablation[l] = ablation if layername == l else None
                m = count_segments(
                    recovery, segloader, model)[classnum].item()
                print_progress(
                    '%s %s %d units (#%d), %g -> %g' %
                    (layername, rankname, count, ordering[count - 1].item(),
                     baseline[classnum].item(), m))
                measurements[str(count)] = m
            os.makedirs(ablationdir, exist_ok=True)
            with open(os.path.join(ablationdir,
                                   '%s.json' % rankname), 'w') as f:
                json.dump(dict(classname=classname,
                               classnum=classnum,
                               baseline=baseline[classnum].item(),
                               layer=layername,
                               metric=args.metric,
                               ablation_units=ordering.tolist(),
                               ablation_effects=measurements), f)
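# Standalone sketch (illustrative only) of how the loop above grows its
# cumulative ablation masks: numpy.argsort orders units by ranking score,
# and the mask for `count` flags the first `count` units in that order.
# `scores` stands for any per-unit ranking array like ranking.score above.
def _example_ablation_masks(scores, unitcount):
    ordering = numpy.argsort(scores)
    masks = []
    for count in range(1, unitcount + 1):
        ablation = numpy.zeros(len(scores), dtype='float32')
        ablation[ordering[:count]] = 1  # top `count` units get alpha 1
        masks.append(ablation)
    return masks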