def get_execution_engine(args):
    """Create or restore the execution engine specified by command-line args.

    Loads the vocabulary, then either restores a saved engine from
    ``args.execution_engine_start_from`` or builds a fresh ``ModuleNet``
    from the module/classifier hyperparameters in ``args``.  The engine is
    moved to the GPU and put in training mode before being returned.

    Returns:
        (ee, kwargs): the execution engine and the keyword arguments used
        (or recovered) to construct it.
    """
    vocab = utils.load_vocab(args.vocab_json)

    if args.execution_engine_start_from is not None:
        # Resume from a checkpoint; constructor kwargs come from the file.
        ee, ee_kwargs = utils.load_execution_engine(
            args.execution_engine_start_from)
        # TODO: Adjust vocab?
    else:
        # Build a new engine from scratch using the CLI hyperparameters.
        # The `== 1` comparisons convert integer CLI flags into booleans.
        ee_kwargs = {
            'vocab': vocab,
            'feature_dim': parse_int_list(args.feature_dim),
            'stem_batchnorm': args.module_stem_batchnorm == 1,
            'stem_num_layers': args.module_stem_num_layers,
            'module_dim': args.module_dim,
            'module_residual': args.module_residual == 1,
            'module_batchnorm': args.module_batchnorm == 1,
            'classifier_proj_dim': args.classifier_proj_dim,
            'classifier_downsample': args.classifier_downsample,
            'classifier_fc_layers': parse_int_list(args.classifier_fc_dims),
            'classifier_batchnorm': args.classifier_batchnorm == 1,
            'classifier_dropout': args.classifier_dropout,
        }
        ee = ModuleNet(**ee_kwargs)

    ee.cuda()
    ee.train()
    return ee, ee_kwargs
def main(args):
    """Load the requested model and run inference via ``call_model``.

    Validates that the properties JSON is readable, then loads either a
    baseline model or a (program generator, execution engine) pair,
    optionally expanding each model's vocabulary from ``args.vocab_json``.
    """
    model = None
    # Validate the properties file up front; `properties` itself is not
    # used below in this function — presumably consumed elsewhere via
    # globals in other variants of main (TODO confirm).
    try:
        with open(args.properties_json, 'r') as f:
            properties = json.load(f)
    except (OSError, json.JSONDecodeError):
        # Was a bare `except:` + exit() (exit code 0); narrow the catch and
        # exit non-zero so callers/scripts can detect the failure.
        print("Unable to open properties file (properties_json argument)")
        raise SystemExit(1)

    if args.baseline_model is not None:
        print('Loading baseline model from ', args.baseline_model)
        model, _ = utils.load_baseline(args.baseline_model)
        if args.vocab_json is not None:
            new_vocab = utils.load_vocab(args.vocab_json)
            model.rnn.expand_vocab(new_vocab['question_token_to_idx'])
    elif args.program_generator is not None and args.execution_engine is not None:
        print('Loading program generator from ', args.program_generator)
        program_generator, _ = utils.load_program_generator(args.program_generator)
        print('Loading execution engine from ', args.execution_engine)
        execution_engine, _ = utils.load_execution_engine(args.execution_engine, verbose=False)
        if args.vocab_json is not None:
            new_vocab = utils.load_vocab(args.vocab_json)
            program_generator.expand_encoder_vocab(new_vocab['question_token_to_idx'])
        model = (program_generator, execution_engine)
    else:
        print('Must give either --baseline_model or --program_generator and --execution_engine')
        # Guard: without this, call_model would be invoked with model=None.
        return None

    return call_model(args, model)
def main(args):
    """Load the requested model, then answer a single (question, image)
    pair or run batched inference over an HDF5 dataset.

    Model selection mirrors the other entry points: either a baseline
    model, or a (program generator, execution engine) pair, with optional
    vocabulary expansion from ``args.vocab_json``.
    """
    print()
    model = None
    # NOTE: removed leftover debugger breakpoint
    # (`import pdb; pdb.set_trace()`) that halted every run.
    if args.baseline_model is not None:
        print('Loading baseline model from ', args.baseline_model)
        model, _ = utils.load_baseline(args.baseline_model)
        if args.vocab_json is not None:
            new_vocab = utils.load_vocab(args.vocab_json)
            model.rnn.expand_vocab(new_vocab['question_token_to_idx'])
    elif args.program_generator is not None and args.execution_engine is not None:
        print('Loading program generator from ', args.program_generator)
        program_generator, _ = utils.load_program_generator(
            args.program_generator)
        print('Loading execution engine from ', args.execution_engine)
        execution_engine, _ = utils.load_execution_engine(
            args.execution_engine, verbose=False)
        if args.vocab_json is not None:
            new_vocab = utils.load_vocab(args.vocab_json)
            program_generator.expand_encoder_vocab(
                new_vocab['question_token_to_idx'])
        model = (program_generator, execution_engine)
    else:
        print(
            'Must give either --baseline_model or --program_generator and --execution_engine'
        )
        return

    if args.question is not None and args.image is not None:
        # Single-example mode: one question about one image.
        run_single_example(args, model)
    else:
        # Batch mode: stream questions/features from HDF5 files.
        vocab = load_vocab(args)
        loader_kwargs = {
            'question_h5': args.input_question_h5,
            'feature_h5': args.input_features_h5,
            'vocab': vocab,
            'batch_size': args.batch_size,
        }
        if args.num_samples is not None and args.num_samples > 0:
            loader_kwargs['max_samples'] = args.num_samples
        if args.family_split_file is not None:
            with open(args.family_split_file, 'r') as f:
                loader_kwargs['question_families'] = json.load(f)
        with ClevrDataLoader(**loader_kwargs) as loader:
            run_batch(args, model, loader)
def init_model(args):
    """Build the model selected on the command line.

    Returns a baseline model, a (program_generator, execution_engine)
    tuple, or ``None`` (after printing a usage hint) when neither option
    was supplied.  When ``args.vocab_json`` is given, the loaded model's
    question vocabulary is expanded from it.
    """
    # Baseline model takes precedence when given.
    if args.baseline_model is not None:
        print('Loading baseline model from ', args.baseline_model)
        baseline, _ = utils.load_baseline(args.baseline_model)
        if args.vocab_json is not None:
            expanded = utils.load_vocab(args.vocab_json)
            baseline.rnn.expand_vocab(expanded['question_token_to_idx'])
        return baseline

    # Otherwise both halves of the modular model must be provided.
    if args.program_generator is not None and args.execution_engine is not None:
        print('Loading program generator from ', args.program_generator)
        pg, _ = utils.load_program_generator(args.program_generator)
        print('Loading execution engine from ', args.execution_engine)
        ee, _ = utils.load_execution_engine(args.execution_engine, verbose=False)
        if args.vocab_json is not None:
            expanded = utils.load_vocab(args.vocab_json)
            pg.expand_encoder_vocab(expanded['question_token_to_idx'])
        return (pg, ee)

    print('Must give either --baseline_model or --program_generator and --execution_engine')
    return None
def main(args):
    """Set up the object/material/colour/size catalogues from the
    properties JSON, apply OOD / removal tweaks, load the model, and run
    the requested probabilistic inference test.

    ``args.prob_test`` selects the procedure: 0 = Metropolis-Hastings with
    one class constraint, 1 = Metropolis-Hastings with two constraints,
    2 = rejection sampling with one constraint.
    """
    global AVAILABLE_OBJECTS, AVAILABLE_MATERIALS, AVAILABLE_SIZES, AVAILABLE_COLOURS
    global NUM_AVAILABLE_OBJECTS, NUM_AVAILABLE_MATERIALS, NUM_AVAILABLE_SIZES, NUM_AVAILABLE_COLOURS
    global obj_probs, material_probs, colour_probs, size_probs
    global save_directory

    model = None

    # Keep only the file open/parse inside the try: previously a bare
    # `except:` also swallowed KeyErrors from the processing below and
    # printed a misleading "unable to open" message (exiting with code 0).
    try:
        with open(args.properties_json, 'r') as f:
            properties = json.load(f)
    except (OSError, json.JSONDecodeError):
        print("Unable to open properties file (properties_json argument)")
        raise SystemExit(1)

    # Colours are stored as 0-255 RGB; convert to RGBA floats in [0, 1].
    for name, rgb in properties['colors'].items():
        rgba = [float(c) / 255.0 for c in rgb] + [1.0]
        AVAILABLE_COLOURS.append((name, rgba))
    AVAILABLE_MATERIALS = [(v, k) for k, v in properties['materials'].items()]
    AVAILABLE_OBJECTS = [(v, k) for k, v in properties['shapes'].items()]
    AVAILABLE_SIZES = list(properties['sizes'].items())

    NUM_AVAILABLE_OBJECTS = len(AVAILABLE_OBJECTS)
    NUM_AVAILABLE_MATERIALS = len(AVAILABLE_MATERIALS)
    NUM_AVAILABLE_SIZES = len(AVAILABLE_SIZES)
    NUM_AVAILABLE_COLOURS = len(AVAILABLE_COLOURS)

    # Uniform categorical probabilities over each catalogue.
    obj_probs = torch.ones(NUM_AVAILABLE_OBJECTS) / NUM_AVAILABLE_OBJECTS
    material_probs = torch.ones(NUM_AVAILABLE_MATERIALS) / NUM_AVAILABLE_MATERIALS
    colour_probs = torch.ones(NUM_AVAILABLE_COLOURS) / NUM_AVAILABLE_COLOURS
    size_probs = torch.ones(NUM_AVAILABLE_SIZES) / NUM_AVAILABLE_SIZES

    # OOD extrapolation: add an object type unseen during training.
    if args.out_of_distribution == 1:
        AVAILABLE_OBJECTS.append(('Cone', 'cone'))
    elif args.out_of_distribution == 2:
        AVAILABLE_OBJECTS.append(('Corgi', 'corgi'))
    if args.out_of_distribution in (1, 2):
        NUM_AVAILABLE_OBJECTS += 1
        obj_probs = torch.ones(NUM_AVAILABLE_OBJECTS) / NUM_AVAILABLE_OBJECTS

    # Adversarial or OOD extrapolation: remove an object type.
    if args.remove_object_type is not None:  # was `!= None`
        AVAILABLE_OBJECTS = [
            (blender_name, object_name)
            for blender_name, object_name in AVAILABLE_OBJECTS
            if object_name != args.remove_object_type
        ]
        NUM_AVAILABLE_OBJECTS = len(AVAILABLE_OBJECTS)
        obj_probs = torch.ones(NUM_AVAILABLE_OBJECTS) / NUM_AVAILABLE_OBJECTS

    if args.save_dir is not None:  # was `!= None`
        save_directory = args.save_dir

    if args.baseline_model is not None:
        print('Loading baseline model from ', args.baseline_model)
        model, _ = utils.load_baseline(args.baseline_model)
        if args.vocab_json is not None:
            new_vocab = utils.load_vocab(args.vocab_json)
            model.rnn.expand_vocab(new_vocab['question_token_to_idx'])
    elif args.program_generator is not None and args.execution_engine is not None:
        print('Loading program generator from ', args.program_generator)
        program_generator, _ = utils.load_program_generator(
            args.program_generator)
        print('Loading execution engine from ', args.execution_engine)
        execution_engine, _ = utils.load_execution_engine(
            args.execution_engine, verbose=False)
        if args.vocab_json is not None:
            new_vocab = utils.load_vocab(args.vocab_json)
            program_generator.expand_encoder_vocab(
                new_vocab['question_token_to_idx'])
        model = (program_generator, execution_engine)
    else:
        print(
            'Must give either --baseline_model or --program_generator and --execution_engine'
        )
        return

    print("Calling inference!")
    random_latent = generate_random_latent(num_objects=args.num_objects)
    print(random_latent)

    if args.prob_test == 0:
        print("Running Metropolis Hastings (one constraint)")
        metropolis_hastings(initial_proposal=random_latent,
                            num_iters=int(args.num_iters),
                            std=0.05,
                            args=args,
                            model=model,
                            target_class=args.class_a,
                            num_objects=args.num_objects,
                            output_csv=args.output_csv,
                            test_name=args.test_name)
    if args.prob_test == 1:
        print("Running Metropolis Hastings (two constraints)")
        target_classes = [args.class_a, args.class_b]
        metropolis_hastings_two_classes(initial_proposal=random_latent,
                                        num_iters=int(args.num_iters),
                                        std=0.05,
                                        args=args,
                                        model=model,
                                        target_classes=target_classes,
                                        num_objects=args.num_objects)
    if args.prob_test == 2:
        print("Running Rejection Sampling (one constraint)")
        rejection_sampling(initial_proposal=random_latent,
                           num_iters=int(args.num_iters),
                           args=args,
                           model=model,
                           target_class=args.class_a,
                           num_objects=args.num_objects,
                           output_csv=args.output_csv,
                           test_name=args.test_name)