# --- CNN feature extraction + onehot metadata loading -----------------------
# NOTE(review): this is a fragment of a larger script; `use_gpu`, `trained_cnn`,
# `shapes_dataset`, `cnn_model_id`, `use_symbolic_input`, `should_train_visual`
# and the helper functions are all defined elsewhere in the file.
# NOTE(review): original line breaks were lost in a paste; indentation below is
# reconstructed from the token stream — confirm against the original file.

# Move the pre-trained CNN to the GPU when one is configured.
if use_gpu:
    trained_cnn = trained_cnn.cuda()
print("=CNN state loaded=")
print("Extracting features...")

# Dump the features to then load them
features_folder_name = save_features(trained_cnn, shapes_dataset, cnn_model_id)
print("crating one hot metadata")

if not shapes_dataset is None:
    # Create onehot metadata if not created yet
    if not does_shapes_onehot_metadata_exist(shapes_dataset):
        create_shapes_onehot_metadata(shapes_dataset)
    # Load metadata
    train_metadata, valid_metadata, test_metadata, noise_metadata = load_shapes_onehot_metadata(
        shapes_dataset)
else:
    # No shapes dataset configured — presumably the symbolic-input path; no
    # onehot metadata applies. TODO(review): confirm against callers.
    train_metadata = None
    valid_metadata = None
    test_metadata = None
    noise_metadata = None
print("loaded metadata")

print("loading data")
# Load data
if not shapes_dataset is None:
    if not use_symbolic_input:
        if should_train_visual:
            # NOTE(review): the body of this branch continues beyond this
            # chunk; it is intentionally left open here.
        cnn_dump_id)  # NOTE(review): tail of a call whose opening line is outside this chunk

# Load data
if should_train_visual:
    # NOTE(review): `assert False` makes this branch unreachable — training the
    # visual module appears deliberately unsupported in this (zero-shot)
    # variant; the load_images call below it is dead code. Confirm intent.
    assert False
    _train_data, _valid_data, _test_data = load_images(
        'shapes/{}'.format(target_shapes_dataset), BATCH_SIZE, K)
else:
    # Load pre-extracted image features for target and distractor datasets.
    n_pretrained_image_features, _t, _v, test_data = load_pretrained_features_zero_shot(
        target_features_folder_name, distractors_features_folder_name, BATCH_SIZE, K)
    # Sanity check: dumped feature dimensionality must match the configured
    # n_image_features. NOTE(review): placement inside the else-branch is
    # reconstructed from a line-joined paste — confirm original indentation.
    assert n_pretrained_image_features == n_image_features

# Create onehot metadata if not created yet - only target is needed
if not does_shapes_onehot_metadata_exist(target_shapes_dataset):
    create_shapes_onehot_metadata(target_shapes_dataset)

# Load metadata - only target is needed
_train_metadata, _valid_metadata, target_test_metadata = load_shapes_onehot_metadata(
    target_shapes_dataset)

# Settings
# Ensure the dump directories exist before anything is written to them;
# directory creation is skipped entirely when dumping is disabled.
dumps_dir = './dumps'
if should_dump and not os.path.exists(dumps_dir):
    os.mkdir(dumps_dir)

# Per-run model directory, keyed by dump id, vocabulary size and max sentence
# length so different hyper-parameter runs do not overwrite each other.
current_model_dir = '{}/{}_{}_{}'.format(dumps_dir, dump_id, vocab_size, max_sentence_length)
if should_dump and not os.path.exists(current_model_dir):
    os.mkdir(current_model_dir)