def synthesize_shape(attributes, c=0, model_name='KPCA', raw=False):
    '''
    Parameters
    ----------
    attributes : array_like
        Values of shape attributes, each in the range [0, 1].
    c : int
        The index of a cluster.
    model_name : str
        Name of the trained model.
    raw : bool
        If True, treat ``attributes`` as already being in the raw
        (unnormalized) feature space and skip the inverse transforms.

    Returns
    -------
    data_rec : array_like
        Reconstructed high-dimensional design parameters.
    '''
    if not raw:
        transforms = [load_model(model_name + '_fpca', c)]
        transforms.append(load_model(model_name + '_fscaler', c))
        raw_attr = inverse_features(attributes, transforms)  # undo min-max normalization
    else:
        raw_attr = attributes

    model = load_model(model_name, c)
    xpca = load_model('xpca', c)
    dim_increase = xpca.inverse_transform
    # Reconstruct design parameters
    data_rec = dim_increase(model.inverse_transform(raw_attr))
    return data_rec
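# A minimal usage sketch for synthesize_shape, assuming the trained KPCA
# models have been saved where load_model can find them; the attribute
# values and cluster index below are illustrative, and the attribute vector
# length must match the trained model's feature dimension.
import numpy as np

attributes = np.array([0.2, 0.8, 0.5])  # normalized shape attributes in [0, 1]
design_params = synthesize_shape(attributes, c=0, model_name='KPCA')
print(design_params.shape)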
def main_eval(args):
    assert args.load_from is not None, '--load_from required in eval mode'
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.INFO)

    dataset_train, dataset_test, scaler = get_data(args)
    logging.info(f'evaluation mode. Level: {args.level}')

    device = torch.device('cuda:0') if torch.cuda.is_available() \
        else torch.device('cpu')
    n_features = dataset_train.items.shape[1]
    generator, discriminator = get_models(args, n_features, device)

    experiment = Experiment(args.comet_api_key,
                            project_name=args.comet_project_name,
                            workspace=args.comet_workspace)
    experiment.log_parameters(vars(args))

    load_model(Path(args.load_from), generator, discriminator, None, None,
               device)

    n_events = len(dataset_test)
    steps = (args.gan_test_ratio * n_events) // args.eval_batch_size
    evaluate_model(generator, experiment, dataset_test, args.eval_batch_size,
                   steps, args, device, scaler, 0)
def __init__(self, word_vectors, char_vectors, hidden_size, intensive_path,
             num_heads, sketchy_path, gpu_ids, char_embed_drop_prob,
             drop_prob=0.):
    super(RetroQANet, self).__init__()
    self.sketchy = SketchyReader(word_vectors=word_vectors,
                                 char_vectors=char_vectors,
                                 hidden_size=hidden_size,
                                 num_heads=num_heads,
                                 char_embed_drop_prob=char_embed_drop_prob,
                                 drop_prob=drop_prob)
    self.sketchy = nn.DataParallel(self.sketchy, gpu_ids)
    self.sketchy, _ = util.load_model(self.sketchy, sketchy_path, gpu_ids)

    self.intensive = IntensiveReader(word_vectors=word_vectors,
                                     char_vectors=char_vectors,
                                     num_heads=num_heads,
                                     char_embed_drop_prob=char_embed_drop_prob,
                                     hidden_size=hidden_size,
                                     drop_prob=drop_prob)
    self.intensive = nn.DataParallel(self.intensive, gpu_ids)
    self.intensive, _ = util.load_model(self.intensive, intensive_path,
                                        gpu_ids)

    self.RV_TAV = layers.RV_TAV()
def test_model(
    use_cuda,
    dset_folder,
    disable_tqdm=False,
):
    best_model = GAT_MNIST(num_features=util.NUM_FEATURES,
                           num_classes=util.NUM_CLASSES)
    util.load_model("best", best_model)
    if use_cuda:
        best_model = best_model.cuda()

    test_dset = MNIST(dset_folder, train=False, download=True)
    test_imgs = test_dset.data.unsqueeze(-1).numpy().astype(np.float64)
    with multiprocessing.Pool() as p:
        test_graphs = np.array(p.map(util.get_graph_from_image, test_imgs))
    del test_imgs
    test_labels = test_dset.targets.numpy()

    test_accs = util.test(
        best_model,
        test_graphs,
        test_labels,
        list(range(len(test_labels))),
        use_cuda,
        desc="Test ",
        disable_tqdm=disable_tqdm,
    )
    test_acc = 100 * np.mean(test_accs)
    print("TEST RESULTS: {acc:.2f}%".format(acc=test_acc))
def main(unused_argv):
    # create output dirs
    output_dir = Path(FLAGS.output_dir)
    Path.mkdir(output_dir, exist_ok=True)

    decode_dbl = parse_emotion_dbl(FLAGS.eval_file_path)
    if FLAGS.cpc_path is not None:
        cpc = load_model(FLAGS.cpc_path).eval().to(device)
    else:
        cpc = NoCPC().eval().to(device)
    model = load_model(FLAGS.model_path).eval().to(device)
    set_seeds()

    # Need the enumeration to ensure unique files
    for i, dbl_entry in enumerate(decode_dbl):
        filename = Path(dbl_entry.audio_path)
        preds = decode_emotions_from_file(filename.as_posix(), cpc, model,
                                          FLAGS.window_size)
        with open(str(output_dir / filename.name) + "_" + str(i), "w") as out_f:
            for pred in preds:
                out_f.write("{:.3f} {:.3f} {}\n".format(pred.start, pred.end,
                                                        pred.label))
        with open(output_dir / "score.dbl", "a") as dbl_fh:
            dbl_fh.write(str(output_dir / filename.name) + "_" + str(i) + "\n")
def playground(params):
    speaker_categs = torch.load(params.speaker_categs_path)
    num_speakers, speaker_feature_dim = speaker_categs.size()

    describer_model = util.load_model(params.header + DESCRIBER_FOOTER)
    describer = Describer(describer_model, speaker_feature_dim)
    describer.eval()

    reconstructor_model = util.load_model(params.header + RECONSTRUCTOR_FOOTER)
    reconstructor = Reconstructor(reconstructor_model, params.log_frac)

    latent_forger_model = util.load_model(params.header + LATENT_FORGER_FOOTER)
    latent_forger = LatentForger(latent_forger_model)

    describer.load_state_dict(torch.load(
        'snapshots/' + params.header + DESCRIBER_FOOTER + '.pth',
        map_location=lambda storage, loc: storage))
    reconstructor.load_state_dict(torch.load(
        'snapshots/' + params.header + RECONSTRUCTOR_FOOTER + '.pth',
        map_location=lambda storage, loc: storage))
    latent_forger.load_state_dict(torch.load(
        'snapshots/' + params.header + LATENT_FORGER_FOOTER + '.pth',
        map_location=lambda storage, loc: storage))

    IPython.embed()
def model_initialization(encoder_style, decoder_style, langs, embedding_size,
                         learning_rate, use_model):
    # Initialize the embedding
    emb = docEmbedding(langs['rt'].n_words, langs['re'].n_words,
                       langs['rm'].n_words, embedding_size)
    emb.init_weights()

    # Choose encoder style
    # TODO: Set up a choice for hierarchical or not
    if encoder_style == 'LIN':
        encoder = EncoderLIN(embedding_size, emb)
    elif encoder_style == 'BiLSTM':
        encoder = EncoderBiLSTM(embedding_size, emb)
    elif encoder_style == 'BiLSTMMax':
        encoder = EncoderBiLSTMMaxPooling(embedding_size, emb)
    elif encoder_style == 'HierarchicalBiLSTM':
        encoder_args = {"hidden_size": embedding_size, "local_embed": emb}
        encoder = HierarchicalBiLSTM(**encoder_args)
    elif encoder_style == 'HierarchicalLIN':
        encoder_args = {"hidden_size": embedding_size, "local_embed": emb}
        encoder = HierarchicalLIN(**encoder_args)
    else:
        # initialize hierarchical encoder rnn (both global and local)
        encoder_args = {"hidden_size": embedding_size, "local_embed": emb}
        encoder = HierarchicalEncoderRNN(**encoder_args)

    # Choose decoder style and training function
    if decoder_style == 'HierarchicalRNN':
        decoder = HierarchicalDecoder(embedding_size, langs['summary'].n_words)
        train_func = Hierarchical_seq_train
    else:
        decoder = AttnDecoderRNN(embedding_size, langs['summary'].n_words)
        train_func = Plain_seq_train

    if use_cuda:
        emb.cuda()
        encoder.cuda()
        decoder.cuda()

    # Choose optimizer
    loss_optimizer = optim.Adagrad(
        list(encoder.parameters()) + list(decoder.parameters()),
        lr=learning_rate, lr_decay=0, weight_decay=0)
    # loss_optimizer = optim.Adam(
    #     list(encoder.parameters()) + list(decoder.parameters()),
    #     lr=learning_rate)

    if use_model is not None:
        encoder = load_model(encoder, use_model[0])
        decoder = load_model(decoder, use_model[1])
        if not use_cuda:
            loss_optimizer.load_state_dict(torch.load(
                use_model[2], map_location=lambda storage, loc: storage))
        else:
            loss_optimizer.load_state_dict(torch.load(use_model[2]))

    return encoder, decoder, loss_optimizer, train_func
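# A minimal sketch of wiring model_initialization into a training setup; the
# style names come from the branches above, while the langs dict and the
# embedding-size/learning-rate values below are illustrative assumptions.
encoder, decoder, loss_optimizer, train_func = model_initialization(
    encoder_style='BiLSTM',
    decoder_style='HierarchicalRNN',
    langs=langs,        # assumed: Lang objects keyed by 'rt', 're', 'rm', 'summary'
    embedding_size=600,
    learning_rate=0.15,
    use_model=None,     # or (encoder_path, decoder_path, optimizer_path) to resume
)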
def predict(model, file_path, device, model_path=None, f_name='pred_masks',
            threshold=0.5):
    if model_path is not None:
        load_model(model_path, model, map_location=device)

    pred_mask_path = Path(file_path) / f_name
    if not pred_mask_path.exists():
        pred_mask_path.mkdir()

    transforms = A.Compose([A.Resize(256, 256), A.Normalize(), ToTensorV2()])
    ds = TestKvasirSegDataset(file_path, transforms)
    d_loader = DataLoader(ds, batch_size=4)

    model.to(device)
    model.eval()
    with torch.no_grad():
        for i, data in tqdm(enumerate(d_loader), desc="Predict",
                            total=len(d_loader)):
            imgs, paths = data['images'], data['paths']
            imgs = imgs.to(device).float()
            pred_masks = model(imgs)
            pred_masks = (pred_masks.cpu().detach().numpy()
                          > threshold).astype(np.uint8)
            for mask, p in zip(pred_masks, paths):
                # derive the output name from the image path (renamed from
                # f_name to avoid shadowing the parameter); note this split
                # assumes Windows-style path separators
                mask_name = p.split('\\')[-1].split('.')[0] + '.png'
                plt.imsave(str(pred_mask_path / mask_name), mask.squeeze(),
                           cmap='gray')
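# A minimal usage sketch for predict, assuming a trained segmentation model
# and a Kvasir-SEG style test folder; the paths below are placeholders and
# build_unet() is a hypothetical model constructor.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = build_unet()  # hypothetical constructor for the segmentation net
predict(model, 'data/kvasir_test', device,
        model_path='checkpoints/best.pth', threshold=0.5)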
def keep_only_used_docs(gt_file, run_to_rerank, encoded_docs_folder):
    dbn_filtered = {}
    nskipped_rel_docs = 0
    for line in open(gt_file):
        data = line.split()
        dname = data[2].strip()
        rel_or_not = int(data[-1])
        if dname not in dbn_filtered:
            if not os.path.isfile(os.path.join(encoded_docs_folder, dname)) \
                    and rel_or_not > 0:
                nskipped_rel_docs += 1
            elif os.path.isfile(os.path.join(encoded_docs_folder, dname)):
                dbn_filtered[dname] = util.load_model(
                    os.path.join(encoded_docs_folder, dname))
    print('n skipped rel docs: ' + str(nskipped_rel_docs))

    for line in open(run_to_rerank, encoding='latin-1'):
        data = line.split()
        dname = data[2].strip()
        if dname not in dbn_filtered:
            dbn_filtered[dname] = util.load_model(
                os.path.join(encoded_docs_folder, dname))
    return dbn_filtered
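# A minimal usage sketch, assuming TREC-style qrels and run files whose third
# whitespace-separated column is the document name; the paths are placeholders.
docs_by_name = keep_only_used_docs('qrels.robust04.txt',
                                   'runs/bm25_to_rerank.txt',
                                   'data/robust/stemmed_encoded_docs_ft')
print(len(docs_by_name), 'encoded documents kept')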
def compute_train_test_q_names(q_names):
    np.random.seed(0)
    if not os.path.isfile('test_q_names'):
        training_q_names = np.random.choice(q_names, 200, replace=False)
        test_q_names = [qn for qn in q_names if qn not in training_q_names]
        util.save_model(test_q_names, 'test_q_names')
        util.save_model(training_q_names, 'train_q_names')
    else:
        training_q_names = util.load_model('train_q_names')
        test_q_names = util.load_model('test_q_names')
    return training_q_names, test_q_names
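# A minimal usage sketch, assuming q_names holds at least 200 query IDs
# (e.g. the full Robust04 topic set); the split is cached on disk, so
# repeated calls return the same partition.
train_qs, test_qs = compute_train_test_q_names(q_names)
print(len(train_qs), 'training queries;', len(test_qs), 'test queries')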
def main(unused_argv):
    # create output dirs
    output_dir = Path(FLAGS.output_dir)
    Path.mkdir(output_dir, exist_ok=True)

    if FLAGS.cpc_path is not None:
        cpc = load_model(FLAGS.cpc_path).eval().to(device)
    else:
        cpc = NoCPC().eval().to(device)
    model = load_model(FLAGS.model_path).eval().to(device)

    dataset = AudioDataset(FLAGS.eval_file_path, train=False)
    dataloader = AudioDataLoader(
        dataset,
        window_size=None,
        batch_size=1,
        feature_transform=cpc.data_class,
        num_workers=8,
        shuffle=False,
    )
    set_seeds()

    # Need the enumeration to ensure unique files
    for i, batch in enumerate(dataloader):
        data = batch["data"].to(device)
        cpc.reset_state()
        preds = []
        prev_end_s = 0.0
        windows = torch.split(data, FLAGS.window_size, dim=1)
        for window in windows:
            with torch.no_grad():
                features = cpc(window)
                pred = model(features).argmax(dim=2).squeeze(dim=0)
            outputs, prev_end_s = preds_to_output(
                pred,
                window.shape[1],
                dataloader.sampling_rate,
                prev_end_s,
            )
            preds.extend(outputs)

        filename = Path(batch["files"][0])
        with open(str(output_dir / filename.name) + "_" + str(i), "w") as out_f:
            for pred in preds:
                out_f.write("{:.3f} {:.3f} {}\n".format(pred.start, pred.end,
                                                        pred.label))
        with open(output_dir / "score.dbl", "a") as dbl_fh:
            dbl_fh.write(str(output_dir / filename.name) + "_" + str(i) + "\n")
def compute_data():
    ftext_model_path = '../data/fasttext_models/wiki.en.bin'
    output_path_wi_model = '../data/fasttext_models/wi_robust'
    output_path_ii_model = '../data/fasttext_models/ii_robust'
    output_path_idf_model = '../data/fasttext_models/idf_robust'
    output_path_encoded_d_model = '../data/fasttext_models/encoded_dbn'
    output_path_encoded_q_model = '../data/fasttext_models/encoded_qbn'
    output_path_we_matrix_model = '../data/fasttext_models/word_embeddings_matrix_robust'

    coll_path = '/Users/albertopurpura/ExperimentalCollections/Robust04/processed/corpus'
    queries_main_folder = '/Users/albertopurpura/ExperimentalCollections/Robust04/processed/topics'
    output_model_path = 'data/robust/stemmed_coll_model'
    encoded_out_folder_docs = 'data/robust/stemmed_encoded_docs_ft'
    stemming = True

    if not os.path.isfile(output_path_ii_model):
        print('computing inverted index')
        ii = compute_inverted_index(coll_path, stemming, output_path_ii_model)
        util.save_model(ii, output_path_ii_model)
    else:
        print('loading inverted index')
        ii = util.load_model(output_path_ii_model)

    if not os.path.isfile(output_path_encoded_d_model):
        text_dbn = read_collection(coll_path, output_model_path,
                                   stemming=stemming,
                                   stoplist=util.load_indri_stopwords())
        encoded_dbn, wi, we_matrix = compute_input_data(
            text_dbn, ftext_model_path, encoded_out_folder_docs)
        util.save_model(encoded_dbn, output_path_encoded_d_model)
        util.save_model(wi, output_path_wi_model)
        util.save_model(we_matrix, output_path_we_matrix_model)
    else:
        encoded_dbn = util.load_model(output_path_encoded_d_model)
        wi = util.load_model(output_path_wi_model)
        we_matrix = util.load_model(output_path_we_matrix_model)

    if not os.path.isfile(output_path_encoded_q_model):
        encoded_qbn = encode_queries(queries_main_folder, wi, stemming)
        util.save_model(encoded_qbn, output_path_encoded_q_model)
    else:
        encoded_qbn = util.load_model(output_path_encoded_q_model)

    idf_scores = du.compute_idf(coll_path, stemming, output_path_ii_model,
                                output_path_idf_model)
    return encoded_dbn, encoded_qbn, we_matrix, wi, ii, idf_scores
def alt_load_training_batches(batches_folder, training_qnames, batch_size,
                              max_q_len, max_d_len):
    pairs = []
    print('loading training batches')
    for filename in tqdm(os.listdir(batches_folder)):
        fp = os.path.join(batches_folder, filename)
        tqn = filename.split('_')[-1].replace('qn=', '')
        if tqn in training_qnames and os.path.isfile(fp):
            pair = util.load_model(fp)
            pairs.append(pair)

    max_q_len_b = []
    max_d_len_b = []
    y_b = []
    b_q_len = []
    b_d_len = []
    b_sm = []
    for pair in pairs:
        for p in pair:
            max_q_len_b.append(max_q_len)
            max_d_len_b.append(max_d_len)
            y_b.append(p[1])
            b_q_len.append(p[3])
            b_d_len.append(p[4])
            b_sm.append(p[2])
            if len(max_q_len_b) == 2 * batch_size:
                yield (max_q_len_b, max_d_len_b, y_b, b_q_len, b_d_len, b_sm)
                max_q_len_b = []
                max_d_len_b = []
                y_b = []
                b_q_len = []
                b_d_len = []
                b_sm = []
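# A minimal sketch of consuming the batch generator above; the folder, query
# names, and length limits are illustrative assumptions.
for (max_q_len_b, max_d_len_b, y_b,
     b_q_len, b_d_len, b_sm) in alt_load_training_batches(
        'data/robust/training_batches', train_qs,
        batch_size=32, max_q_len=4, max_d_len=500):
    # each yielded tuple holds 2 * batch_size training examples
    print(len(y_b), 'labels in this batch')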
def cnn_process():
    use_model = args.cnn_use_model
    util.topic_log(use_model)
    if use_model == 'resnet50':
        pretrain_model = torch_models.resnet50(pretrained=True)
    elif use_model == 'resnet101':
        pretrain_model = torch_models.resnet101(pretrained=True)
    elif use_model == 'resnet152':
        pretrain_model = torch_models.resnet152(pretrained=True)
    elif use_model == 'alexnet':
        pretrain_model = torch_models.alexnet(pretrained=True)
    else:
        raise ValueError(f'unknown model: {use_model}')

    model = models.FineTuneModel(
        pretrain_model,
        'resnet' if 'resnet' in use_model else use_model).to(args.device)
    optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                  model.parameters()),
                           lr=args.lr, weight_decay=0.0001)

    best_top1 = 0
    if args.load_model:
        best_top1 = util.load_model(model, optimizer, args,
                                    args.save_model_path)
        print(f'load_model: {args.load_model} ({best_top1})')

    util.model_fit(model, optimizer, args, train_loader, test_loader,
                   best_top1)
    _, predicts = util.test_epoch(model, args, test_loader, get_predicts=True)
    util.evaluate_log(predicts, test_labels)
def init_training(args, word_vectors, char_vectors, device, config=None):
    config = config or {}
    model_name = args.name
    if model_name == 'baseline':
        model, optimizer, scheduler = init_baseline_training(
            args, word_vectors, config)
    elif model_name == 'claf':
        model, optimizer, scheduler = init_claf_training(
            args, char_vectors, word_vectors, config)
    elif model_name == 'qanet':
        model, optimizer, scheduler = init_qanet_training(
            args, char_vectors, word_vectors, config)
    elif model_name == 'qanet2':
        model, optimizer, scheduler = init_qanet2_training(
            args, char_vectors, word_vectors, config)
    else:
        raise Exception("Unknown model")

    model = nn.DataParallel(model, args.gpu_ids)
    if args.load_path:
        model, step = util.load_model(model, args.load_path, args.gpu_ids)
    else:
        step = 0
    model = model.to(device)
    model.train()

    ema_decay = args.ema_decay
    ema = util.EMA(model, ema_decay)
    return model, optimizer, scheduler, ema, step
def main():
    config_file = open('./config.json')
    config = json.load(
        config_file,
        object_hook=lambda d: namedtuple('x', d.keys())(*d.values()))
    num_unrolls = config.num_steps // config.unroll_length

    with tf.Session() as sess:
        model = util.load_model(sess, config, logger)
        all_y = []
        for i in range(10):
            print(i)
            _, loss, reset, fx_array, x_array = model.step()
            cost, others = util.run_epoch(sess, loss, [fx_array, x_array],
                                          reset, num_unrolls)
            Y, X = others
            all_y.append(Y)

    all_y = np.hstack(all_y)
    np.save('srnn.npy', all_y)

    plt.figure(1)
    y_mean = np.mean(all_y, axis=1)
    plt.plot(y_mean)
    print(min(y_mean))
    plt.show()
def load_encoded_collection(encoded_coll_folder):
    encoded_docs_by_name = {}
    for filename in tqdm(os.listdir(encoded_coll_folder)):
        fp = os.path.join(encoded_coll_folder, filename)
        if os.path.isfile(fp):
            encoded_docs_by_name[filename] = util.load_model(fp)
    return encoded_docs_by_name
def show_first_menu():
    # Record which models are in the models folder
    models_list = [
        f for f in listdir(models_path)
        if isfile(join(models_path, f)) and f.endswith(".dat")
    ]

    first_menu = ConsoleMenu("Main menu")
    submenu = SelectionMenu(models_list, "Load Model")
    submenu_item = SubmenuItem("Load a model", submenu, menu=first_menu,
                               should_exit=True)
    first_menu.append_item(submenu_item)
    first_menu.start()
    first_menu.join()

    if submenu.selected_option >= len(models_list):
        show_first_menu()
        return
    elif submenu.selected_option == -1:
        return

    selected_model = models_list[submenu.selected_option]
    net, jtree = util.load_model(models_path + selected_model)
    if net is not None and jtree is not None:
        jtree.initialize_tables(net)
        print("Model loaded successfully")
        show_loaded_model_menu(selected_model, net, jtree)
    else:
        show_first_menu()
def main(): print("Loading model...") model, metadata = load_model(MODEL_NAME) print("Finding representative samples of output labels...") print("\n\n") good_samples = most_representative_samples(model, *metadata) predict_user_input(model, *metadata, good_samples=good_samples)
def compute_inverted_index(coll_folder, stemming, output_file_path_ii):
    if not os.path.isfile(output_file_path_ii):
        print('computing inverted index')
        inverted_idx = {}
        sw = util.load_indri_stopwords()
        doc_n = 0
        for filename in tqdm(os.listdir(coll_folder)):
            fp = os.path.join(coll_folder, filename)
            doc_id = filename.split(r'.')[0]
            if os.path.isfile(fp):
                doc_n += 1
                d = util.tokenize(' '.join(open(fp, 'r').readlines()),
                                  stemming, stoplist=sw)
                set_w_in_doc = set(d)
                for w in set_w_in_doc:
                    if w in inverted_idx:
                        inverted_idx[w].append((doc_id, d.count(w)))
                    else:
                        inverted_idx[w] = [(doc_id, d.count(w))]
        util.save_model(inverted_idx, output_file_path_ii)
    else:
        inverted_idx = util.load_model(output_file_path_ii)
    return inverted_idx
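# A minimal sketch of querying the inverted index built above: each entry
# maps a term to a list of (doc_id, term_frequency) postings. The paths are
# placeholders, and with stemming enabled the lookup key must be a stemmed term.
ii = compute_inverted_index('data/robust/corpus', True, 'data/robust/ii_model')
postings = ii.get('storm', [])
print(len(postings), 'documents contain the term')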
def get_model(log, args):
    if args.model_name == "GAT":
        model = SigGraInferNet_GAT(feature_input_size=args.feature_input_size,
                                   feature_output_size=args.feature_output_size,
                                   PPI_input_size=args.PPI_input_size,
                                   PPI_output_size=args.PPI_output_size,
                                   num_GAT=args.num_GNN,
                                   num_head=args.num_head,
                                   drop_prob=args.drop_prob)
    elif args.model_name == "GCN":
        model = SigGraInferNet_GCN(feature_input_size=args.feature_input_size,
                                   feature_output_size=args.feature_output_size,
                                   PPI_input_size=args.PPI_input_size,
                                   PPI_output_size=args.PPI_output_size,
                                   num_GCN=args.num_GNN,
                                   drop_prob=args.drop_prob)
    else:
        raise ValueError("Model name doesn't exist.")

    model = nn.DataParallel(model, args.gpu_ids)
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)

    if args.load_path:
        log.info(f'Loading checkpoint from {args.load_path}...')
        model, step = util.load_model(model, args.load_path, args.gpu_ids)
    else:
        step = 0
    return model, step
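# A minimal usage sketch, assuming args carries the fields referenced above
# (model_name, the size hyperparameters, num_GNN, gpu_ids, load_path) and
# that a standard logging.Logger is used.
import logging

log = logging.getLogger(__name__)
model, step = get_model(log, args)
log.info(f'model ready, resuming from step {step}')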
def read_collection(coll_main_folder, output_model_path, stemming,
                    stoplist=None):
    if not os.path.isfile(output_model_path):
        if stoplist is None:
            stoplist = util.load_indri_stopwords()
        text_by_name = {}
        print('reading files in folder')
        pool = multiprocessing.Pool(8)
        fnames_list = os.listdir(coll_main_folder)
        doc_paths_list = [os.path.join(coll_main_folder, filename)
                          for filename in fnames_list]
        print('processing collection')
        tokenized_docs = pool.starmap(
            util.tokenize,
            [(' '.join(open(fp, 'r').readlines()), stemming, stoplist)
             for fp in doc_paths_list])
        for i in range(len(fnames_list)):
            text_by_name[fnames_list[i].split(r'.')[0]] = tokenized_docs[i]
        print('saving model')
        util.save_model(text_by_name, output_model_path)
    else:
        print('loading model: %s' % output_model_path)
        text_by_name = util.load_model(output_model_path)
    return text_by_name
def load_training_batches_w2v_gc(data_folder, training_query_names, batch_size):
    data1_len_batch = []
    data2_len_batch = []
    y_batch = []
    cross_batch = []
    batches = []
    for filename in tqdm(os.listdir(data_folder)):
        fp = os.path.join(data_folder, filename)
        for qn in training_query_names:
            if 'qn=' + qn == filename.split('_')[0] and os.path.isfile(fp):
                data = util.load_model(fp)
                for p in data:
                    y_batch.append(p[0])
                    data1_len_batch.append(p[1])
                    data2_len_batch.append(p[2])
                    cross_batch.append(p[3])
                    if len(cross_batch) == 2 * batch_size:
                        batches.append((data1_len_batch, data2_len_batch,
                                        y_batch, cross_batch))
                        data1_len_batch = []
                        data2_len_batch = []
                        y_batch = []
                        cross_batch = []
    np.random.seed(0)
    np.random.shuffle(batches)
    for b in batches:
        yield b
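# A minimal sketch of iterating the shuffled batch generator above; the
# folder and query names are illustrative assumptions.
for data1_lens, data2_lens, ys, crosses in load_training_batches_w2v_gc(
        'data/robust/w2v_gc_batches', train_qs, batch_size=16):
    print(len(ys), 'examples in this batch')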
def run(down_station, input_list, include_time, sample_size, network_type,
        nr_layers, nr_units):
    """Runner"""
    result_dir = util.get_result_dir(down_station, network_type, nr_layers,
                                     nr_units, sample_size)
    util.plot_training_performance(result_dir)

    model_file = util.model_file_name(result_dir)
    # model_file = util.model_file_name_lowest_cv(result_dir)  # lowest cv model
    my_model = util.load_model(model_file)

    # uncomment for DWS prediction
    # for specific dates, see internals of data.construct
    # (_, _, _, _, _, _, _, _, train_y_max, train_y_min, _, _, _, full_x, full_y) = data.construct(down_station, input_list, include_time, sample_size, network_type)
    # predict(my_model, result_dir, full_x, full_y, train_y_max, train_y_min)

    # uncomment for normal prediction
    # (_, _, y_cv, x_cv, _, _, _, _, train_y_max, train_y_min, _, _, _, full_x, full_y) = data.construct(down_station, input_list, include_time, sample_size, network_type)
    # predict(my_model, result_dir, x_cv, y_cv, train_y_max, train_y_min)

    # test prediction (currently active)
    (_, _, _, _, y_test, x_test, _, _, train_y_max, train_y_min,
     _, _, _, full_x, full_y) = data.construct(down_station, input_list,
                                               include_time, sample_size,
                                               network_type)
    predict(my_model, result_dir, x_test, y_test, train_y_max, train_y_min)
def main():
    gan = load_model(args.model_name)
    cv2.namedWindow(gan.model_name, cv2.WINDOW_NORMAL)
    vc = cv2.VideoCapture(cv2.CAP_DSHOW)
    if vc.isOpened():
        rval, frame_in = vc.read()
    else:
        rval = False

    with tf.Session(config=config) as sess:
        try:
            tf.global_variables_initializer().run()
        except:
            # fall back to the pre-TF-1.0 initializer name
            tf.initialize_all_variables().run()
        gan.load_checkpoints(sess)

        while rval:
            frame_in = cv2.resize(frame_in, (256, 256))
            frame_out = gan.infer_img(sess, frame_in)
            output = np.concatenate((frame_in, frame_out), axis=1)
            cv2.imshow(gan.model_name, output)
            rval, frame_in = vc.read()
            key = cv2.waitKey(20)
            if key == 27:  # exit on ESC
                break

    cv2.destroyWindow(gan.model_name)
def main():
    # Read config file
    cfg = util.read_config('config/digit.yaml')

    # Load digit data from dataset
    x_train, x_test, y_train, y_test = load_data(cfg['dataset'])
    x_train, y_train = util.shuffle_data(x_train, y_train)
    x_test, y_test = util.shuffle_data(x_test, y_test)

    # Default model name as loaded from file, overwritten if training
    model_name = cfg['nn']['model_name']
    model_dir = cfg['nn']['model_dir']

    with tf.Session() as sess:
        if cfg['nn']['train']:
            # Train network on our training data
            print('[ANN] Training new network...')
            model, model_name = train_network(sess, x_train, y_train, cfg)
        else:
            print('[ANN] Testing network {0}...'.format(model_name))
            model = util.load_model(os.path.join(model_dir,
                                                 model_name + "_model"))

        # Test network on our testing data
        results = test_network(sess, model, x_test, y_test, cfg)

        # TODO: Tristan to reimplement analyse results to get confusion
        # matrix and roc curve
        conf_mat = {}
        # conf_mat = util.analyse_results(y_test, results)
        util.store_results(conf_mat, os.path.join(model_dir,
                                                  model_name + "_cm"))
def load_test_fd_pwe(fold, data_folder):
    # test_fd = []
    # test_fd_fp = 'test_fd_' + str(fold)
    for filename in os.listdir(data_folder):
        fp = os.path.join(data_folder, filename)
        if '_fd_' + str(fold) in filename:
            if os.path.isfile(fp):
                yield util.load_model(fp)
def load_test_batches(data_folder, qnames):
    for qn in qnames:
        for filename in os.listdir(data_folder):
            fp = os.path.join(data_folder, filename)
            if 'qn=' + qn == filename.split('_')[-1] and os.path.isfile(fp):
                len_q, len_d, d_names, q_name, sim_m = util.load_model(fp)
                yield len_q, len_d, d_names, q_name, sim_m
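# A minimal sketch of consuming the test-batch generator above; the folder
# and query names are illustrative assumptions.
for len_q, len_d, d_names, q_name, sim_m in load_test_batches(
        'data/robust/test_batches', test_qs):
    print(q_name, '->', len(d_names), 'candidate documents')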
def test_best_ind():
    env = gym.make("Pong-ram-v0").env
    best_individual = load_model(conf.save_path)
    fitness = best_individual.play_and_evaluation(env, num_games=3,
                                                  visualization=False)
    print("fitness", fitness)
    env.close()
def get_models(models_dir):
    # "models_dir" avoids shadowing the built-in dir()
    models = []
    path = os.path.join(models_dir, "validation", "models")
    for model_name in os.listdir(path):
        model = load_model(model_name, models_dir)
        models.append(model)
    return models
def main():
    args = parser.parse_args()

    # create repo
    repo = os.path.join(args.exp, 'conv' + str(args.conv))
    if not os.path.isdir(repo):
        os.makedirs(repo)

    # build model
    model = load_model(args.model)
    model.cuda()
    for params in model.parameters():
        params.requires_grad = False
    model.eval()

    # load data
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    tra = [transforms.Resize(256),
           transforms.CenterCrop(224),
           transforms.ToTensor(),
           normalize]

    # dataset
    dataset = datasets.ImageFolder(args.data, transform=transforms.Compose(tra))
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=256,
                                             num_workers=args.workers)

    # keys are filters and values are arrays with activation scores for the whole dataset
    layers_activations = {}
    for i, (input_tensor, _) in enumerate(dataloader):
        input_var = torch.autograd.Variable(input_tensor.cuda(), volatile=True)
        activations = forward(model, args.conv, input_var)

        if i == 0:
            layers_activations = {filt: np.zeros(len(dataset))
                                  for filt in activations}
        if i < len(dataloader) - 1:
            e_idx = (i + 1) * 256
        else:
            e_idx = len(dataset)
        s_idx = i * 256
        for filt in activations:
            layers_activations[filt][s_idx:e_idx] = \
                activations[filt].cpu().data.numpy()

        if i % 100 == 0:
            print('{0}/{1}'.format(i, len(dataloader)))

    # save top 9 images for each filter
    for filt in layers_activations:
        repofilter = os.path.join(repo, filt)
        if not os.path.isdir(repofilter):
            os.mkdir(repofilter)
        top = np.argsort(layers_activations[filt])[::-1]
        for img in top[:9]:
            src, _ = dataset.imgs[img]
            copyfile(src, os.path.join(repofilter, src.split('/')[-1]))
def ge_cmd_predict():
    args = parse_arg_predict()

    # prepare input to GE_learn
    data = util.load_data(args.data)
    model = util.load_model(args.model)
    pred_path = args.output

    pred = GE_predict(data, model)
    util.write_prediction(pred, pred_path)
def main():
    folder = '/data/hanlin'
    person_path_dic = load_one_deep_path(folder)
    sample_list, person_num = person_path_dic_trans(person_path_dic)
    model, get_Conv_FeatureMap = load_model(output_layer_index=18)

    data = []
    label = []
    start = time()
    for pic_path, person_index in sample_list:
        feature_vector = extract(pic_path, get_Conv_FeatureMap, pic_shape)[0]
        data.append(feature_vector)
        label.append(person_index)
    end = time()
    print(end - start)

    msgpack_numpy.dump((data, label), open('hanlin.p', 'wb'))
parser.set_defaults(multires=False)
args = parser.parse_args()

# Load the dataset and the image helper
print("Prepare the dataset from", args.dataset)
dataset = Dataset(args.dataset, args.eval_binary)

ensure_directory_exists(args.temp_dir + '/')

if args.stage in ('extract_train', 'db_features', 'q_features'):
    if args.model == 'pretrained':
        print("loading supervised pretrained VGG-16")
        net = torchvision.models.vgg16_bn(pretrained=True)
    else:
        net = load_model(args.model)

    transforms_comp = []
    features_layers = list(net.features.children())[:-1]
    net.features = torch.nn.Sequential(*features_layers)
    transforms_comp.extend([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    ])
    transforms = torchvision.transforms.Compose(transforms_comp)

    print("moving to GPU")
    net.cuda()
    net.eval()
def main():
    args = parser.parse_args()
    print(args)

    # fix random seeds
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

    # create model and move it to gpu
    model = load_model(args.model)
    model.top_layer = nn.Linear(model.top_layer.weight.size(1), 20)
    model.cuda()
    cudnn.benchmark = True

    # what partition of the data to use
    if args.split == 'train':
        args.test = 'val'
    elif args.split == 'trainval':
        args.test = 'test'

    # data loader
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    dataset = VOC2007_dataset(
        args.vocdir, split=args.split,
        transform=transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomResizedCrop(224,
                                         scale=(args.min_scale, args.max_scale),
                                         ratio=(1, 1)),
            transforms.ToTensor(),
            normalize,
        ]))
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=16,
                                         shuffle=False,
                                         num_workers=24,
                                         pin_memory=True)
    print('PASCAL VOC 2007 ' + args.split + ' dataset loaded')

    # re-initialize classifier
    for y, m in enumerate(model.classifier.modules()):
        if isinstance(m, nn.Linear):
            m.weight.data.normal_(0, 0.01)
            m.bias.data.fill_(0.1)
    model.top_layer.bias.data.fill_(0.1)

    if not args.fc6_8:
        for y, m in enumerate(model.features.modules()):
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                for i in range(m.out_channels):
                    m.weight.data[i].normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
    else:
        # freeze some layers
        for param in model.features.parameters():
            param.requires_grad = False
        # unfreeze batchnorm scaling
        if args.train_batchnorm:
            for layer in model.modules():
                if isinstance(layer, torch.nn.BatchNorm2d):
                    for param in layer.parameters():
                        param.requires_grad = True

    # set optimizer
    optimizer = torch.optim.SGD(
        filter(lambda x: x.requires_grad, model.parameters()),
        lr=args.lr,
        momentum=0.9,
        weight_decay=args.wd,
    )
    criterion = nn.BCEWithLogitsLoss(reduction='none')

    print('Start training')
    it = 0
    losses = AverageMeter()
    while it < args.nit:
        it = train(
            loader,
            model,
            optimizer,
            criterion,
            args.fc6_8,
            losses,
            it=it,
            total_iterations=args.nit,
            stepsize=args.stepsize,
        )

    print('Evaluation')
    if args.eval_random_crops:
        transform_eval = [
            transforms.RandomHorizontalFlip(),
            transforms.RandomResizedCrop(224,
                                         scale=(args.min_scale, args.max_scale),
                                         ratio=(1, 1)),
            transforms.ToTensor(),
            normalize,
        ]
    else:
        transform_eval = [
            transforms.Resize(256),
            transforms.TenCrop(224),
            transforms.Lambda(lambda crops: torch.stack(
                [normalize(transforms.ToTensor()(crop)) for crop in crops]))
        ]

    print('Train set')
    train_dataset = VOC2007_dataset(
        args.vocdir, split=args.split,
        transform=transforms.Compose(transform_eval))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=24,
        pin_memory=True,
    )
    evaluate(train_loader, model, args.eval_random_crops)

    print('Test set')
    test_dataset = VOC2007_dataset(
        args.vocdir, split=args.test,
        transform=transforms.Compose(transform_eval))
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=24,
        pin_memory=True,
    )
    evaluate(test_loader, model, args.eval_random_crops)
def main():
    args = parser.parse_args()

    # sanity check
    if args.arch == 'alexnet':
        assert args.conv < 6
    elif args.arch == 'vgg16':
        assert args.conv < 14

    # create repo
    repo = os.path.join(args.exp, 'conv' + str(args.conv))
    if not os.path.isdir(repo):
        os.makedirs(repo)

    # build model
    model = load_model(args.model)
    model.cuda()
    for params in model.parameters():
        params.requires_grad = False
    model.eval()

    def gradient_ascent(f):
        print(f, end=' ')
        sys.stdout.flush()
        fname_out = '{0}/layer{1}-channel{2}.jpeg'.format(repo, args.conv, f)

        img_noise = np.random.normal(size=(args.idim, args.idim, 3)) * 20 + 128
        img_noise = img_noise.astype('float32')
        inp = transforms.ToTensor()(img_noise)
        inp = torch.unsqueeze(inp, 0)

        for it in range(args.niter):
            x = torch.autograd.Variable(inp.cuda(), requires_grad=True)
            out = forward(model, args.conv - 1, f, x)
            criterion = nn.CrossEntropyLoss()
            filt_var = torch.autograd.Variable(torch.ones(1).long() * f).cuda()
            output = out.mean(3).mean(2)
            loss = -criterion(output, filt_var) - args.wd * torch.norm(x) ** 2

            # compute gradient
            loss.backward()

            # normalize gradient
            grads = x.grad.data.cpu()
            grads = grads.div(torch.norm(grads) + 1e-8)

            # apply gradient
            inp = inp.add(args.lr * grads)

            # gaussian blur
            if it % args.step == 0:
                inp = gaussian_filter(
                    torch.squeeze(inp).numpy().transpose((2, 1, 0)),
                    sigma=(args.sig, args.sig, 0))
                inp = torch.unsqueeze(
                    torch.from_numpy(inp).float().transpose(2, 0), 0)

            # save image at the last iteration
            if it == args.niter - 1:
                a = deprocess_image(inp.numpy())
                Image.fromarray(a).save(fname_out)

    # in Python 3, map() is lazy, so iterate explicitly to run the ascent
    for f in range(CONV[args.arch][args.conv - 1]):
        gradient_ascent(f)