def train(args):
    """Train the detector for cfg.epochs epochs, checkpointing and appending
    per-epoch losses to train_log.csv.

    Args:
        args: parsed CLI namespace; only args.snapshot (checkpoint to resume
            from) is read here.  All other settings come from cfg.
    """
    # Restore (or freshly build) the model; start_epoch > 0 when resuming.
    model, start_epoch = build_network(snapshot=args.snapshot, backend=cfg.backend)
    optimizer = optim.Adam(
        model.parameters(), lr=cfg.start_lr, weight_decay=cfg.weight_decay
    )
    criterion = FocalLoss(alpha=cfg.alpha, gamma=cfg.gamma)

    def worker_init_fn(worker_id):
        # Give each DataLoader worker a distinct numpy seed so random
        # augmentations are not duplicated across workers.
        np.random.seed(np.random.get_state()[1][0] + worker_id)

    sample_path = [
        os.path.join(cfg.sample_path, x)
        for x in os.listdir(cfg.sample_path)
        if ".npz" in x
    ]
    train_data = CervicalDataset(
        sample_path,
        cfg.patch_size,
        transform=transforms.Compose(
            [
                RandomHorizontalFlip(0.5),
                RandomVerticalFlip(0.5),
                RandomRotate90(0.5),
                Normalizer(),
            ]
        ),
    )
    train_loader = DataLoader(
        train_data,
        batch_size=cfg.batch_size,
        shuffle=True,
        drop_last=False,
        collate_fn=collater,
        num_workers=cfg.num_worker,
        worker_init_fn=worker_init_fn,
    )
    epochs = cfg.epochs
    for epoch in range(start_epoch, epochs):
        train_cls_loss, train_reg_loss, train_loss = train_epoch(
            model=model,
            train_loader=train_loader,
            criterion=criterion,
            optimizer=optimizer,
            epoch=epoch,
            n_epochs=epochs,
            start_lr=cfg.start_lr,
            lr_power=cfg.lr_power,
            print_freq=1,
        )
        save_model(model, epoch, cfg.checkpoint_path)
        mkdir(cfg.log_path)
        # FIX: join directory and file name explicitly; plain string
        # concatenation wrote outside cfg.log_path whenever the configured
        # path lacked a trailing separator.
        with open(os.path.join(cfg.log_path, "train_log.csv"), "a") as log_file:
            log_file.write(
                "%03d,%0.5f,%0.5f,%0.5f\n"
                % ((epoch + 1), train_cls_loss, train_reg_loss, train_loss)
            )
def __init__(self, params):
    """Store the hyper-parameter dict, expose the common settings as
    attributes, pre-declare every graph handle, and build the network."""
    self.params = params
    self.epochs = params['epochs']
    self.batch_size = params['batch_size']
    self.display_step = params['display_step']
    # Graph/tensor handles: declared up front as None, populated later by
    # the build/train steps.
    for handle in (
        'train_graph', 'train_op', 'train_source', 'train_target',
        'valid_sources_batch', 'valid_targets_batch',
        'valid_sources_lengths', 'valid_targets_lengths',
        'cost', 'input_data', 'targets', 'keep_prob',
        'target_sequence_length', 'max_target_sequence_length',
        'source_sequence_length', 'lr',
        'train_logits', 'inference_logits',
    ):
        setattr(self, handle, None)
    self.bn = build_network(params)
import os
import check_data
import tensorflow as tf
from glob import glob
from build_network import build_network

# Location of the training images.
data_dir = './data'

# Hyper-parameters for this run.
params = {
    'epoch_count': 1,
    'batch_size': 32,
    'z_dim': 100,
    'learning_rate': 0.0002,
    'beta1': 0.5
}

bn = build_network(params)

# Gather every aligned CelebA jpeg and wrap the list in a Dataset helper.
image_paths = glob(os.path.join(data_dir, 'img_align_celeba/*.jpg'))
celeba_dataset = check_data.Dataset('celeba', image_paths)

# Run training inside a fresh default graph.
with tf.Graph().as_default():
    bn.train(celeba_dataset.get_batches,
             celeba_dataset.shape,
             celeba_dataset.image_mode)
def main():
    """Build the model and train it on text/capitalization pairs streamed
    from the files named in sys.argv[1] / sys.argv[2].

    The first 400 lines of each file form a held-out validation set; the
    rest is consumed in batches of 50.  Every 10 batches the validation set
    is scored and the session is checkpointed whenever accuracy improves.
    """
    model = build_network()
    print("The graph has been built.")
    f_text = open(sys.argv[1], encoding="utf-8")
    f_cap = open(sys.argv[2])
    # Hold out the first 400 lines of each stream for validation.
    val_text, val_lengths = data_to_tensor(itertools.islice(f_text, 400),
                                           ALPHABET_DICT)
    val_cap, _ = data_to_tensor(itertools.islice(f_cap, 400), TARGET_DICT)
    print("Validation data are ready.")
    train_op = get_train_op(model)
    print("Optimizer has been built.")
    session = tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=8))
    # FIX: tf.initialize_all_variables() is deprecated/removed in later TF1
    # releases; use the modern name (matches the other scripts in this repo).
    session.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    print("Session initialized.")
    batch_n = 0
    max_acc = 0.
    while True:
        batch_n += 1
        text_batch, batch_lengths = data_to_tensor(
            itertools.islice(f_text, 50), ALPHABET_DICT)
        cap_batch, _ = data_to_tensor(itertools.islice(f_cap, 50), TARGET_DICT)
        if text_batch.shape == (0, ):
            # Input exhausted.
            break
        _, predictions, cross_entropy = session.run(
            [train_op, model.predictions, model.cost],
            feed_dict={
                model.input: text_batch,
                model.targets: cap_batch,
                model.lengths: batch_lengths
            })
        accuracy = evaluation(predictions, cap_batch, batch_lengths)
        print("batch {}:\tacc: {:.4f}\txent: {:.4f}".format(
            batch_n, accuracy, cross_entropy))
        if batch_n % 10 == 0:
            predictions, cross_entropy = session.run(
                [model.predictions, model.cost],
                feed_dict={
                    model.input: val_text,
                    model.targets: val_cap,
                    model.lengths: val_lengths
                })
            # FIX: score the validation set with its own lengths; the old
            # code reused the last *training* batch's lengths, skewing the
            # reported validation accuracy.
            accuracy = evaluation(predictions, val_cap, val_lengths)
            print("")
            # FIX: "Valdidation" typo in the progress message.
            print("Validation after batch {}".format(batch_n))
            print(" accuracy: {:.5f}".format(accuracy))
            print(" cross-entropy: {:.5f}".format(cross_entropy))
            print("")
            if accuracy > max_acc:
                max_acc = accuracy
                saver.save(session, "model.variables")
    f_text.close()
    f_cap.close()
def predict(sample_paths, args):
    """Sliding-window inference over whole-slide .kfb images.

    For each slide: run the RetinaNet patch-by-patch, shift surviving boxes
    into slide coordinates, apply NMS in bounded chunks via handle_nms, and
    write the detections to <cfg.result_path>/<slide>.json.

    Args:
        sample_paths: iterable of .kfb slide file paths.
        args: CLI namespace; only args.snapshot is read.
    """
    model, start_epoch = build_network(snapshot=args.snapshot,
                                       backend='retinanet')
    model.eval()
    if not os.path.exists(cfg.result_path):
        os.makedirs(cfg.result_path)
    print("Begin to predict mask: ",
          time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
    for sample_path in sample_paths:
        filename = sample_path.split('/')[-1].split('.')[0]
        read = kfbReader.reader()
        read.ReadInfo(sample_path, 20, False)
        width = read.getWidth()
        height = read.getHeight()
        image_shape = (width, height)
        strides, x_num, y_num = calc_split_num((width, height))
        regressBoxes = BBoxTransform()
        clipBoxes = ClipBoxes()
        transformed_all = []
        classification_all = []
        for i in range(x_num):
            for j in range(y_num):
                # The last row/column patch is anchored to the slide edge so
                # it never reads outside the image.
                x = strides[0] * i if i < x_num - 1 \
                    else image_shape[0] - cfg.patch_size[0]
                y = strides[1] * j if j < y_num - 1 \
                    else image_shape[1] - cfg.patch_size[1]
                img = read.ReadRoi(x, y, cfg.patch_size[0], cfg.patch_size[1],
                                   scale=20).copy()
                img = img.transpose((2, 0, 1))  # HWC -> CHW
                img = img[np.newaxis, :, :, :]
                img = img.astype(np.float32) / 255.0
                img = torch.from_numpy(img).float()
                with torch.no_grad():
                    classification, regression, anchors = model(img.cuda())
                    transformed_anchors = regressBoxes(anchors, regression)
                    transformed_anchors = clipBoxes(transformed_anchors)
                    scores = classification
                    scores_over_thresh = (scores > 0.05)[0, :, 0]
                    if scores_over_thresh.sum() == 0:
                        continue
                    classification = classification[0, scores_over_thresh, :]
                    transformed_anchors = transformed_anchors[
                        0, scores_over_thresh, :]
                    # Shift boxes from patch-local to slide coordinates.
                    transformed_anchors[:, 0] = transformed_anchors[:, 0] + x
                    transformed_anchors[:, 1] = transformed_anchors[:, 1] + y
                    transformed_anchors[:, 2] = transformed_anchors[:, 2] + x
                    transformed_anchors[:, 3] = transformed_anchors[:, 3] + y
                    scores = scores[0, scores_over_thresh, :]
                    transformed_all.append(
                        torch.cat([transformed_anchors, scores], dim=1))
                    classification_all.append(classification)
        pos_all = []
        # FIX: guard against slides with no detections at all; torch.cat on
        # an empty list raises RuntimeError.  An empty JSON list is written
        # instead.
        if transformed_all:
            transformed_all = torch.cat(transformed_all, dim=0)
            classification_all = torch.cat(classification_all, dim=0)
            # NMS in chunks of at most 200000 boxes to bound peak memory.
            # FIX: exact ceiling division; the old floor-plus-one formula
            # produced a spurious extra chunk whenever the box count was an
            # exact multiple of 200000.
            num = (transformed_all.size(0) + 199999) // 200000
            trans = transformed_all.chunk(num, 0)
            classi = classification_all.chunk(num, 0)
            for i in range(num):
                pos_all = handle_nms(trans[i], classi[i], pos_all)
        with open(os.path.join(cfg.result_path, filename + ".json"), 'w') as f:
            json.dump(pos_all, f)
        print("Finish predict mask: ", filename,
              time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
def predict(sample_path, args):
    """Visual sanity check of the detector.

    For each patch in the dataset, draws predicted boxes (score > 0.5) on one
    copy of the image and ground-truth boxes on another, then saves the two
    side by side to cfg.result_path/result<idx>.jpg.
    """
    model, start_epoch = build_network(snapshot=args.snapshot,
                                       backend='retinanet')
    # FIX: model.eval() was called twice; once is sufficient.
    model.eval()
    if not os.path.exists(cfg.result_path):
        os.makedirs(cfg.result_path)
    test_data = CervicalDataset(sample_path, cfg.patch_size,
                                transform=transforms.Compose([Normalizer()]))
    # NOTE(review): shuffle=True makes the result<idx>.jpg numbering
    # non-deterministic across runs — confirm this is intended for a test
    # loader.
    test_loader = DataLoader(test_data,
                             batch_size=1,
                             shuffle=True,
                             drop_last=False,
                             collate_fn=collater,
                             num_workers=0)
    regressBoxes = BBoxTransform()
    clipBoxes = ClipBoxes()
    with torch.no_grad():
        for idx, data in enumerate(test_loader):
            st = time.time()
            annotations = data['label'].cuda()
            classification, regression, anchors = model(data['img'].cuda())
            scores, transformed_anchors = transform_anchors(
                classification, regression, anchors, regressBoxes, clipBoxes)
            print('Elapsed time: {}'.format(time.time() - st))
            scores = scores.detach().cpu().numpy()
            transformed_anchors = transformed_anchors.detach().cpu().numpy()
            idxs = np.where(scores > 0.5)
            # Undo the [0, 1] normalisation and clamp to a valid pixel range.
            img = np.array(255 * data['img'][0, :, :, :]).copy()
            img[img < 0] = 0
            img[img > 255] = 255
            img = np.transpose(img, (1, 2, 0))
            img = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2RGB)
            img_anno = img.copy()
            # Left panel: predictions above the 0.5 score threshold.
            for j in range(idxs[0].shape[0]):
                bbox = transformed_anchors[idxs[0][j], :]
                x1 = int(bbox[0])
                y1 = int(bbox[1])
                x2 = int(bbox[2])
                y2 = int(bbox[3])
                draw_caption(img, (x1, y1, x2, y2), str(scores[idxs[0][j]]))
                cv2.rectangle(img, (x1, y1), (x2, y2),
                              color=(0, 0, 255), thickness=2)
            # Right panel: ground-truth annotations.
            for j in range(annotations.shape[1]):
                bbox = annotations[0, j, :]
                x1 = int(bbox[0])
                y1 = int(bbox[1])
                x2 = int(bbox[2])
                y2 = int(bbox[3])
                draw_caption(img_anno, (x1, y1, x2, y2), 'pos')
                cv2.rectangle(img_anno, (x1, y1), (x2, y2),
                              color=(0, 0, 255), thickness=2)
            merge_img = np.hstack([img, img_anno])
            cv2.imwrite(
                os.path.join(cfg.result_path, "result" + str(idx) + ".jpg"),
                merge_img)
def predict(sample_paths, args):
    """Sliding-window inference over whole-slide .kfb images with one global
    NMS pass, writing detections to <cfg.result_path>/<slide>.json.

    Args:
        sample_paths: iterable of .kfb slide file paths.
        args: CLI namespace; only args.snapshot is read.
    """
    model, start_epoch = build_network(snapshot=args.snapshot,
                                       backend="retinanet")
    model.eval()
    if not os.path.exists(cfg.result_path):
        os.makedirs(cfg.result_path)
    print(
        "Begin to predict mask: ",
        time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    )
    for sample_path in sample_paths:
        filename = sample_path.split("/")[-1].split(".")[0]
        read = kfbReader.reader()
        read.ReadInfo(sample_path, 20, False)
        width = read.getWidth()
        height = read.getHeight()
        image_shape = (width, height)
        strides, x_num, y_num = calc_split_num((width, height))
        regressBoxes = BBoxTransform()
        clipBoxes = ClipBoxes()
        transformed_all = []
        classification_all = []
        # FIX: iterate the full grid.  range(x_num // 2) / range(y_num // 2)
        # scanned only a quarter of each slide and made the edge-anchoring
        # branch below (i == x_num - 1 / j == y_num - 1) unreachable.
        for i in range(x_num):
            for j in range(y_num):
                x = (
                    strides[0] * i
                    if i < x_num - 1
                    else image_shape[0] - cfg.patch_size[0]
                )
                y = (
                    strides[1] * j
                    if j < y_num - 1
                    else image_shape[1] - cfg.patch_size[1]
                )
                img = read.ReadRoi(
                    x, y, cfg.patch_size[0], cfg.patch_size[1], scale=20
                ).copy()
                img = img.transpose((2, 0, 1))  # HWC -> CHW
                img = img[np.newaxis, :, :, :]
                img = img.astype(np.float32) / 255.0
                img = torch.from_numpy(img).float()
                with torch.no_grad():
                    classification, regression, anchors = model(img.cuda())
                    transformed_anchors = regressBoxes(anchors, regression)
                    transformed_anchors = clipBoxes(transformed_anchors)
                    scores = classification
                    scores_over_thresh = (scores > 0.05)[0, :, 0]
                    if scores_over_thresh.sum() == 0:
                        continue
                    classification = classification[0, scores_over_thresh, :]
                    transformed_anchors = transformed_anchors[0, scores_over_thresh, :]
                    # Shift boxes from patch-local to slide coordinates.
                    transformed_anchors[:, 0] = transformed_anchors[:, 0] + x
                    transformed_anchors[:, 1] = transformed_anchors[:, 1] + y
                    transformed_anchors[:, 2] = transformed_anchors[:, 2] + x
                    transformed_anchors[:, 3] = transformed_anchors[:, 3] + y
                    scores = scores[0, scores_over_thresh, :]
                    transformed_all.append(
                        torch.cat([transformed_anchors, scores], dim=1))
                    classification_all.append(classification)
        pos_all = []
        # FIX: guard against slides with no detections; torch.cat on an empty
        # list raises RuntimeError.  An empty JSON list is written instead.
        if transformed_all:
            transformed_all = torch.cat(transformed_all, dim=0)
            classification_all = torch.cat(classification_all, dim=0)
            anchors_num_idx = nms(transformed_all, 0.5)
            nms_scores = classification_all[anchors_num_idx, :]
            nms_transformed = transformed_all[anchors_num_idx, :]
            scores = nms_scores.detach().cpu().numpy()
            transformed = nms_transformed.detach().cpu().numpy()
            for i in range(scores.shape[0]):
                x = int(transformed[i, 0])
                y = int(transformed[i, 1])
                # Width/height clamped to at least 1 pixel.
                w = max(int(transformed[i, 2] - transformed[i, 0]), 1)
                h = max(int(transformed[i, 3] - transformed[i, 1]), 1)
                p = float(scores[i, 0])
                pos = {"x": x, "y": y, "w": w, "h": h, "p": p}
                pos_all.append(pos)
        with open(os.path.join(cfg.result_path, filename + ".json"), "w") as f:
            json.dump(pos_all, f)
        print(
            "Finish predict mask: ",
            filename,
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
        )
def train_network(args):
    """Train the switching-dynamics encoder with a pure likelihood loss and
    dump encodings/probabilities/predictions to CSV under ./outputs/<tag>.

    args keys read here: batchsize, seed, dataset, tag, device, nenc, nstate,
    syspick, epochs, ntestbatch.
    """
    ##setup
    batch_size = args["batchsize"]
    np.random.seed(args["seed"])
    tf.set_random_seed(args["seed"])
    dataset = args["dataset"]
    tag = args["tag"]
    import os
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args["device"])
    direc = util.get_directory(direc="./outputs/", tag=tag)
    util.save_dict(direc + "/training_params.csv", args)
    #######
    ##get data
    data = get_data.get_data(dataset, "train")
    data = [tf.expand_dims(data[0], -1), data[1]]
    # FIX: shuffle_batch preserves the input order [data, label]; the tensors
    # were unpacked as (tr_lab, tr_dat), so the placeholder below was shaped
    # from the labels and labels were fed to the encoder.
    tr_dat, tr_lab = tf.train.shuffle_batch(data,
                                            batch_size,
                                            capacity=30,
                                            min_after_dequeue=10,
                                            seed=0)
    tedata = get_data.get_data(dataset, "test")
    tedata = [tf.expand_dims(tedata[0], -1), tedata[1]]
    # FIX: same swapped unpacking for the test batch.
    te_dat, te_lab = tf.train.shuffle_batch(tedata,
                                            batch_size,
                                            capacity=30,
                                            min_after_dequeue=10,
                                            seed=0)
    ##########
    ##Build Network
    input_tensor = tf.placeholder(tf.float32, tr_dat.shape)
    enc, prob, pred, syst, off, init_prob = net.build_network(
        input_tensor, args["nenc"], args["nstate"], True,
        syspicktype=args["syspick"])
    ###############
    ##Losses
    lik = losses.likelihood_loss(enc, pred, prob)
    rms = losses.rms_loss(enc, pred, prob)
    mine = losses.MINE_loss(enc, prob)
    pre_ent = losses.sys_prior_ent_loss(prob)
    post_ent = losses.sys_posterior_ent_loss(prob)
    emean = tf.reduce_mean(enc, axis=[0, 1], keepdims=True)
    # Penalizes collapse of the encoding variance (grows as variance -> 0).
    varreg = tf.maximum((1. / (.001 + tf.reduce_mean((enc - emean)**2))) - 1., 0)
    meanediff = tf.reduce_mean((enc[:, :-1] - enc[:, 1:])**2)
    prederr = tf.reduce_mean(
        tf.expand_dims(prob[:, :-1], -1) *
        (tf.expand_dims(enc[:, 1:], 2) - pred[:, :-1])**2)
    scalereg = tf.reduce_mean(tf.reduce_sum(enc**2, 2))
    loss = lik
    adamopt = tf.train.AdamOptimizer(learning_rate=.001)
    fulltrain = adamopt.minimize(loss)
    init = tf.global_variables_initializer()
    coord = tf.train.Coordinator()
    sess = tf.Session()
    sess.run(init)
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    # Fixed held-out batches for periodic evaluation.
    test = [sess.run([te_dat, te_lab]) for k in range(3)]
    LOG = log.log(direc + "/logfile.log", name="epoch,prederr,prior_entropy")
    dat, lab = sess.run([tr_dat, tr_lab])
    for k in range(args["epochs"]):
        dat, lab = sess.run([tr_dat, tr_lab])
        tr, pe = sess.run([fulltrain, pre_ent], {input_tensor: dat})
        if k % 50 == 0:
            rms_error = 0
            for t in range(len(test)):
                dat, lab = test[t]
                r = sess.run(prederr, {input_tensor: dat})
                rms_error += r
            rms_error /= len(test)
            LOG.log("{}\t{}\t{}".format(k, rms_error, pe))
    ###make test data
    lab = []
    dat = []
    e = []
    p = []
    pr = []
    NN = args["ntestbatch"]
    for k in range(NN):
        d, l = sess.run([tr_dat, tr_lab])
        en, pp, ppr = sess.run([enc, prob, pred], {input_tensor: d})
        # FIX: labels go to lab and data to dat (the two were swapped,
        # so nascar_lab.csv held data and nascar_dat.csv held labels).
        lab.append(l)
        dat.append(d)
        e.append(en)
        p.append(pp)
        pr.append(ppr)
    lab = np.concatenate(lab)
    dat = np.concatenate(dat)
    e = np.concatenate(e)
    p = np.concatenate(p)
    pr = np.concatenate(pr)
    sys, O = sess.run([syst, off])
    sysdense = sess.run(trainable("syspick"))
    for s in range(len(sysdense)):
        np.savetxt(direc + "/nascar_syspick_{}.csv".format(s), sysdense[s])
    np.savetxt(direc + "/nascar_lab.csv", np.reshape(lab, [batch_size * NN, -1]))
    np.savetxt(direc + "/nascar_dat.csv", np.reshape(dat, [batch_size * NN, -1]))
    np.savetxt(direc + "/nascar_enc.csv", np.reshape(e, [batch_size * NN, -1]))
    np.savetxt(direc + "/nascar_pro.csv", np.reshape(p, [batch_size * NN, -1]))
    np.savetxt(direc + "/nascar_pre.csv", np.reshape(pr, [batch_size * NN, -1]))
    np.savetxt(direc + "/nascar_sys.csv", np.reshape(sys, [len(sys), -1]))
    np.savetxt(direc + "/nascar_O.csv", O)
    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=5)
    sess.close()
# Optionally replace the inputs with a first-to-last linear interpolation.
if config.getboolean('DEFAULT', 'interpolate_input'):
    sr_utils.makeInterpolation(state.imgs)

# Inputs are final now: persist each one and render its baseline figure.
for j, entry in enumerate(state.imgs):
    torch.save(entry['net_input'], "output/inputs/input_{}.pt".format(j))
    sr_utils.make_baseline_figure(
        entry['HR_torch'].cpu(),
        entry['HR_torch_bicubic'].cpu(),
        entry['LR_torch'].cpu(),
        'baseline_{}'.format(j)
    )

state.net = build_network(state.dtype)
c = build_closure(writer, state.dtype)
state.i = 0
p = list(state.net.parameters())
#sr_utils.printMetrics()

# Pick the progress-table header that matches the configured logging mode.
if config.has_option('LOADING', 'augmented_history') \
        and config.getboolean('LOADING', 'augmented_history'):
    print("Iteration / Frame used / psnr_LR / psnr_HR / psnr_blurred / psnr_downsampled ")
elif not (config.has_option('LOADING', 'ignore_ground_truth')
          and config.getboolean('LOADING', 'ignore_ground_truth')):
    print("Iteration / Frame used / psnr_LR / psnr_HR ")
else:
    print("Iteration / Frame used / psnr_LR / Training Loss ")
#########################
# Build the connection network
#########################
infilename = iodir + "extract.out"
boxfilename = iodir + "boxes" + filetag + ".out"
outfilename = iodir + "connections" + filetag + ".out"
senfilename = iodir + "sentiment" + filetag + ".out"
graphfilename = iodir + "graph" + filetag + ".txt"
if not config.getboolean("network", "default_filenames"):
    # The config file overrides every default path.
    infilename = config.get("network", "input_file")
    boxfilename = config.get("network", "box_file")
    outfilename = config.get("network", "con_file")
    senfilename = config.get("network", "sen_file")
    graphfilename = config.get("network", "graph_file")

# Rebuild only when forced via config, or when no prior output exists.
if config.getboolean("network", "regen") or (not os.path.isfile(outfilename)):
    build_network(infilename,
                  boxfilename,
                  outfilename,
                  senfilename,
                  graphfilename,
                  size=size,
                  county=county,
                  stats_file=stats_file,
                  randchoice=False)
else:
    print("Using existing", outfilename)

#########################
# Find communities
#########################
infilename = iodir + "connections" + filetag + ".out"
graphfilename = iodir + "graph" + filetag + ".txt"
outfilename = iodir + "communities" + filetag + ".out"
if not config.getboolean("findcom", "default_filenames"):
    infilename = config.get("findcom", "input_file")
    graphfilename = config.get("findcom", "graph_file")
    outfilename = config.get("findcom", "find_file")
def train_network(args):
    """Train the switching-dynamics encoder/MINE model configured by `args`.

    args keys read here: batchsize, seed, dataset, tag, train_mode, device,
    nenc, nstate, syspick, likloss, regloss, ent_loss, MINE_grad_reg, epochs,
    ntestbatch.  Logs and CSV dumps are written under ./outputs/<tag>.

    train_mode selects which optimizer step runs each epoch:
      "full"      — joint loss + regularizers every step;
      "minefirst" — MINE/encoder steps for the first half, system steps after;
      "mineonly"  — MINE/encoder steps only.
    """
    ##setup
    batch_size = args["batchsize"]
    np.random.seed(args["seed"])
    tf.set_random_seed(args["seed"])
    dataset = args["dataset"]
    tag = args["tag"]
    train_mode = args["train_mode"]
    import os
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args["device"])
    direc = util.get_directory(direc="./outputs/", tag=tag)
    util.save_dict(direc + "/training_params", args)
    #######
    ##get data
    # Queue-based input pipeline: [data, label] batches for train and test.
    data = get_data.get_data(dataset, "train")
    data = [tf.expand_dims(data[0], -1), data[1]]
    tr_dat, tr_lab = tf.train.shuffle_batch(data,
                                            batch_size,
                                            capacity=30,
                                            min_after_dequeue=10,
                                            seed=0)
    tedata = get_data.get_data(dataset, "test")
    tedata = [tf.expand_dims(tedata[0], -1), tedata[1]]
    te_dat, te_lab = tf.train.shuffle_batch(tedata,
                                            batch_size,
                                            capacity=30,
                                            min_after_dequeue=10,
                                            seed=0)
    ##########
    ##Build Network
    input_tensor = tf.placeholder(tf.float32, tr_dat.shape)
    enc, prob, pred, syst, off, init_prob = net.build_network(
        input_tensor, args["nenc"], args["nstate"], False,
        syspicktype=args["syspick"])
    ###############
    ##Losses
    rms = losses.likelihood_loss(enc, pred, prob)
    mine = losses.MINE_loss(enc, prob)
    # Weight-magnitude regularizers over the MINE and encoder variable scopes.
    minevar = trainable(scope="MINE")
    minereg = tf.reduce_max([tf.reduce_max(k**2) for k in minevar])
    othervar = trainable(scope="enc")
    otherreg = tf.reduce_max([tf.reduce_max(k**2) for k in othervar])
    pre_ent = losses.sys_prior_ent_loss(prob)
    post_ent = losses.sys_posterior_ent_loss(prob)
    emean = tf.reduce_mean(enc, axis=[0, 1], keepdims=True)
    # Penalizes collapse of the encoding variance (grows as variance -> 0).
    varreg = tf.maximum((1. / (.001 + tf.reduce_mean((enc - emean)**2))) - .5, 0)
    meanediff = tf.reduce_mean((enc[:, :-1] - enc[:, 1:])**2)
    # Probability-weighted one-step prediction error on the encodings.
    prederr = tf.reduce_mean(
        tf.expand_dims(prob[:, :-1], -1) *
        (tf.expand_dims(enc[:, 1:], 2) - pred[:, :-1])**2)
    # Same error normalized by the mean step-to-step encoding change.
    pererr = tf.reduce_mean(
        tf.expand_dims(prob[:, :-1], -1) *
        ((tf.expand_dims(enc[:, 1:], 2) - pred[:, :-1])**2)) / tf.reduce_mean(
            tf.expand_dims((enc[:, :-1] - enc[:, 1:]), 2)**2)
    scalereg = tf.reduce_mean(tf.reduce_sum(enc**2, 2))
    loss = args["likloss"] * rms
    reg = args["regloss"] * (mine + scalereg + varreg + minereg + otherreg)
    reg += args["ent_loss"] * post_ent
    minegradreg = losses.MINE_grad_regularization(enc)
    reg += args["MINE_grad_reg"] * minegradreg
    ########
    # Three optimizer steps over different variable subsets; train_mode
    # decides which one runs each epoch.
    adamopt = tf.train.AdamOptimizer(learning_rate=.0001)
    fulltrain = adamopt.minimize(loss + reg)
    minetrain = adamopt.minimize(reg,
                                 var_list=trainable("MINE") + trainable("enc"))
    systtrain = adamopt.minimize(loss, var_list=trainable("sys"))
    ########
    init = tf.global_variables_initializer()
    coord = tf.train.Coordinator()
    sess = tf.Session()
    sess.run(init)
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)
    ########TRAINING
    # Three fixed held-out batches for periodic evaluation.
    test = [sess.run([te_dat, te_lab]) for k in range(3)]
    LOG = log.log(["epoch", "percenterr", "prior_entropy", "encmean", "mine"],
                  PRINT=True)
    dat, lab = sess.run([tr_dat, tr_lab])
    for k in range(args["epochs"]):
        dat, lab = sess.run([tr_dat, tr_lab])  #get data batch
        if train_mode == "full":
            tr, pe = sess.run([fulltrain, pre_ent], {input_tensor: dat})
        elif train_mode == "minefirst":
            if k < args["epochs"] / 2:
                tr, pe = sess.run([minetrain, pre_ent], {input_tensor: dat})
            else:
                tr, pe = sess.run([systtrain, pre_ent], {input_tensor: dat})
        elif train_mode == "mineonly":
            tr, pe = sess.run([minetrain, pre_ent], {input_tensor: dat})
        else:
            print("Training mode not recognized")
            exit()
        if k % 50 == 0:
            # Average the evaluation metrics over the held-out batches.
            teloss = 0
            tmean = 0
            mineloss = 0
            per_error = 0
            for t in range(len(test)):
                dat, lab = test[t]
                l, e, m, r = sess.run([meanediff, enc, mine, pererr],
                                      {input_tensor: dat})
                teloss += l
                tmean += np.max(e**2)
                mineloss += m
                per_error += r
            teloss /= len(test)
            tmean /= len(test)
            mineloss /= len(test)
            per_error /= len(test)
            LOG.log([k, per_error, pe, tmean, mineloss])
            LOG.save(direc + "/logfile.json")
    ###make test data
    # Collect NN batches of encodings/probabilities/predictions and dump
    # everything to CSV for offline analysis.
    lab = []
    dat = []
    e = []
    p = []
    pr = []
    NN = args["ntestbatch"]
    for k in range(NN):
        d, l = sess.run([tr_dat, tr_lab])
        en, pp, ppr = sess.run([enc, prob, pred], {input_tensor: d})
        lab.append(l)
        dat.append(d)
        e.append(en)
        p.append(pp)
        pr.append(ppr)
    lab = np.concatenate(lab)
    dat = np.concatenate(dat)
    e = np.concatenate(e)
    p = np.concatenate(p)
    pr = np.concatenate(pr)
    sys, O = sess.run([syst, off])
    sysdense = sess.run(trainable("syspick_dense"))
    for s in range(len(sysdense)):
        np.savetxt(direc + "/nascar_syspick_{}.csv".format(s), sysdense[s])
    np.savetxt(direc + "/nascar_lab.csv", np.reshape(lab, [batch_size * NN, -1]))
    np.savetxt(direc + "/nascar_dat.csv", np.reshape(dat, [batch_size * NN, -1]))
    np.savetxt(direc + "/nascar_enc.csv", np.reshape(e, [batch_size * NN, -1]))
    np.savetxt(direc + "/nascar_pro.csv", np.reshape(p, [batch_size * NN, -1]))
    np.savetxt(direc + "/nascar_pre.csv", np.reshape(pr, [batch_size * NN, -1]))
    np.savetxt(direc + "/nascar_sys.csv", np.reshape(sys, [len(sys), -1]))
    np.savetxt(direc + "/nascar_O.csv", O)
    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=5)
    sess.close()