def test_image(result_folder, epoch=None, image_idx=0, use_cpu=False):
    """Test a network on one test image."""
    if use_cpu:
        bnet.set_mode_cpu()
    else:
        bnet.set_mode_gpu()
    _LOGGER.info("Loading data...")
    tr_data, _ = training_data()
    te_data, _ = test_data()
    from data import _MEAN
    _LOGGER.info("Loading network...")
    # Load the model for training.
    model, _, _, _ = _model(result_folder, tr_data.shape[0], epoch=epoch)
    _LOGGER.info("Predicting...")
    results = model.predict(te_data[:image_idx + 1],
                            test_callbacks=[
                                RandCropMonitor('data', _MEAN),
                                mnt.ProgressIndicator()
                            ],
                            out_blob_names=['score'])
    _LOGGER.info("Prediction for image %d: %s.",
                 image_idx, str(results[image_idx]))

def score(result_folder, epoch=None, use_cpu=False):
    """Test a network on the dataset."""
    if use_cpu:
        bnet.set_mode_cpu()
    else:
        bnet.set_mode_gpu()
    _LOGGER.info("Loading data...")
    tr_data, _ = training_data()
    te_data, te_labels = test_data()
    from data import _MEAN
    _LOGGER.info("Loading network...")
    # Load the model.
    model, _, _, _ = _model(result_folder, tr_data.shape[0],
                            epoch=epoch, no_solver=True)
    _LOGGER.info("Predicting...")
    results = model.predict(te_data,
                            test_callbacks=[
                                RandCropMonitor('data', _MEAN),
                                mnt.ProgressIndicator()
                            ],
                            out_blob_names=['score'])
    _LOGGER.info("Accuracy: %f.",
                 accuracy_score(te_labels,
                                np.argmax(np.array(results), axis=1)))

def predict():
    """Load the trained network and return test-set predictions."""
    weights_path = os.path.join(checkpoints_dir, "weights.h5")
    net = load_model(weights_path, custom_objects={"focal_loss_fixed": focal_loss()})
    X_test, y_test = test_data()
    preds = net.predict(X_test, batch_size=batch_size, verbose=1)
    # The first output/label pair corresponds to the primary (cancer) head of
    # the multitask model.
    return preds[0], y_test[0]

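# NOTE: `focal_loss` itself is not shown in this snippet. The
# custom_objects={"focal_loss_fixed": focal_loss()} pattern suggests a factory
# that returns an inner function named `focal_loss_fixed`. Below is a minimal
# sketch of such a factory, assuming the standard binary focal loss; the
# gamma/alpha defaults are illustrative, not necessarily the project's values.
import keras.backend as K

def focal_loss(gamma=2.0, alpha=0.25):
    def focal_loss_fixed(y_true, y_pred):
        # Clip predictions so log() stays finite.
        eps = K.epsilon()
        y_pred = K.clip(y_pred, eps, 1.0 - eps)
        # p_t is the probability the model assigns to the true class.
        p_t = y_true * y_pred + (1.0 - y_true) * (1.0 - y_pred)
        alpha_t = y_true * alpha + (1.0 - y_true) * (1.0 - alpha)
        # Down-weight well-classified examples by (1 - p_t)^gamma.
        return -K.mean(alpha_t * K.pow(1.0 - p_t, gamma) * K.log(p_t))
    return focal_loss_fixed
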
def train():
    if not os.path.exists(checkpoints_dir):
        os.makedirs(checkpoints_dir)
    if not os.path.exists(logs_dir):
        os.makedirs(logs_dir)
    X_train, y_train = train_data()
    X_test, y_test = test_data()
    print("Training and validation data processed.")
    model = multitask_cnn()
    optimizer = RMSprop(lr=base_lr)
    model.compile(
        optimizer=optimizer,
        loss=loss_dict,
        loss_weights=loss_weights_dict,
        metrics=["accuracy"],
    )
    training_log = TensorBoard(log_dir=os.path.join(logs_dir, "log"), write_graph=False)
    callbacks = [training_log]
    for e in range(epochs):
        # Re-augment the training data at the start of every epoch.
        X_train_augmented = augment(X_train)
        model.fit(
            {"thyroid_input": X_train_augmented},
            y_train,
            validation_data=(X_test, y_test),
            batch_size=batch_size,
            epochs=e + 1,
            initial_epoch=e,
            shuffle=True,
            callbacks=callbacks,
        )
        # Every 10 epochs: log train/test AUC for the cancer output and checkpoint.
        if np.mod(e + 1, 10) == 0:
            y_pred = model.predict(X_train, batch_size=batch_size, verbose=1)
            auc_train = roc_auc_score(y_train["out_cancer"], y_pred[0])
            y_pred = model.predict(X_test, batch_size=batch_size, verbose=1)
            auc_test = roc_auc_score(y_test[0], y_pred[0])
            with open(os.path.join(logs_dir, "auc.txt"), "a") as auc_file:
                auc_file.write("{},{}\n".format(auc_train, auc_test))
            model.save(os.path.join(checkpoints_dir, "weights.h5"))
    print("Training completed.")

def test_unit(T=100):
    X = data.test_data(T)
    bh_rewards = algo.hindsight(X)
    x_oga, oga_rewards = algo.oga(X)
    x_ons, ons_rewards = algo.ons(X)
    print("final x_oga = ", x_oga)
    print("final x_ons = ", x_ons)
    print("should be", 0.5 * np.ones([2, 1]))
    algs_rewards = [[np.cumsum(oga_rewards), "oga"],
                    [np.cumsum(ons_rewards), "ons"]]
    plot.plot_regret(bh_rewards, algs_rewards)

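# NOTE: `algo.oga` and `algo.ons` are not shown here. For reference, a minimal
# sketch of projected online gradient ascent over the probability simplex with
# a log-wealth reward per round (consistent with the symmetric 0.5 * ones fixed
# point checked above); the shapes, step size, and names are assumptions, not
# the module's actual API.
import numpy as np

def project_simplex(v):
    # Euclidean projection onto {x : x >= 0, sum(x) = 1} (Duchi et al., 2008).
    u = np.sort(v)[::-1]
    css = np.cumsum(u)
    rho = np.nonzero(u * np.arange(1, v.size + 1) > (css - 1.0))[0][-1]
    theta = (css[rho] - 1.0) / (rho + 1.0)
    return np.maximum(v - theta, 0.0)

def oga_sketch(X, eta=0.05):
    # X: (T, n) array of per-round gross returns.
    T, n = X.shape
    x = np.full(n, 1.0 / n)
    rewards = np.empty(T)
    for t in range(T):
        gain = float(x @ X[t])
        rewards[t] = np.log(gain)                   # log-wealth gained this round
        x = project_simplex(x + eta * X[t] / gain)  # gradient step, then project
    return x, rewards
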
def test_image(result_folder, epoch=None, image_idx=0, use_cpu=False):
    """Test a network on one test image."""
    if use_cpu:
        bnet.set_mode_cpu()
    else:
        bnet.set_mode_gpu()
    _LOGGER.info("Loading data...")
    tr_data, _ = training_data()
    te_data, _ = test_data()
    _LOGGER.info("Loading network...")
    # Load the model for training.
    model, _, _, _ = _model(result_folder, tr_data.shape[0], epoch=epoch)
    _LOGGER.info("Predicting...")
    results = model.predict(te_data, test_callbacks=[mnt.ProgressIndicator()])
    _LOGGER.info("Prediction for image %d: %s.",
                 image_idx, str(results[image_idx]))

def cli(result_folder,
        model_name=None,
        epoch=None,
        num_epoch=150,
        optimizer_name='sgd',
        lr_param=0.1,
        lr_decay_sched='90,135',
        lr_decay_ratio=0.1,
        mom_param=0.9,
        wd_param=0.0001,
        monitor=False,
        allow_overwrite=False,
        use_cpu=False):
    """Train a model."""
    print("Parameters: ", sys.argv)
    if use_cpu:
        bnet.set_mode_cpu()
    else:
        bnet.set_mode_gpu()
    # Load the data.
    tr_data, tr_labels = training_data()
    te_data, te_labels = test_data()
    from data import _MEAN
    # Set up the output folder, including logging.
    model, optimizer, out_folder, base_iter = _model(
        result_folder, tr_data.shape[0], model_name, epoch, 10,
        optimizer_name, lr_param, lr_decay_sched, lr_decay_ratio,
        mom_param, wd_param, False, allow_overwrite)
    batch_size = model.blobs['data'].shape[0]
    logger = mnt.JSONLogger(str(out_folder), 'model',
                            {'train': ['train_loss', 'train_accuracy'],
                             'test': ['test_loss', 'test_accuracy']},
                            base_iter=base_iter,
                            write_every=round_to_mbsize(10000, batch_size),
                            create_plot=monitor)
    progr_ind = mnt.ProgressIndicator()
    cropper = RandCropMonitor('data', _MEAN)
    if monitor:
        extra_monitors = [
            mnt.ActivationMonitor(round_to_mbsize(10000, batch_size),
                                  os.path.join(str(out_folder),
                                               'visualizations' + os.sep),
                                  selected_blobs=['resblock3_out', 'avpool'],
                                  sample={'data': tr_data[0]}),
            mnt.FilterMonitor(round_to_mbsize(10000, batch_size),
                              os.path.join(str(out_folder),
                                           'visualizations' + os.sep),
                              selected_parameters={'resblock1_conv1': [0],
                                                   'resblock3_conv1': [0],
                                                   'resblock7_conv1': [0]}),
            mnt.GradientMonitor(round_to_mbsize(10000, batch_size),
                                os.path.join(str(out_folder),
                                             'visualizations' + os.sep),
                                relative=True,
                                selected_parameters={'resblock1_conv1': [0, 1],
                                                     'resblock3_conv1': [0, 1],
                                                     'resblock7_conv1': [0, 1]}),
        ]
    else:
        extra_monitors = []
    model.fit(round_to_mbsize(num_epoch * 50000, batch_size),
              optimizer,
              X={'data': tr_data, 'labels': tr_labels},
              X_val={'data': te_data, 'labels': te_labels},
              test_interval=round_to_mbsize(50000, batch_size),
              train_callbacks=[
                  progr_ind,
                  logger,
                  mnt.RotatingMirroringMonitor({'data': 0}, 0, 0.5),
                  cropper,
                  mnt.Checkpointer(os.path.join(str(out_folder), 'model'),
                                   round_to_mbsize(50000 * 10, batch_size),
                                   base_iterations=base_iter),
              ] + extra_monitors,
              test_callbacks=[progr_ind, cropper, logger],
              shuffle=True)

model_save_path = r'model_weights.h5'

# Loading data.
X, y = data_load(ip_data_path, op_data_path)
# Reduce y to 8th order.
y = y[:, :45]
print('Data Loaded ... \n')

res_model = build_nn_resnet()
print('Network Constructed ... \n')

print('Training Network ... \n')
res_model = train_network(res_model, X, y, num_epoch=400, batch=1000,
                          save_path=model_save_path)

print('Making Predictions and Saving file')
#save_file_path = r'D:\Users\Vishwesh\PycharmProjects\Deep_PNAS\Model_Results_2019\seq_resnet_v2.mat'
save_file_path = r'test_resnet_v2.mat'
test_data(res_model, test_data_path, save_file_path)

def main():
    # tolerance in the computation
    tol = 1e-10

    # assign the flag for the low-permeable fractures
    mesh_size = 0.5 * 1e-2
    tol_network = mesh_size
    mesh_kwargs = {"mesh_size_frac": mesh_size, "mesh_size_min": mesh_size / 20}

    # read and mark the original fracture network; the fracture ids will be preserved
    file_name = "network.csv"
    domain = {"xmin": 0, "xmax": 1, "ymin": -1, "ymax": 1}
    network = pp.fracture_importer.network_2d_from_csv(file_name, domain=domain)
    # set the original id
    network.tags["original_id"] = np.arange(network.num_frac, dtype=int)
    # save the original network
    network_original = network.copy()

    # set the condition, meaning whether for a branch we solve the problem
    # with < (1) or with > (0); for simplicity we set all equal
    network.tags["condition"] = np.ones(network.num_frac, dtype=int)

    flux_threshold = 0.15
    cond = lambda flux, op, tol=0: condition_interface(flux_threshold, flux, op, tol)

    file_name = "case2"
    folder_name = "./linear/"
    variable_to_export = [Flow.pressure, Flow.P0_flux, "original_id", "condition"]

    iteration = 0
    max_iteration = 1e3
    okay = False
    while not okay:
        print("iteration", iteration)

        # create the grid bucket
        gb = network.mesh(mesh_kwargs, dfn=True,
                          preserve_fracture_tags=["original_id", "condition"])

        # create the discretization
        discr = Flow(gb)
        discr.set_data(test_data())

        # problem solution
        A, b = discr.matrix_rhs()
        x = sps.linalg.spsolve(A, b)
        discr.extract(x)

        # exporter
        save = pp.Exporter(gb, "sol_" + file_name, folder_name=folder_name)
        save.write_vtu(variable_to_export, time_step=iteration)

        # save the network points to check if we have reached convergence
        old_network_pts = network.pts

        # construct the new network such that the interfaces are respected
        network = detect_interface(gb, network, network_original, discr, cond, tol)

        # export the current network with the associated tags
        network_file_name = make_file_name(file_name, iteration)
        network.to_file(network_file_name, data=network.tags,
                        folder_name=folder_name, binary=False)

        # check if any point in the network has changed
        all_pts = np.hstack((old_network_pts, network.pts))
        distances = pp.distances.pointset(all_pts) > tol_network
        # consider only the block between the old and new points
        distances = distances[:old_network_pts.shape[1], -network.pts.shape[1]:]
        # check whether each old point has an equal point in the new set
        check = np.any(np.logical_not(distances), axis=0)

        if np.all(check) or iteration > max_iteration:
            okay = True
        iteration += 1

    save.write_pvd(np.arange(iteration), np.arange(iteration))
    write_network_pvd(file_name, folder_name, np.arange(iteration))

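# NOTE: `condition_interface` is defined elsewhere; from the call sites above it
# takes the threshold, the computed flux, a comparison operator, and a
# tolerance. A minimal sketch of a compatible helper follows; the per-cell
# magnitude reduction and the boolean return convention are assumptions, not
# the project's actual implementation.
import numpy as np

def condition_interface(threshold, flux, op, tol=0):
    # Reduce a vector flux (e.g. 3 x num_cells) to a per-cell magnitude and
    # compare it against the threshold with op (e.g. operator.lt / operator.gt).
    norm = np.linalg.norm(np.atleast_2d(flux), axis=0)
    return op(norm, threshold + tol)
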
import models
from data import gen, test_data
import matplotlib.pyplot as plt

shape = (256, 256)

# Load model
print('\n')
print('-' * 30)
print('Loading model...')
print('-' * 30)
model = models.unet(shape, models.res_block_basic, models.Activation('relu'), 0, False)
#model = models.get_unet(shape)
#model = models.test_net(shape)
model.load_weights('./weights/weights.hdf5')

# Look at sample predictions
print('\n')
print('-' * 30)
print('Sample predictions...')
print('-' * 30)
Xt = test_data(shape)
Yt = model.predict(Xt, verbose=1)
for i in range(Xt.shape[0]):
    plt.subplot(1, 2, 1)
    plt.imshow(Xt[i, 0, :, :])
    plt.subplot(1, 2, 2)
    plt.imshow(Yt[i, 0, :, :])
    plt.show()

def cli(result_folder,
        model_name=None,
        epoch=None,
        num_epoch=3,
        optimizer_name='sgd',
        lr_param=0.001,
        lr_decay_sched='90,135',
        lr_decay_ratio=0.1,
        mom_param=0.9,
        wd_param=0.0001,
        monitor=False,
        allow_overwrite=False,
        use_cpu=False):
    """Train a model."""
    print("Parameters: ", sys.argv)
    if use_cpu:
        bnet.set_mode_cpu()
    else:
        bnet.set_mode_gpu()
    # Load the data.
    tr_data, tr_labels = training_data()
    te_data, te_labels = test_data()
    # Set up the output folder, including logging.
    model, optimizer, out_folder, base_iter = _model(
        result_folder, tr_data.shape[0], model_name, epoch, 1,
        optimizer_name, lr_param, lr_decay_sched, lr_decay_ratio,
        mom_param, wd_param, False, allow_overwrite)
    batch_size = model.blobs['data'].shape[0]
    logger = mnt.JSONLogger(str(out_folder), 'model',
                            {'train': ['train_loss', 'train_accuracy'],
                             'test': ['test_loss', 'test_accuracy']},
                            base_iter=base_iter,
                            write_every=round_to_mbsize(50000, batch_size),
                            create_plot=monitor)
    progr_ind = mnt.ProgressIndicator()
    if monitor:
        extra_monitors = [
            mnt.ActivationMonitor(round_to_mbsize(10000, batch_size),
                                  os.path.join(str(out_folder),
                                               'visualizations' + os.sep),
                                  sample={'data': tr_data[0]}),
            mnt.FilterMonitor(round_to_mbsize(10000, batch_size),
                              os.path.join(str(out_folder),
                                           'visualizations' + os.sep)),
            mnt.GradientMonitor(round_to_mbsize(10000, batch_size),
                                os.path.join(str(out_folder),
                                             'visualizations' + os.sep),
                                relative=True),
        ]
    else:
        extra_monitors = []
    model.fit(round_to_mbsize(num_epoch * tr_data.shape[0], batch_size),
              optimizer,
              X={'data': tr_data, 'labels': tr_labels},
              X_val={'data': te_data, 'labels': te_labels},
              test_interval=round_to_mbsize(tr_data.shape[0], batch_size),
              train_callbacks=[
                  progr_ind,
                  logger,
                  mnt.Checkpointer(os.path.join(str(out_folder), 'model'),
                                   round_to_mbsize(tr_data.shape[0], batch_size),
                                   base_iterations=base_iter),
              ] + extra_monitors,
              test_callbacks=[progr_ind, logger])

import data
import assign
import itertools
import numpy as np

colleges, students = data.test_data(others=60)

NDIM = 3
result = np.zeros((len(colleges),) * NDIM + (NDIM,))
for choices in itertools.product(np.arange(len(colleges)), repeat=NDIM):
    students.choice[:NDIM] = choices
    print("C:", choices)
    probs = assign.prob_outcomes(colleges, students, sigma_i=0.2)
    print("Results:")
    print()
    for ci, college in enumerate(colleges):
        print("{0.name} (T={0.threshold:.2f}, C={0.capacity})".format(college))
        for si, (student, p) in enumerate(zip(students[:NDIM], probs)):
            if student.choice != ci:
                continue
            print("  {0.name} (G={0.grade:.2f}):".format(student))
            for outcome in data.Outcome:
                print("    {:>8s}:{: 6.1f}".format(outcome.name, 100 * p[outcome]))
    print()
    prob_c = assign.prob_colleges(colleges, students, probs)