def testall(directory, pred_file=None, label_file=None, out_path=None):
    """Analyze the stored test results of every trained network under *directory*.

    A subfolder counts as a tested network when it contains both a
    ``network.cfg`` file and a ``results`` folder.  All networks are assumed
    to have been tested on the same data set, which is loaded once from the
    first network's config.

    Parameters
    ----------
    directory : str
        Folder containing one subfolder per trained network.
    pred_file, label_file, out_path : optional
        Forwarded unchanged to ``Analyzer.analyze``.

    Returns
    -------
    Analyzer
        The analyzer holding the results of every discovered network.

    Raises
    ------
    ValueError
        If no tested network folders are found under *directory*.
    """
    # A usable network folder has both a config file and a results folder.
    networks = [folder for folder in os.listdir(directory)
                if os.path.isfile(os.path.join(directory, folder, "network.cfg"))
                and os.path.exists(os.path.join(directory, folder, "results"))]
    if not networks:
        raise ValueError("No tested networks found in " + directory)

    # Assume that all networks are tested on the same set of data, so the
    # test set description can be taken from the first network's config.
    config = ConfigParser.ConfigParser()
    config.read(os.path.join(directory, networks[0], "network.cfg"))
    test_data = LoadData(directory=config.get('Testing Data', 'folders').split(','),
                         data_file_name=config.get('Testing Data', 'data_file'),
                         label_file_name=config.get('Testing Data', 'label_file'),
                         seg_file_name=config.get('Testing Data', 'seg_file'))

    res = Analyzer(raw=test_data.get_data()[0], target=test_data.get_labels()[0])
    for net in networks:
        # Each network's own config says where its results were written.
        config = ConfigParser.ConfigParser()
        config.read(os.path.join(directory, net, "network.cfg"))
        res.add_results(results_folder=config.get('General', 'directory'),
                        name=net,
                        prediction_file=config.get('Testing', 'prediction_file') + '_0',
                        learning_curve_file='learning_curve')

    res.analyze(-1, pred_file=pred_file, label_file=label_file, out_path=out_path)
    return res
def ViewResults(**kwargs):
    """Build an Analyzer over stored predictions for one or many networks.

    Keyword Args
    ------------
    network : str, optional
        Name of a single network under ``networks/``.  When given, only
        that network's results are loaded.
    directory : str, optional
        When *network* is not given, every subfolder of *directory* that
        contains a ``network.cfg`` is loaded.  Expected to end with ``/``
        (paths are built by string concatenation).
    predictions_file : str, optional
        Prediction file name; defaults to ``"test_prediction_0"``.
        NOTE(review): the key is ``predictions_file`` (plural) — confirm
        callers use that exact spelling.

    Returns
    -------
    Analyzer
        Analyzer with one result set added per network.
    """
    directory = kwargs.get("directory", "")
    network = kwargs.get("network", None)
    prediction_file = kwargs.get("predictions_file", None)

    # Normalize both modes to a list of (results_folder, display_name)
    # pairs so the loading logic below is written only once.
    if network:
        sources = [("networks/" + network + "/", network)]
    else:
        sources = [(directory + folder + "/", folder)
                   for folder in os.listdir(directory)
                   if os.path.isfile(directory + folder + "/network.cfg")]

    # Assume that all networks are tested on the same set of data, so the
    # first network's config describes the test set for every result.
    config = ConfigParser.ConfigParser()
    config.read(sources[0][0] + "network.cfg")
    data = LoadData(
        directory=config.get("Testing Data", "folders").split(",")[0],
        data_file_name=config.get("Testing Data", "data_file"),
        label_file_name=config.get("Testing Data", "label_file"),
    )

    if not prediction_file:
        prediction_file = "test_prediction_0"

    results = Analyzer(target=data.get_labels()[0], raw=data.get_data()[0])
    for folder, name in sources:
        results.add_results(results_folder=folder, name=name,
                            prediction_file=prediction_file)
    return results
def testprediction(config_file, pred_file=None, label_file=None, out_path=None):
    """Analyze the stored test prediction of a single trained network.

    Reads the network's config file to locate its test data and results
    folder, then runs ``Analyzer.analyze`` on the latest result.

    Parameters
    ----------
    config_file : str
        Path to the network's ``network.cfg`` (the network's display name
        is taken from the grandparent folder in this path).
    pred_file, label_file, out_path : optional
        Forwarded unchanged to ``Analyzer.analyze``.

    Returns
    -------
    Analyzer
        The analyzer holding this network's results.
    """
    parser = ConfigParser.ConfigParser()
    parser.read(config_file)

    dataset = LoadData(directory=parser.get('Testing Data', 'folders').split(','),
                       data_file_name=parser.get('Testing Data', 'data_file'),
                       label_file_name=parser.get('Testing Data', 'label_file'),
                       seg_file_name=parser.get('Testing Data', 'seg_file'))

    analysis = Analyzer(raw=dataset.get_data()[0],
                        target=dataset.get_labels()[0])

    # The network is named after the grandparent folder of its config file.
    net_name = config_file.split('/')[-3]
    analysis.add_results(results_folder=parser.get('General', 'directory'),
                         name=net_name,
                         prediction_file=parser.get('Testing', 'prediction_file') + '_0',
                         learning_curve_file='learning_curve')

    analysis.analyze(-1, pred_file=pred_file, label_file=label_file,
                     out_path=out_path)
    return analysis
def makeprediction(config_file, data_file=None, out_path=None, out_file=None, gpu=None): #Open configuration file for this network config = ConfigParser.ConfigParser() config.read(config_file) #Set the device on which to perform these computations if gpu: theano.sandbox.cuda.use(gpu) theano.config.nvcc.flags='-use=fast=math' theano.config.allow_gc=False else: device = config.get('General', 'device') theano.sandbox.cuda.use(device) if (device != 'cpu'): theano.config.nvcc.flags='-use=fast=math' theano.config.allow_gc=False #------------------------------------------------------------------------------ starttime=time.clock() print '\nInitializing Network' if os.path.exists(config.get('General', 'directory')+config.get('Network', 'weights_folder')): network = CNN(weights_folder = config.get('General', 'directory')+config.get('Network', 'weights_folder'), activation = config.get('Network', 'activation')) else: print 'Error: Weights folder does not exist. Could not initialize network' return; #------------------------------------------------------------------------------ print 'Opening Data Files' if data_file: test_data = LoadData(directory = '', data_file_name = data_file) else: test_data = LoadData(directory = config.get('Testing Data', 'folders').split(','), data_file_name = config.get('Testing Data', 'data_file')) #------------------------------------------------------------------------------ init_time = time.clock() - starttime print "Initialization = " + `init_time` + " seconds" starttime = time.clock() print 'Making Predictions' if out_path and out_file: network.predict(test_data.get_data(), results_folder = out_path, name = out_file) elif out_path: network.predict(test_data.get_data(), results_folder = out_path, name = config.get('Testing', 'prediction_file')) elif out_file: network.predict(test_data.get_data(), results_folder = config.get('General', 'directory')+config.get('Testing', 'prediction_folder'), name = out_file) else: 
network.predict(test_data.get_data(), results_folder = config.get('General', 'directory')+config.get('Testing', 'prediction_folder'), name = config.get('Testing', 'prediction_file')) pred_time = time.clock() - starttime #------------------------------------------------------------------------------ print "Prediction Time = " + `pred_time` + " seconds" test_data.close()
starttime = time.clock() network = CNN(num_layers=nlayers, num_filters=nfilters, filter_size=fsize, activation="relu") buildtime = time.clock() - starttime except: print name + ": build fail\n" batchsize = max_batch try: traintime = 0 chunck_size = int( round((log_interval * batchsize) ** (1.0 / 3.0)) + (network.net_shape.shape[0] * (network.net_shape[0, 1] - 1) + 1) ) netTrainer = Trainer( network, data.get_data()[0][0:chunck_size, 0:chunck_size, 0:chunck_size], data.get_labels()[0][:, 0:chunck_size, 0:chunck_size, 0:chunck_size], learning_method="ADAM", log_folder=folder, log_interval=log_interval, ) starttime = time.clock() netTrainer.train(1, False, False) traintime = time.clock() - starttime results.append([nlayers, nfilters, fsize, batchsize, buildtime, traintime]) print name + " training with batchsize " + ` batchsize ` + ": success\n" except: print name + " training with batchsize " + ` batchsize ` + ": fail\n" nfilters = max_filters
def trainnetwork(config_file): #Open configuration file for this network config = ConfigParser.ConfigParser() config.read(config_file) #Set the device on which to perform these computations device = config.get('General', 'device') theano.sandbox.cuda.use(device) if (device != 'cpu'): theano.config.nvcc.flags='-use=fast=math' theano.config.allow_gc=False #------------------------------------------------------------------------------ print '\nOpening Data Files' #Load the data for training training_data = LoadData(directory = config.get('Training Data', 'folders').split(','), data_file_name = config.get('Training Data', 'data_file'), label_file_name = config.get('Training Data', 'label_file'), seg_file_name = config.get('Training Data', 'seg_file')) #------------------------------------------------------------------------------ starttime=time.clock() #Create the network and trainer if os.path.exists(config.get('General', 'directory')+config.get('Network', 'weights_folder')): print 'Loading Network' network = CNN(weights_folder = config.get('General', 'directory')+config.get('Network', 'weights_folder'), activation = config.get('Network', 'activation')) print 'Loading Trainer' network_trainer = Trainer(network, training_data.get_data(), training_data.get_labels(), training_data.get_segments(), chunk_size = config.getint('Training', 'chunk_size'), batch_size = config.getint('Training', 'batch_size'), cost_func = config.get('Training', 'cost_func'), learning_method = config.get('Training', 'learning_method'), learning_rate = config.getfloat('Training', 'learning_rate'), beta1 = config.getfloat('Training', 'beta1'), beta2 = config.getfloat('Training', 'beta2'), damping = config.getfloat('Training', 'damping'), trainer_folder = config.get('General', 'directory')+config.get('Training', 'trainer_folder'), log_interval = config.getint('Training', 'log_interval'), log_folder = config.get('General', 'directory')+config.get('Training', 'log_folder')) else: print 'Initializing 
Network' network = CNN(num_layers = config.getint('Network', 'num_layers'), num_filters = config.getint('Network', 'num_filters'), filter_size = config.getint('Network', 'filter_size'), activation = config.get('Network', 'activation')) print 'Initializing Trainer' network_trainer = Trainer(network, training_data.get_data(), training_data.get_labels(), training_data.get_segments(), chunk_size = config.getint('Training', 'chunk_size'), batch_size = config.getint('Training', 'batch_size'), cost_func = config.get('Training', 'cost_func'), learning_method = config.get('Training', 'learning_method'), learning_rate = config.getfloat('Training', 'learning_rate'), beta1 = config.getfloat('Training', 'beta1'), beta2 = config.getfloat('Training', 'beta2'), damping = config.getfloat('Training', 'damping'), log_interval = config.getint('Training', 'log_interval'), log_folder = config.get('General', 'directory')+config.get('Training', 'log_folder')) init_time = time.clock() - starttime #------------------------------------------------------------------------------ print "Initialization = " + `init_time` + " seconds" starttime = time.clock() #Train the network print 'Training...\n' train_error = network_trainer.train(config.getint('Training', 'num_epochs'), config.getboolean('Training', 'early_stop'), config.getboolean('Training', 'print_updates')) total_time = time.clock() - starttime #------------------------------------------------------------------------------ print "Total Time =",total_time,"seconds" training_data.close() return train_error