def test_init(self):
    """A new Experiments is empty, rejects invalid additions, and stores valid ones in order."""
    exps = Experiments()
    self.assertEqual(exps.getNumOfExperiments(), 0)
    self.assertEqual(exps.getExperiments(), {})
    # Running with no registered models must raise.
    try:
        exps.runAllExperiments()
        fail(self)
    except ValueError as err:
        self.assertEqual(str(err), 'Experiments object has no models to run!')
    # Only Experiment instances may be added.
    try:
        exps.addExperiment('random forest')
        fail(self)
    except ValueError as err:
        self.assertEqual(str(err), 'Object must be Experiment object: random forest')
    # Experiment names must be strings.
    try:
        exps.addExperiment(Experiment(1))
        fail(self)
    except ValueError as err:
        self.assertEqual(str(err), 'Experiment name attribute must be string, not <class \'int\'>')
    self.assertEqual(exps.getNumOfExperiments(), 0)
    # Valid experiments accumulate and keep insertion order.
    for exp_name in ('1', '2', '3', '4'):
        exps.addExperiment(Experiment(exp_name))
    self.assertEqual(exps.getNumOfExperiments(), 4)
    self.assertEqual(exps.getExperimentNames(), ['1', '2', '3', '4'])
def test_predict(self):
    """predict() returns the same per-model label lists for ndarray and DataFrame input."""
    expected = {
        'rf': [2, 2, 2, 1, 2, 2, 1, 1, 2, 1, 2, 2, 2, 1, 1,
               2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
        'dt': [2, 1, 2, 1, 2, 2, 1, 1, 2, 1, 2, 2, 2, 2, 2,
               2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]}
    # Dataset type: sklearn Bunch (plain ndarrays).
    exp = Experiment('exp1', models=['rf', 'dt'], exp_type='classification')
    iris = load_iris()
    exp.train(iris.data[:120], iris.target[:120])
    self.assertEqual(exp.predict(iris.data[120:]), expected)
    # Dataset type: pandas DataFrame.
    exp = Experiment('exp1', models=['rf', 'dt'], exp_type='classification')
    frame = load_iris(as_frame=True).frame
    X = frame.loc[:, frame.columns != 'target']
    y = frame['target']
    exp.train(X.iloc[:120], y.iloc[:120])
    self.assertEqual(exp.predict(X.iloc[120:]), expected)
def test_train(self):
    """train() raises ValueError when data/target lengths differ, for both input kinds."""
    def expect_mismatch(exp, data, target, name, n_data, n_target):
        # Shared failure path: the error message embeds the experiment name
        # and both lengths.
        try:
            exp.train(data, target)
            fail(self)
        except ValueError as err:
            self.assertEqual(
                str(err),
                "Data and target provided to '{}' must be same length:"
                "\n\tlen of data: {}\n\tlen of target: {}".format(name, n_data, n_target))

    # Input data type: sklearn Bunch (ndarrays).
    exp = Experiment('exp1', models=['rf', 'dt'], exp_type='classification')
    iris = load_iris()
    exp.train(iris.data[:120], iris.target[:120])
    expect_mismatch(exp, iris.data[:120], iris.target[:12], 'exp1', 120, 12)
    exp = Experiment(models=['rf', 'dt'], exp_type='classification')
    exp.train(iris.data[:120], iris.target[:120])
    expect_mismatch(exp, iris.data[:47], iris.target, 'unnamed_experiment', 47, 150)

    # Input data type: pandas DataFrame.
    exp = Experiment('exp1', models=['rf', 'dt'], exp_type='classification')
    frame = load_iris(as_frame=True).frame
    X = frame.loc[:, frame.columns != 'target']
    y = frame['target']
    exp.train(X, y)
    expect_mismatch(exp, X, y.sample(120), 'exp1', 150, 120)
    exp = Experiment(models=['rf', 'dt'], exp_type='classification')
    exp.train(X.sample(120), y.sample(120))
    expect_mismatch(exp, X.sample(120), y, 'unnamed_experiment', 120, 150)
def range_train():
    """Train one model per sample size, sample from it, and save its loss plot.

    Plot files are written under plots/<size>.png; if that name is taken, a
    numbered suffix (plots/<size>_<count>.png) is probed until a free name is
    found so earlier plots are never overwritten.
    """
    sample_sizes = [1e6]
    for size in sample_sizes:
        exp = Experiment("leagueoflegends", size, 75)
        model, losses, iterations = exp.regular_train()
        exp.predict(model, 1000, "s", True)
        plt.title("Losses for size " + str(size))
        plt.plot(iterations, losses)
        plt.xlabel("Iteration")
        plt.ylabel("Loss")
        base = "plots/" + str(size)
        if not os.path.exists(base + ".png"):
            plt.savefig(base + ".png")
        else:
            # BUG FIX: the original looped `while os.path.exists(path)` and
            # re-saved to that same existing path, so the condition never
            # changed and the loop never terminated. Probe numbered suffixes
            # until an unused filename is found, then save once.
            count = 1
            while os.path.exists(base + "_" + str(count) + ".png"):
                count += 1
            plt.savefig(base + "_" + str(count) + ".png")
        print("#" * 80)
        print("FINAL LOSS FOR SIZE ", size, losses[-1])
        print("#" * 80)
        plt.clf()
def test_isValidExperimentType(self):
    """isValidExperimentType returns True for known types and False otherwise."""
    exp = Experiment()
    # Known type is accepted; an arbitrary string is not.
    self.assertEqual(exp.isValidExperimentType('classification'), True)
    self.assertEqual(exp.isValidExperimentType('bloogus!'), False)
def _set_experiments(self):
    """Create one Experiment per (system IO, Kalman filter, legend) triple."""
    triples = zip(self._sys_IOs, self._kalman_filters,
                  ThesisConfig.micro_dpsi_test_legend)
    for sys_io, kf, legend in triples:
        self._experiments.append(
            Experiment(sys_io, kf, ThesisConfig.micro_dpsi_test_slice, legend))
def custom():
    """Train on a custom corpus file (no subreddit) and sample from the model."""
    corpus_path = "../kelvin"
    exp = Experiment(subreddit=None, sample_size=0, percentile=0,
                     custom=True, custom_file=corpus_path)
    model, losses, iterations = exp.regular_train(epochs=5)
    exp.predict(model, 100, "s", False)
def _set_experiments(self):
    """Register the simulation reference experiment, then one Experiment per Kalman filter."""
    self._experiments.append(
        SimExperiment(self._sim, ThesisConfig.line_sim_slice,
                      ThesisConfig.line_sim_ref_legend))
    for kf, legend in zip(self._kalman_filters,
                          ThesisConfig.line_sim_kalman_legend):
        self._experiments.append(
            Experiment(self._sys_IOs, kf, ThesisConfig.line_sim_slice, legend))
def single_complete_train():
    """Train once over the complete dataset, sample from the model, and plot the losses."""
    size = None  # None = no subsampling, use the full dataset
    exp = Experiment("leagueoflegends", size, 90)
    model, losses, iterations = exp.regular_train(epochs=1)
    exp.predict(model, 1000, "s", True)
    plt.title("Losses for size " + str(size))
    plt.plot(iterations, losses)
    plt.xlabel("Iteration")
    plt.ylabel("Loss")
def test_addModel(self):
    """addModel accepts both a model-name string and a Model instance."""
    exp = Experiment('experiment_1')
    exp.addModel('rf')
    exp.addModel(Model('decision tree', DecisionTreeClassifier,
                       {'random_state': 0, 'max_depth': 2}))
    # The string-registered model is keyed by its name.
    self.assertTrue('rf' in exp.getModels())
def test_setRandomState(self):
    """setRandomState propagates the seed into every model's constructor args."""
    exp = Experiment(name='experiment 1', models=['rf', 'dt'], exp_type='regression')

    def seed_of(key):
        # Current random_state recorded for the named model.
        return exp.getModels()[key].getConstructorArgs()['random_state']

    self.assertEqual(seed_of('rf'), 0)
    self.assertEqual(seed_of('dt'), 0)
    exp.setRandomState(1)
    self.assertEqual(seed_of('rf'), 1)
    self.assertEqual(seed_of('dt'), 1)
def _prepare_model(args_path, config_path, num_classes):
    """Load saved run arguments and model config from JSON, then build the model."""
    with open(args_path) as fh:
        run_args = json.load(fh)
    with open(config_path) as fh:
        model_cfg = json.load(fh)
    # NOTE(review): num_features is not defined in this function — presumably
    # a module-level global; confirm against the surrounding module.
    experiment = Experiment(None, num_features, num_classes, None)
    return create_model(run_args['model_type'], experiment, model_cfg)
def test_setExperimentType(self):
    """setExperimentType rejects unknown types with ValueError and stores valid ones."""
    exp = Experiment()
    # Unknown type -> ValueError with a descriptive message.
    try:
        exp.setExperimentType('bloogus!')
        fail(self)
    except ValueError as err:
        self.assertEqual(
            str(err),
            'Experiment must be \'regression\' or \'classification\', cannot be bloogus!')
    # Valid type is stored and readable back.
    exp.setExperimentType('classification')
    self.assertEqual(exp.getExperimentType(), 'classification')
def main():
    """Driver: load config, read data, train, then run dataset-specific sanity predictions."""
    # 1. config
    print('Loading config')
    config = get_config()
    print(config.model_file_name)
    # 2. data
    print('Reading data from %s' % config.in_path)
    data = Data(config=config)
    # 3. experiment
    exp = Experiment(data=data, config=config)
    print('Start training')
    exp.run(check_period=config.check_period,
            early_stop=config.early_stop,
            patience=config.patience)
    # 4. sanity link-prediction checks, keyed off the input path.
    if 'wn18' in config.in_path:
        exp.show_link_prediction(h='06845599', t='03754979',
                                 r='_member_of_domain_usage', raw=True)
    if 'fb15k' in config.in_path:
        exp.show_link_prediction(
            h='/m/08966', t='/m/05lf_',
            r='/travel/travel_destination/climate./travel/travel_destination_monthly_climate/month',
            raw=True)
    if 'KG30C' in config.in_path:
        exp.show_link_prediction(h='7F74B998', t='019EC1A3',
                                 r='paper_in_domain', raw=True)
    if 'KG94C' in config.in_path:
        exp.show_link_prediction(h='7E52972F', t='80D75AD7',
                                 r='author_write_paper', raw=True)
# Builds an Experiment entirely from string class paths (model, loss, data
# helper, tqdm) plus literal config dicts, then checks each was resolved to
# the expected class and runs it end to end.
# NOTE(review): contains absolute local paths ('/Volumes/...') and df_limit=3
# — presumably a developer smoke test, not CI-portable; confirm.
def test_cls_init(self): env = get_env() exp = Experiment( # random_seed=0, epochs=1, model_cls='models.transformers.JointBERT', model_params={ 'bert_model_path': env['bert_dir'] + '/bert-base-cased', 'labels_count': 3, }, loss_func_cls='torch.nn.BCELoss', # loss, model_output_to_loss_input=lambda ys: ys.double(), data_helper_cls='wiki.data_helpers.JointBERTWikiDataHelper', data_helper_params={ 'wiki_relations_path': '../wiki/relations.csv', 'wiki_articles_path': '../wiki/docs.pickle', 'labels': ['employer', 'country_of_citizenship'], # 'employer' # 'capital' # 'country_of_citizenship' #'educated_at' # 'opposite_of' 'label_col': 'relation_name', 'negative_sampling_ratio': 1., 'train_test_split': 0.7, 'max_seq_length': 512, 'train_batch_size': 4, 'test_batch_size': 4, 'bert_model_path': '/Volumes/data/repo/data/bert/bert-base-cased', # 'bert_tokenizer_cls': '', 'bert_tokenizer_params': { 'do_lower_case': False, }, 'df_limit': 3, }, tqdm_cls='tqdm.tqdm', output_dir='../output', ) assert isinstance(exp.model, JointBERT) assert isinstance(exp.data_helper, JointBERTWikiDataHelper) assert isinstance(exp.loss_func, BCELoss) assert tqdm == exp.tqdm_cls print(flatten(exp.to_dict())) exp.run()
def do_add(self, arg_str):
    '''Use either:
    add experiment <description> <start_date>
    add researcher <last_name> <first_name>
    add sample <description> <experiment_id>'''
    # Parse the "add" arguments; syntax problems are reported to stderr.
    try:
        type_str, attr1, attr2 = ExperimentShell.parse_add_arg(arg_str)
    except SyntaxException as e:
        print('*** {0}'.format(str(e)), file=sys.stderr)
        return
    if type_str == 'experiment':
        item = Experiment(description=attr1, start_date=attr2)
    elif type_str == 'researcher':
        item = Researcher(last_name=attr1, first_name=attr2)
    elif type_str == 'sample':
        item = Sample(description=attr1, experiment_id=attr2)
    else:
        # BUG FIX: the original had no else-branch, so an unexpected type_str
        # left `item` unbound and raised NameError below. Report and bail
        # instead (parse_add_arg presumably only yields the three types above,
        # but don't rely on it).
        print('*** unknown type: {0}'.format(type_str), file=sys.stderr)
        return
    self._db_session.add(item)
    self._db_session.commit()
def load(self):
    """Load this experiment's server runs and count total and currently-active HITs.

    Missing experiments (IOError from Experiment) are logged and ignored,
    leaving both counters at zero.
    """
    self.total_hits = 0
    self.active_hits = 0
    lvlset = self.args["lvlset"]
    _, lvls = loadBoogieLvlSet(lvlset)
    if self.lvls is None:
        self.lvls = lvls.keys()
    try:
        self.exp = Experiment(self.name)
    except IOError:
        # CONSISTENCY FIX: was a Python 2 print statement; the rest of the
        # codebase uses print() calls. Missing experiments are ignored.
        print("Error loading experiment %s" % self.name)
        return
    self.total_hits = len(self.exp.server_runs)
    # NOTE(review): `hits` and `isHitActive` are not defined in this method —
    # presumably module-level; confirm.
    self.active_hits = sum(sr.hit_id in hits and isHitActive(hits[sr.hit_id])
                           for sr in self.exp.server_runs)
# Fragment of an argparse-driven analysis script: registers a few flags,
# parses args, then loads each named Experiment at the requested checkpoint
# and collects its jitted sampler/encoder/decoder plus a per-experiment
# results folder. Truncated mid-statement at the end ("folder_name = '_'.join([")
# — the remainder is outside this view.
parser.add_argument('--save_embedding', action='store_true', help='') parser.add_argument('--plot_embeddings', action='store_true', help='') parser.add_argument('--prob_diff', action='store_true', help='') args = parser.parse_args() # Load all of the experiments all_experiments = [] results_folders = [] for i, name in enumerate(args.names): exp = Experiment(name, args.quantize, None, start_it=args.checkpoint, experiment_root=args.experiment_root) exp.load_experiment() results_folders.append( os.path.join( args.results_root, '%s_%d' % (exp.experiment_name, exp.current_iteration))) sampler = exp.get_jitted_sampler() encoder = exp.get_jitted_forward() decoder = exp.get_jitted_inverse() all_experiments.append((exp, sampler, encoder, decoder)) # Save the plots to the results folder folder_name = '_'.join([
# Fragment: the opening try/except belongs to a row-writing function whose
# definition starts outside this view; the __main__ block then builds an
# Experiment from a console-supplied title, pulls its data, and writes it to
# a dated CSV under Output/.
# NOTE(review): polar_error[1] is negated for the AngleError column, and a
# TypeError (missing positional data) yields an empty cell — presumably
# intentional; confirm with the data model.
# Calculate AngleError as long as the positional data is there try: row.append(-trial.polar_error[1]) except TypeError: row.append("") # Write the row to csvfile writer.writerow(row) # _________________________________ Main ________________________________________________ if __name__ == "__main__": # Initialize the experiment with a title from console title = input("Enter a name for the experiment: ") experiment = Experiment(title) # Map data into the experiment objects print("Pulling data...", end="") experiment.pull_data() print("Done.") # Write the data to <Title>_data_<Today>.csv file_name = "Output/{}_data_{}.csv".format( experiment.name, datetime.datetime.today().date()) print("Writing to {}...".format(file_name), end="") # ----- SELECT YOUR OUTPUT TYPE HERE ------------------------------------------------ # Uncomment this function to generate the rich data CSV made by Alec write_to_csv(experiment, "../" + file_name)
# Fragment of a Keras classification training script: imports, experiment
# directory setup (checkpoints/tensorboard), GPU device listing, then model
# construction looked up by config['MODEL_NAME'] from models.name_to_model.
# Truncated mid-call at the end (the model_build_function argument list
# continues outside this view), so nothing here is safely editable.
from keras import applications from keras.preprocessing.image import ImageDataGenerator from keras import optimizers from keras.models import Sequential from keras.layers import Dropout, Flatten, Dense, Conv2D, MaxPooling2D from keras.models import Model from keras import metrics from keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau # Local imports import models from experiments import Experiment if __name__ == '__main__': # Setup experiment experiment = Experiment('./classification_experiments') experiment.add_dir('checkpoints') experiment.add_dir('tensorboard') # Print information about GPUs print(device_lib.list_local_devices()) # Load model model_name = config['MODEL_NAME'] print('Loading model {}...'.format(model_name)) if model_name in models.name_to_model: model_build_function = models.name_to_model[model_name] model = model_build_function(config['IMAGE_HEIGHT'], config['IMAGE_WIDTH'], config['N_CHANNELS'],
def _set_experiments(self):
    """Create one floor-test Experiment per Kalman filter, all on the first system IO."""
    pairs = zip(self._kalman_filters, ThesisConfig.floor_legend)
    for kf, legend in pairs:
        self._experiments.append(
            Experiment(self._sys_IOs[0], kf, ThesisConfig.floor_slice, legend))
def run_final():
    """Run the final PSO/NN benchmark suite: 10 restarts per dataset, printing mean best MSE.

    BUG FIX: the Cubic/Linear/Sine headers used "\\F"/"\\S" — invalid escape
    sequences that print a literal backslash instead of a newline; they are
    corrected to "\\n" to match the Tanh/XOR/Complex headers.
    """
    print("\nFinal Cubic")
    print("=======================")
    params_pso = {
        "swarmsize": 40, "alpha": 1, "beta": 0, "gamma": 4.1, "delta": 0,
        "jumpsize": 1, "act_bound": 5, "weight_bound": 10, "bound_strat": 1,
        "num_informants": 3, "vel_range": 1, "max_runs": 1000,
        "informants_strat": 2,
    }
    net_single = {"layer1": {"input_count": 1, "node_count": 1, "activations": []}}
    exp1 = 0
    for i in range(0, 10):
        print("\nRun ", i)
        experiment1 = Experiment(params_pso, net_single, path="1in_cubic.txt",
                                 debugMode=False, sampleMode=True)
        experiment1.run()
        exp1 += experiment1.pso.best.fitness
    print("\nMse for final on cubic", exp1 / 10)

    params_pso["beta"] = 0.5
    params_pso["gamma"] = 3.6
    print("\nFinal Linear")
    print("=======================")
    exp2 = 0
    for i in range(0, 10):
        print("\nRun ", i)
        experiment2 = Experiment(params_pso, net_single, path="1in_linear.txt",
                                 debugMode=False, sampleMode=True)
        experiment2.run()
        exp2 += experiment2.pso.best.fitness
    print("\nMse for final on linear", exp2 / 10)

    params_pso["beta"] = 0
    params_pso["gamma"] = 4.1
    print("\nFinal Sine")
    print("=======================")
    exp3 = 0
    for i in range(0, 10):
        print("Run ", i, "\n")
        experiment3 = Experiment(params_pso, net_single, path="1in_sine.txt",
                                 debugMode=False, sampleMode=True)
        experiment3.run()
        exp3 += experiment3.pso.best.fitness
    print("\nMse for final on Sine", exp3 / 10)

    # NOTE(review): the "activations:" keys (trailing colon) below look like a
    # typo, but are preserved — downstream code may look them up verbatim.
    net_layers = {
        "layer1": {"input_count": 1, "node_count": 2, "activations": []},
        "layer2": {"input_count": 2, "node_count": 1, "activations:": []},
    }
    params_pso["beta"] = 0
    params_pso["gamma"] = 4.1
    print("\nFinal Tanh")
    print("=======================")
    exp4 = 0
    for i in range(0, 10):
        print("Run ", i, "\n")
        experiment4 = Experiment(params_pso, net_layers, path="1in_tanh.txt",
                                 debugMode=False, sampleMode=True)
        experiment4.run()
        exp4 += experiment4.pso.best.fitness
    print("\nMse for final on Tanh", exp4 / 10)

    net_layers = {
        "layer1": {"input_count": 2, "node_count": 2, "activations": []},
        "layer2": {"input_count": 2, "node_count": 1, "activations:": []},
    }
    params_pso["beta"] = 0
    params_pso["gamma"] = 4.1
    print("\nFinal XOR")
    print("=======================")
    exp6 = 0
    for i in range(0, 10):
        print("\nRun ", i)
        experiment6 = Experiment(params_pso, net_layers, path="2in_xor.txt",
                                 debugMode=False, sampleMode=True)
        experiment6.run()
        exp6 += experiment6.pso.best.fitness
    print("\nMse for final on XOR", exp6 / 10)

    print("\nFinal Complex")
    print("=======================")
    net_complex = {
        "layer1": {"input_count": 2, "node_count": 2, "activations": []},
        "layer2": {"input_count": 2, "node_count": 2, "activations:": []},
        "layer3": {"input_count": 2, "node_count": 1, "activations:": []},
    }
    params_pso["beta"] = 2.05
    params_pso["gamma"] = 2.05
    exp5 = 0
    for i in range(0, 10):
        print("\nRun ", i, "\n")
        experiment5 = Experiment(params_pso, net_complex, path="2in_complex.txt",
                                 debugMode=False, sampleMode=True)
        experiment5.run()
        exp5 += experiment5.pso.best.fitness
    print("\nMse for final on Complex", exp5 / 10)
# Fragment of an FID-evaluation script: for each requested experiment name,
# load the experiment at its latest checkpoint (start_it=-1), grab the jitted
# sampler, create a per-experiment FID folder, and start reading/creating its
# meta.yaml. The if/else tail is truncated — its body continues outside this
# view.
# configurations = [(0.3, 1.0)] # In the meta file, we need: # - iteration number # - settings # - path # - s # - t # - score # Load all of the experiments for name in tqdm(args.names): exp = Experiment(name, args.quantize, None, start_it=-1, experiment_root=args.experiment_root) exp.load_experiment() sampler = exp.get_jitted_sampler() # Create the folder for the FID score experiment_fid_folder = os.path.join(fid_path, exp.experiment_name) pathlib.Path(experiment_fid_folder).mkdir(parents=True, exist_ok=True) # Extract the meta data meta_path = os.path.join(experiment_fid_folder, 'meta.yaml') if (os.path.exists(meta_path) == False): meta = {} else: # If the iteration number has changed, then we need to re-initialize
from agents import RandomSearchAgent
from experiments import Experiment
import gym

# Deterministic LunarLander run: the same seed goes to the env and the agent.
seed = 16
env = gym.make('LunarLander-v2')
env.seed(seed)
ragent = RandomSearchAgent(
    name='RandomSearchAgent-1',
    state_dim=env.observation_space.shape[0],
    action_dim=env.action_space.n,
    seed=seed,
    stop_search_reward=210,
)
exp = Experiment(env, ragent, logdir="../log", verbose=True, num_episodes=1000)
exp.run()
def run_beta():
    """Grid-search PSO (beta, gamma) weight pairs on the complex dataset, 10 restarts.

    BUG FIX: the original printed best_beta/best_gamma/best_error at the end
    but never assigned them from the per-run results, so the summary always
    showed the initial 0 / 0 / None. The overall best is now tracked across
    runs. The nine copy-pasted candidate blocks are collapsed into a loop.
    """
    params_pso = {
        "swarmsize": 40, "alpha": 1, "beta": 0, "gamma": 4.1, "delta": 0,
        "jumpsize": 1, "act_bound": 5, "weight_bound": 10, "bound_strat": 1,
        "num_informants": 3, "vel_range": 1, "max_runs": 1000,
        "informants_strat": 2,
    }
    # NOTE(review): "activations:" keys (trailing colon) preserved from the
    # original — downstream code may look them up verbatim.
    net_layers = {
        "layer1": {"input_count": 2, "node_count": 2, "activations": []},
        "layer2": {"input_count": 2, "node_count": 2, "activations:": []},
        "layer3": {"input_count": 2, "node_count": 1, "activations:": []},
    }
    # (beta, gamma) candidates in the original's order, duplicates included.
    candidates = [
        (0.0, 4.1), (0.5, 3.6), (1.0, 3.1), (1.5, 2.6), (2.05, 2.05),
        (2.6, 1.5), (3.1, 1.0), (0.5, 3.6), (0.0, 4.1),
    ]
    best_beta = 0
    best_gamma = 0
    best_error = None
    for j in range(0, 10):
        run_best_beta = 0
        run_best_gamma = 4.1
        run_best = None
        for beta, gamma in candidates:
            params_pso["beta"] = beta
            params_pso["gamma"] = gamma
            experiment = Experiment(params_pso, net_layers, path="2in_complex.txt",
                                    debugMode=False, sampleMode=True)
            experiment.run()
            fitness = experiment.pso.best.fitness
            if run_best is None or fitness < run_best:
                run_best = fitness
                run_best_beta = beta
                run_best_gamma = gamma
        print("\nRun ", j, " Beta: ", run_best_beta, " Gamma: ", run_best_gamma,
              " Error", run_best)
        # BUG FIX: fold this run's winner into the overall best.
        if best_error is None or run_best < best_error:
            best_error = run_best
            best_beta = run_best_beta
            best_gamma = run_best_gamma
    print("\nOverall Beta: ", best_beta, " Gamma: ", best_gamma, " Error", best_error)
# Per-GPU training worker: builds the Experiment from CLI args, opens a CSV
# for accuracy results, constructs model/criterion/SGD optimizer, gets the
# experiment's transforms and datasets, then either evaluates, trains from
# scratch, and/or fine-tunes (replacing the final FC/classifier layer for the
# dataset's class count) before optionally copying artifacts to Google Drive.
# NOTE(review): logic is long and order-dependent (optimizer is re-created for
# fine-tuning in the continuation line); left byte-identical.
# NOTE(review): `args.own == None` should idiomatically be `args.own is None`;
# confirm before changing.
def main_worker(ngpus_per_node, args): if args.gpu is not None: print("Use GPU: {} for training".format(args.gpu)) experiment = Experiment(args) # Create file and store in args.data directory... filename = f'{experiment.name}.csv' print("Creating {} to store accuracy results".format(filename)) results = open(filename, 'w+') results.write("Epoch,Top1,Top5\n") results.close() # create model model = modelling.get_model(args, True) # define loss function (criterion) and optimizer criterion = nn.CrossEntropyLoss().cuda(args.gpu) optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay) cudnn.benchmark = True # Get Transformations. transform, additional_transform, validation_transform = experiment.get_transformation_set( ) # Get Data train_dataset, val_dataset = experiment.get_data_set( transform, additional_transform, validation_transform) val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True) # Evaluate loaded model. if args.evaluate: modelling.validate(val_loader, model, criterion, args, filename) return # Train your own model if args.own == None: modelling.train_model(model, train_dataset, val_loader, args.start_epoch, args.epochs, optimizer, criterion, filename, experiment.name, args) # Fine Tune the model. if args.finetune: if args.arch.startswith('vgg'): number_of_features = model.classifier[6].in_features model.classifier[6] = nn.Linear(number_of_features, len(train_dataset.classes)) model.classifier[6] = model.classifier[6].cuda() else: number_of_features = model.module.fc.in_features model.module.fc = nn.Linear(number_of_features, len(train_dataset.classes)) model.module.fc = model.module.fc.cuda() # only fc layer is are being updated. 
optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), args.lr, momentum=args.momentum, weight_decay=args.weight_decay) modelling.train_model(model, train_dataset, val_loader, args.start_epoch, args.epochs, optimizer, criterion, filename, experiment.name, args) if args.savemodel: drive_path = f'/content/gdrive/My Drive/{experiment.name}.pth.tar' shutil.copyfile(f'{experiment.name}.pth.tar', drive_path) if args.savecsv: drive_path = f'/content/gdrive/My Drive/{experiment.name}.csv' shutil.copyfile(f'{experiment.name}.csv', drive_path)
def run_swarmsize():
    """Sweep PSO swarm sizes (10..100) on each benchmark dataset and report the best size.

    BUG FIX: all six section headers used "\\Swarmsize ..." — "\\S" is an
    invalid escape that prints a literal backslash instead of a newline; they
    are corrected to "\\nSwarmsize ...". The six copy-pasted sweeps are
    collapsed into one nested helper that preserves each section's original
    print behaviour (only the Cubic sweep echoes the size and repeats 10x).
    """
    params_pso = {
        "swarmsize": 0, "alpha": 1, "beta": 2.05, "gamma": 2.05, "delta": 0,
        "jumpsize": 1, "act_bound": 5, "weight_bound": 10, "bound_strat": 1,
        "num_informants": 3, "vel_range": 1, "max_runs": 1000,
        "informants_strat": 2,
    }

    def sweep(net, path, outer_runs, echo_size=False):
        # Try swarm sizes 10, 20, ..., 100 and keep the size with the lowest
        # best fitness; the whole sweep repeats `outer_runs` times and, as in
        # the original, only the last repetition's result is returned.
        optimal_size = 0
        best = None
        for j in range(0, outer_runs):
            params_pso["swarmsize"] = 0
            print("\nRun ", j)
            optimal_size = 0
            best = None
            for i in range(0, 10):
                params_pso["swarmsize"] += 10
                if echo_size:
                    print(params_pso["swarmsize"])
                experiment = Experiment(params_pso, net, path=path,
                                        debugMode=False, sampleMode=True)
                experiment.run()
                if best is None or experiment.pso.best.fitness < best:
                    best = experiment.pso.best.fitness
                    optimal_size = params_pso["swarmsize"]
            print("\nRun ", j, "best size", optimal_size, " produced", best)
        return optimal_size, best

    net_single = {"layer1": {"input_count": 1, "node_count": 1, "activations": []}}
    print("\nSwarmsize Cubic")
    print("=======================")
    size, best = sweep(net_single, "1in_cubic.txt", outer_runs=10, echo_size=True)
    print("Cubic optimal size ", size, " produced", best)

    print("\nSwarmsize Linear")
    print("=======================")
    size, best = sweep(net_single, "1in_linear.txt", outer_runs=1)
    print("linear optimal size ", size, " produced", best)

    print("\nSwarmsize Sine")
    print("=======================")
    size, best = sweep(net_single, "1in_sine.txt", outer_runs=1)
    print("sine optimal size ", size, " produced", best)

    # NOTE(review): "activations:" keys (trailing colon) preserved from the
    # original — downstream code may look them up verbatim. The XOR sweep
    # also reuses this 1-input net, as the original did; confirm intent.
    net_layers = {
        "layer1": {"input_count": 1, "node_count": 2, "activations": []},
        "layer2": {"input_count": 2, "node_count": 1, "activations:": []},
    }
    print("\nSwarmsize Tanh")
    print("=======================")
    size, best = sweep(net_layers, "1in_tanh.txt", outer_runs=1)
    print("tanh optimal size ", size, " produced", best)

    print("\nSwarmsize XOR")
    print("=======================")
    size, best = sweep(net_layers, "2in_xor.txt", outer_runs=1)
    print("xor optimal size ", size, " produced", best)

    net_complex = {
        "layer1": {"input_count": 2, "node_count": 2, "activations": []},
        "layer2": {"input_count": 2, "node_count": 2, "activations:": []},
        "layer3": {"input_count": 2, "node_count": 1, "activations:": []},
    }
    print("\nSwarmsize Complex")
    print("=======================")
    size, best = sweep(net_complex, "2in_complex.txt", outer_runs=1)
    print("complex optimal size ", size, " produced", best)
# Fragment of a double-DQN training script's setup: imports (including a
# star-heavy config import with duplicated names — target_model, loss_fn and
# optimizer each appear twice), then prompts for a job name at import time and
# builds the Experiment with or without a folder_name accordingly.
# NOTE(review): input() at module top level blocks non-interactive runs;
# `model_path = ""` makes the following torch.load branch dead until edited.
import torch.nn as nn import numpy as np import copy from experiments import Experiment import os from device import device from configs.double_dqn_dense import HYPERPARAMS, model, target_model, loss_fn, optimizer, batch_size, discount_factor, target_model, replay_buffer_length, learning_rate, \
loss_fn, optimizer, no_episodes, no_episodes_to_reach_epsilon, min_epsilon, no_episodes_before_training, no_episodes_before_updating_target, use_double_dqn, snapshot_game_every_n_episodes, no_episodes_to_fill_up_existing_model_replay_buffer pp = pprint.PrettyPrinter(indent=4) job_name = input("What is the job name: ") if job_name: experiment = Experiment( python_file_name=os.path.basename(__file__), folder_name=job_name, model=model, ) else: experiment = Experiment( python_file_name=os.path.basename(__file__), model=model, ) experiment.add_hyperparameter(HYPERPARAMS) pp.pprint(experiment.hyperparameters) model_path = "" if model_path: model = torch.load(model_path)
# Fragment: runs a single XOR experiment on a 2-2-1 net. `params_pso` is not
# defined in this view — presumably set earlier in the file; confirm.
# NOTE(review): the layer2 key "activations:" (trailing colon) matches the
# same apparent typo used elsewhere in this file; left as-is.
net_layers = { "layer1": { "input_count": 2, "node_count": 2, "activations": [] }, "layer2": { "input_count": 2, "node_count": 1, "activations:": [] } } xorexp = Experiment(params_pso, net_layers, path="2in_xor.txt", debugMode=False, sampleMode=True) xorexp.run() ##Single iterations of base experiments commented below # net_single = { # "layer1": { # "input_count":1, # "node_count":1, # "activations": [] # } # } # cubicexp = Experiment(params_pso, net_single, path="1in_cubic.txt", debugMode=False, sampleMode=True) # cubicexp.run()
#agent.show_attrs()
tf.reset_default_graph()

##############################################
## MAIN
##############################################
if __name__ == "__main__":
    # Parse arguments into a plain dict.
    args = vars(parser.parse_args())
    if 'exp' in args['mode']:
        # Experiment mode: run the whole set of experiments defined by the
        # Experiment object (see Experiments.py).
        exp_name = args['mode']
        experiment = Experiment(exp_name, args['parallel'])
        args_list = experiment.get_args_list()
    else:
        # Single-experiment mode: just this invocation's args.
        args_list = [args]
    if args['parallel'] == 0:
        # Execute experiments sequentially.
        for args_ in args_list:
            execute_experiment(args_)
    else:
        # Execute experiments in parallel, one worker per requested process.
        from multiprocessing import Pool
        n_processes = args['parallel']
        with Pool(n_processes) as pool:
            # IDIOM FIX: execute_experiment takes a single argument, so plain
            # map replaces the original starmap(execute_experiment,
            # zip(args_list)), which wrapped each element in a 1-tuple only
            # for starmap to unwrap it again. Results are identical.
            pool.map(execute_experiment, args_list)