コード例 #1
0
	def test_predict(self):
		"""predict() returns identical per-model class predictions whether the
		dataset is a sklearn Bunch or a pandas DataFrame."""
		expected = {
				'rf':
					[2, 2, 2, 1, 2, 2, 1, 1, 2, 1, 2, 2, 2, 1, 1, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2], 
				'dt':
					[2, 1, 2, 1, 2, 2, 1, 1, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]}

		# Dataset type: sklearn Bunch — train on the first 120 rows, predict the rest.
		experiment = Experiment('exp1', models=['rf', 'dt'], exp_type='classification')
		iris = load_iris()
		experiment.train(iris.data[:120], iris.target[:120])
		self.assertEqual( experiment.predict(iris.data[120:]), expected )

		# Dataset type: pandas DataFrame — same split, same expected predictions.
		experiment = Experiment('exp1', models=['rf', 'dt'], exp_type='classification')
		iris_df = load_iris(as_frame=True).frame
		features = iris_df.loc[:, iris_df.columns != 'target']
		labels = iris_df['target']
		experiment.train(features.iloc[:120], labels.iloc[:120])
		self.assertEqual( experiment.predict(features.iloc[120:]), expected )
コード例 #2
0
	def test_init(self):
		"""A fresh Experiments container starts empty and rejects bad additions."""
		experiments = Experiments()
		self.assertEqual( experiments.getNumOfExperiments(), 0 )
		self.assertEqual( experiments.getExperiments(), {} )

		# Running with no registered experiments must raise.
		try:
			experiments.runAllExperiments()
			fail(self)
		except ValueError as ve:
			self.assertEqual( str(ve), 'Experiments object has no models to run!')

		# Only Experiment instances may be added.
		try:
			experiments.addExperiment('random forest')
			fail(self)
		except ValueError as ve:
			self.assertEqual( str(ve), 'Object must be Experiment object: random forest')

		# Experiment names must be strings.
		try:
			experiments.addExperiment( Experiment(1) )
			fail(self)
		except ValueError as ve:
			self.assertEqual( str(ve), 'Experiment name attribute must be string, not <class \'int\'>' )
		self.assertEqual( experiments.getNumOfExperiments(), 0 )

		# Valid experiments are stored and listed in insertion order.
		for exp_name in ('1', '2', '3', '4'):
			experiments.addExperiment( Experiment(exp_name) )

		self.assertEqual( experiments.getNumOfExperiments(), 4 )
		self.assertEqual( experiments.getExperimentNames(), ['1', '2', '3', '4'] )
コード例 #3
0
ファイル: main.py プロジェクト: shuyangw/cs585-final-project
def range_train():
    """Train one model per sample size, plot its loss curve, and save the
    plot under plots/ without overwriting files from earlier runs."""
    sample_sizes = [1e6]
    for size in sample_sizes:
        exp = Experiment("leagueoflegends", size, 75)
        model, losses, iterations = exp.regular_train()
        exp.predict(model, 1000, "s", True)

        plt.title("Losses for size " + str(size))

        plt.plot(iterations, losses)
        plt.xlabel("Iteration")
        plt.ylabel("Loss")

        # Find an unused path: "plots/<size>.png" first, then
        # "plots/<size>_2.png", "plots/<size>_3.png", ...
        # The original looped on a fixed path, which never terminated when
        # the file already existed (and would have overwritten it anyway).
        base = "plots/" + str(size)
        path = base + ".png"
        count = 1
        while os.path.exists(path):
            count += 1
            path = base + "_" + str(count) + ".png"
        plt.savefig(path)

        print("#" * 80)
        print("FINAL LOSS FOR SIZE ", size, losses[-1])
        print("#" * 80)

        plt.clf()
コード例 #4
0
	def test_isValidExperimentType(self):
		"""isValidExperimentType() accepts known types and rejects unknown ones."""
		experiment = Experiment()
		self.assertEqual( experiment.isValidExperimentType('classification'), True )
		self.assertEqual( experiment.isValidExperimentType('bloogus!'), False )
コード例 #5
0
ファイル: main.py プロジェクト: shuyangw/cs585-final-project
def custom():
    """Train on a local custom corpus instead of a subreddit dump."""
    corpus_path = "../kelvin"
    exp = Experiment(subreddit=None,
                     sample_size=0,
                     percentile=0,
                     custom=True,
                     custom_file=corpus_path)
    model, losses, iterations = exp.regular_train(epochs=5)
    # Sample 100 characters from the trained model without plotting.
    exp.predict(model, 100, "s", False)
コード例 #6
0
ファイル: main.py プロジェクト: shuyangw/cs585-final-project
def single_complete_train():
    """Run one full-corpus training epoch and plot its loss curve."""
    exp = Experiment("leagueoflegends", None, 90)
    model, losses, iterations = exp.regular_train(epochs=1)
    exp.predict(model, 1000, "s", True)

    # size is None here because the whole corpus was used.
    plt.title("Losses for size " + str(None))
    plt.plot(iterations, losses)
    plt.xlabel("Iteration")
    plt.ylabel("Loss")
コード例 #7
0
ファイル: thesis.py プロジェクト: braineniac/kalman_estimator
 def _set_experiments(self):
     """Build one Experiment per (system IO, Kalman filter, legend) triple."""
     triples = zip(self._sys_IOs,
                   self._kalman_filters,
                   ThesisConfig.micro_dpsi_test_legend)
     for sys_IO, kalman_filter, legend in triples:
         self._experiments.append(
             Experiment(sys_IO, kalman_filter,
                        ThesisConfig.micro_dpsi_test_slice, legend))
コード例 #8
0
    def test_cls_init(self):
        """Construct an Experiment entirely from dotted import paths and verify
        that each path is resolved to the expected class at init time."""
        env = get_env()

        exp = Experiment(
            # random_seed=0,
            epochs=1,
            # Model and loss are given as import-path strings, not classes.
            model_cls='models.transformers.JointBERT',
            model_params={
                'bert_model_path': env['bert_dir'] + '/bert-base-cased',
                'labels_count': 3,
            },
            loss_func_cls='torch.nn.BCELoss',  # loss,
            # BCELoss expects float targets, so model outputs are cast to double.
            model_output_to_loss_input=lambda ys: ys.double(),
            data_helper_cls='wiki.data_helpers.JointBERTWikiDataHelper',
            data_helper_params={
                'wiki_relations_path': '../wiki/relations.csv',
                'wiki_articles_path': '../wiki/docs.pickle',
                'labels': ['employer', 'country_of_citizenship'],
                # 'employer' # 'capital' # 'country_of_citizenship' #'educated_at' # 'opposite_of'
                'label_col': 'relation_name',
                'negative_sampling_ratio': 1.,
                'train_test_split': 0.7,
                'max_seq_length': 512,
                'train_batch_size': 4,
                'test_batch_size': 4,
                'bert_model_path':
                '/Volumes/data/repo/data/bert/bert-base-cased',
                # 'bert_tokenizer_cls': '',
                'bert_tokenizer_params': {
                    'do_lower_case': False,
                },
                # Keep the dataset tiny so the test runs quickly.
                'df_limit': 3,
            },
            tqdm_cls='tqdm.tqdm',
            output_dir='../output',
        )

        # Each dotted path must have been resolved to its class/instance.
        assert isinstance(exp.model, JointBERT)
        assert isinstance(exp.data_helper, JointBERTWikiDataHelper)
        assert isinstance(exp.loss_func, BCELoss)
        assert tqdm == exp.tqdm_cls

        print(flatten(exp.to_dict()))

        exp.run()
コード例 #9
0
ファイル: thesis.py プロジェクト: braineniac/kalman_estimator
 def _set_experiments(self):
     """Register the reference simulation experiment, then one Experiment
     per Kalman filter / legend pair."""
     self._experiments.append(
         SimExperiment(self._sim, ThesisConfig.line_sim_slice,
                       ThesisConfig.line_sim_ref_legend))
     pairs = zip(self._kalman_filters, ThesisConfig.line_sim_kalman_legend)
     for kalman_filter, legend in pairs:
         self._experiments.append(
             Experiment(self._sys_IOs, kalman_filter,
                        ThesisConfig.line_sim_slice, legend))
コード例 #10
0
def _prepare_model(args_path, config_path, num_classes):
    """Load saved run arguments and a model config from JSON and build the model.

    Args:
        args_path: path to a JSON file with the original run's arguments;
            must contain the key 'model_type'.
        config_path: path to a JSON file with the model configuration.
        num_classes: number of output classes passed to the Experiment.

    Returns:
        The model produced by create_model().
    """
    with open(args_path) as i:
        args = json.load(i)

    with open(config_path) as i:
        model_cfg = json.load(i)

    # NOTE(review): `num_features` is not defined in this function —
    # presumably a module-level global set elsewhere in the file; confirm it
    # is initialized before this is called, otherwise this raises NameError.
    experiment = Experiment(None, num_features, num_classes, None)

    model = create_model(args['model_type'], experiment, model_cfg)

    return model
コード例 #11
0
	def test_addModel(self):
		"""Models can be registered both by short name and as Model instances."""
		experiment = Experiment('experiment_1')
		experiment.addModel('rf')
		dt_model = Model('decision tree', DecisionTreeClassifier, {'random_state': 0, 'max_depth': 2})
		experiment.addModel(dt_model)

		self.assertTrue( 'rf' in experiment.getModels() )
コード例 #12
0
	def test_setExperimentType(self):
		"""setExperimentType() rejects unknown types and stores valid ones."""
		experiment = Experiment()

		# An unknown type must raise with a descriptive message.
		try:
			experiment.setExperimentType('bloogus!')
			fail(self)
		except ValueError as ve:
			self.assertEqual( str(ve), 'Experiment must be \'regression\' or \'classification\', cannot be bloogus!' )

		# A known type is accepted and echoed back by the getter.
		experiment.setExperimentType('classification')
		self.assertEqual( experiment.getExperimentType(), 'classification' )
コード例 #13
0
	def test_setRandomState(self):
		"""setRandomState() updates random_state on every registered model."""
		experiment = Experiment(name='experiment 1', models=['rf', 'dt'], exp_type='regression')

		# Both models start with the default random_state of 0.
		models = experiment.getModels()
		for key in ('rf', 'dt'):
			self.assertEqual( models[key].getConstructorArgs()['random_state'], 0 )

		experiment.setRandomState(1)

		# After the update, both models report the new state.
		models = experiment.getModels()
		for key in ('rf', 'dt'):
			self.assertEqual( models[key].getConstructorArgs()['random_state'], 1 )
コード例 #14
0
ファイル: shell.py プロジェクト: yinyeya/training-material
 def do_add(self, arg_str):
     '''Use either:
        add experiment <description> <start_date>
        add researcher <last_name> <first_name>
        add sample <description> <experiment_id>'''
     # NOTE: the docstring above is the `cmd` help text shown for `help add`,
     # so its wording is part of the user-facing behavior.
     try:
         type_str, attr1, attr2 = ExperimentShell.parse_add_arg(arg_str)
     except SyntaxException as e:
         print('*** {0}'.format(str(e)), file=sys.stderr)
         return
     if type_str == 'experiment':
         item = Experiment(description=attr1, start_date=attr2)
     elif type_str == 'researcher':
         item = Researcher(last_name=attr1, first_name=attr2)
     elif type_str == 'sample':
         item = Sample(description=attr1, experiment_id=attr2)
     else:
         # Previously an unrecognized type fell through and crashed with a
         # NameError on `item`; report it like a syntax problem instead.
         print('*** unknown type: {0}'.format(type_str), file=sys.stderr)
         return
     self._db_session.add(item)
     self._db_session.commit()
コード例 #15
0
  def load(self):
    # Count the experiment's total AMT HITs and how many are still active.
    self.total_hits = 0
    self.active_hits = 0

    lvlset = self.args["lvlset"]
    _, lvls = loadBoogieLvlSet(lvlset)

    # Default to every level in the set when no explicit subset was given.
    if self.lvls is None:
      self.lvls = lvls.keys()

    try:
      self.exp = Experiment(self.name)
    except IOError:
      # Python 2 print statement; the experiment's data may be absent on disk.
      print "Error loading experiment %s" % self.name
      # Ignore missing experiments
      return

    self.total_hits = len(self.exp.server_runs)
    # NOTE(review): `hits` and `isHitActive` are not defined in this method —
    # presumably module-level names holding the HIT table; confirm before reuse.
    self.active_hits = sum(sr.hit_id in hits and isHitActive(hits[sr.hit_id])
      for sr in self.exp.server_runs)
コード例 #16
0
def main():
    """Generate synthetic chemical data, run three peak pickers over several
    scan-time distributions, then match and plot the resulting peaks."""
    wrkdir = os.path.abspath(os.getcwd())
    datadir = init_dir(os.path.join(wrkdir, "data"))
    xcms_outs = init_dir(os.path.join(wrkdir, "xcms_results"))
    mzmine_outs = init_dir(os.path.join(wrkdir, "mzmine_results"))
    msdial_outs = init_dir(os.path.join(wrkdir, "msdial_results"))
    plotdir = os.path.join(wrkdir, "plots")
    
    # 500 synthetic chemicals plus the retention-time window they span
    # (min/max RT are read back from the file generate_chems wrote).
    chems, _, rt_path = Experiment.generate_chems(datadir, 500)
    with open(rt_path, 'r') as rts: min_rt, max_rt = map(float, rts.readline().split(','))
    
    def run_experiment(experiment_type, params, oratios, clean=False):
        # Runs the pickers now but returns the peak-matching step as a
        # closure, so all experiments are generated before any is scored.
        exp = experiment_type(chems, min_rt, max_rt)
        exp_name = experiment_type.name.lower()
        
        def subdir(maindir): return init_dir(os.path.join(maindir, exp_name))
        xcms_params = XCMSParams(subdir(xcms_outs))
        mzmine_params = MZMineParams(subdir(mzmine_outs), os.path.join(wrkdir, "mzmine_template.xml"))
        msdial_params = MSDialParams("lcmsdda", subdir(datadir), subdir(msdial_outs), os.path.join(wrkdir, "msdialparam_dda.txt"))
        
        exp.generate_mzmls(subdir(datadir), params)
        xcms_filenames, mzmine_filenames, msdial_filenames = exp.run_peak_pickers(xcms_params=xcms_params, mzmine_params=mzmine_params, msdial_params=msdial_params)
        def results(): exp.match_peaks(plotdir, params, oratios, xcms_filenames=xcms_filenames, mzmine_filenames=mzmine_filenames, msdial_filenames=msdial_filenames, clean=clean)
        return results
    
    # Overlap ratios against which peak matches are scored.
    oratios = [0.5, 0.6, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
    
    # Parameter grids, one per scan-time distribution type.
    static_params = [1.0, 2.0, 4.0, 6.0, 8.0, 16.0, 32.0]
    uniform_params = [(7.0, 7.0 / 20 * i) for i in range(20)]
    choice_params = [ls for ls in [[9] * i + [11] + [13] * i for i in range(1, 101, 10)]]
    recursive_params = [(v, p) for v in [1, 3, 5] for p in [0.1, 0.3, 0.5, 0.7, 0.9]]
    markov_params = [(min_rt, {"NOT_BUSY" : 0.6, "BUSY" : 10}, p) for p in [0.01, 0.03, 0.05, 0.1, 0.2]]
    params = [(StaticTimes, static_params), (UniformTimes, uniform_params), (ChoiceTimes, choice_params), (RecursiveTimes, recursive_params), (MarkovTimes, markov_params)]
    
    # clean=True only for the first experiment (i == 0).
    result_fns = [run_experiment(expt, expp, oratios, clean=(not i)) for i, (expt, expp) in enumerate(params)]
    for fn in result_fns: fn()
コード例 #17
0
ファイル: beta.py プロジェクト: andywrussell/BioCW1
def run_beta():
    """Grid-search the PSO beta/gamma split (beta + gamma ≈ 4.1) on the
    two-input complex data set and report the best pair found.

    Fixes the original implementation, which never updated the overall
    best trackers (the final line always printed beta=0, gamma=0,
    error=None) and unrolled every candidate pair as a copy-pasted block.
    """
    params_pso = {
        "swarmsize": 40,
        "alpha": 1,
        "beta": 0,
        "gamma": 4.1,
        "delta": 0,
        "jumpsize": 1,
        "act_bound": 5,
        "weight_bound": 10,
        "bound_strat": 1,
        "num_informants": 3,
        "vel_range": 1,
        "max_runs": 1000,
        "informants_strat": 2
    }

    # 2-2-1 network for the two-input complex function.
    net_layers = {
        "layer1": {
            "input_count": 2,
            "node_count": 2,
            "activations": []
        },
        "layer2": {
            "input_count": 2,
            "node_count": 2,
            "activations:": []
        },
        "layer3": {
            "input_count": 2,
            "node_count": 1,
            "activations:": []
        }
    }

    # (beta, gamma) candidates. The trailing duplicates of (0.5, 3.6) and
    # (0.0, 4.1) are kept from the original search schedule.
    candidates = [
        (0, 4.1),
        (0.5, 3.6),
        (1.0, 3.1),
        (1.5, 2.6),
        (2.05, 2.05),
        (2.6, 1.5),
        (3.1, 1.0),
        (0.5, 3.6),
        (0.0, 4.1),
    ]

    best_gamma = 0
    best_beta = 0
    best_error = None

    for j in range(0, 10):
        run_beta_val = 0
        run_gamma = 4.1
        run_best = None

        for beta, gamma in candidates:
            params_pso["beta"] = beta
            params_pso["gamma"] = gamma

            experiment = Experiment(params_pso, net_layers, path="2in_complex.txt", debugMode=False, sampleMode=True)
            experiment.run()

            if run_best is None or experiment.pso.best.fitness < run_best:
                run_best = experiment.pso.best.fitness
                run_beta_val = beta
                run_gamma = gamma

        print("\nRun ", j, " Beta: ", run_beta_val, " Gamma: ", run_gamma, " Error", run_best)

        # Track the best pair across all runs (missing in the original).
        if best_error is None or run_best < best_error:
            best_error = run_best
            best_beta = run_beta_val
            best_gamma = run_gamma

    print("\nOverall Beta: ", best_beta, " Gamma: ", best_gamma, " Error", best_error)
コード例 #18
0
from agents import RandomSearchAgent
from experiments import Experiment

import gym

# Fixed seed so the environment and agent are reproducible.
seed = 16

env = gym.make('LunarLander-v2')
env.seed(seed)

# Random-search agent sized to the LunarLander observation/action spaces;
# the search stops once an episode reward of 210 is reached.
ragent = RandomSearchAgent(name='RandomSearchAgent-1',
                           state_dim=env.observation_space.shape[0],
                           action_dim=env.action_space.n,
                           seed=seed,
                           stop_search_reward=210)
exp = Experiment(env, ragent, logdir="../log", verbose=True, num_episodes=1000)

exp.run()
    parser.add_argument('--save_embedding', action='store_true', help='')

    parser.add_argument('--plot_embeddings', action='store_true', help='')

    parser.add_argument('--prob_diff', action='store_true', help='')

    args = parser.parse_args()

    # Load all of the experiments
    all_experiments = []
    results_folders = []
    for i, name in enumerate(args.names):
        exp = Experiment(name,
                         args.quantize,
                         None,
                         start_it=args.checkpoint,
                         experiment_root=args.experiment_root)
        exp.load_experiment()
        results_folders.append(
            os.path.join(
                args.results_root,
                '%s_%d' % (exp.experiment_name, exp.current_iteration)))

        sampler = exp.get_jitted_sampler()
        encoder = exp.get_jitted_forward()
        decoder = exp.get_jitted_inverse()
        all_experiments.append((exp, sampler, encoder, decoder))

    # Save the plots to the results folder
    folder_name = '_'.join([
コード例 #20
0
                    # Calculate AngleError as long as the positional data is there
                    try: row.append(-trial.polar_error[1])
                    except TypeError: row.append("")

                    # Write the row to csvfile
                    writer.writerow(row)


# _________________________________ Main ________________________________________________

if __name__ == "__main__":

    # Initialize the experiment with a title from console
    title = input("Enter a name for the experiment: ")
    experiment = Experiment(title)

    # Map data into the experiment objects
    print("Pulling data...", end="")
    experiment.pull_data()
    print("Done.")

    # Write the data to <Title>_data_<Today>.csv
    file_name = "Output/{}_data_{}.csv".format(
                 experiment.name, datetime.datetime.today().date())
    print("Writing to {}...".format(file_name), end="")

    # ----- SELECT YOUR OUTPUT TYPE HERE ------------------------------------------------

    # Uncomment this function to generate the rich data CSV made by Alec
    # NOTE(review): the file is actually written to "../Output/..." relative
    # to the working directory — confirm that is the intended location.
    write_to_csv(experiment, "../" + file_name)
コード例 #21
0
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense, Conv2D, MaxPooling2D
from keras.models import Model
from keras import metrics
from keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau

# Local imports
import models
from experiments import Experiment

if __name__ == '__main__':
    # Setup experiment
    experiment = Experiment('./classification_experiments')
    experiment.add_dir('checkpoints')
    experiment.add_dir('tensorboard')

    # Print information about GPUs
    print(device_lib.list_local_devices())

    # Load model
    model_name = config['MODEL_NAME']
    print('Loading model {}...'.format(model_name))

    if model_name in models.name_to_model:
        model_build_function = models.name_to_model[model_name]
        model = model_build_function(config['IMAGE_HEIGHT'],
                                     config['IMAGE_WIDTH'],
                                     config['N_CHANNELS'],
コード例 #22
0
ファイル: thesis.py プロジェクト: braineniac/kalman_estimator
 def _set_experiments(self):
     """Create one floor Experiment per Kalman filter; every experiment
     shares the first system IO."""
     pairs = zip(self._kalman_filters, ThesisConfig.floor_legend)
     for kalman_filter, legend in pairs:
         self._experiments.append(
             Experiment(self._sys_IOs[0], kalman_filter,
                        ThesisConfig.floor_slice, legend))
コード例 #23
0
ファイル: Forge.py プロジェクト: sallensun/neural-mmo
    #Pass the tick thunk to a twisted WebSocket server
    god = trinity.god.remote(trinity, config, idx=0)
    model = Model(Policy, config).load(None, config.BEST).weights
    env = god.getEnv.remote()
    god.tick.remote(model)

    #Start a websocket server for rendering. This requires
    #forge/embyr, which is automatically downloaded from
    #jsuarez5341/neural-mmo-client in scripts/setup.sh
    from forge.embyr.twistedserver import Application
    Application(env, god.tick.remote)


if __name__ == '__main__':
    #Experiment + command line args specify configuration
    #Trinity specifies Cluster-Server-Core infra modules
    config = Experiment('pop', Config).init()
    trinity = Trinity(Pantheon, God, Sword)
    args = parseArgs(config)

    #Blocking call: switches execution to a
    #Web Socket Server module
    if args.render:
        render(trinity, config, args)

    #Train until AGI emerges
    #NOTE: infinite loop by design — the process runs until killed.
    trinity.init(config, args)
    while True:
        log = trinity.step()
        print(log)
コード例 #24
0
import torch.nn as nn
import numpy as np
import copy
from experiments import Experiment
import os
from device import device
from configs.double_dqn_dense import HYPERPARAMS, model, target_model, loss_fn, optimizer, batch_size, discount_factor, target_model, replay_buffer_length, learning_rate, \
loss_fn, optimizer, no_episodes, no_episodes_to_reach_epsilon, min_epsilon, no_episodes_before_training, no_episodes_before_updating_target, use_double_dqn, snapshot_game_every_n_episodes, no_episodes_to_fill_up_existing_model_replay_buffer

# NOTE(review): `pprint` and `torch` are used below but not imported in this
# chunk — presumably imported elsewhere in the file; confirm.
pp = pprint.PrettyPrinter(indent=4)
job_name = input("What is the job name: ")

# A named job gets its own results folder; otherwise use the default layout.
if job_name:
    experiment = Experiment(
        python_file_name=os.path.basename(__file__),
        folder_name=job_name,
        model=model,
    )
else:
    experiment = Experiment(
        python_file_name=os.path.basename(__file__),
        model=model,
    )

experiment.add_hyperparameter(HYPERPARAMS)
pp.pprint(experiment.hyperparameters)

# Set to a checkpoint path to resume training from an existing model.
model_path = ""

if model_path:
    model = torch.load(model_path)
コード例 #25
0
ファイル: final.py プロジェクト: andywrussell/BioCW1
def run_final():
    """Run the final PSO benchmark on every data set and print the mean best
    MSE over 10 runs for each.

    Fixes the section headers that printed a literal backslash
    ("\\Final ...", a typo for "\\nFinal ..."), and collapses six
    copy-pasted 10-run loops into one helper. Per-run progress lines are
    normalized to the '\\nRun <i>' form.
    """
    params_pso = {
        "swarmsize": 40,
        "alpha": 1,
        "beta": 0,
        "gamma": 4.1,
        "delta": 0,
        "jumpsize": 1,
        "act_bound": 5,
        "weight_bound": 10,
        "bound_strat": 1,
        "num_informants": 3,
        "vel_range": 1,
        "max_runs": 1000,
        "informants_strat": 2
    }

    def _mean_fitness(net, path):
        # Average of the best PSO fitness over 10 independent runs on *path*,
        # using whatever beta/gamma params_pso currently holds.
        total = 0
        for i in range(0, 10):
            print("\nRun ", i)
            experiment = Experiment(params_pso,
                                    net,
                                    path=path,
                                    debugMode=False,
                                    sampleMode=True)
            experiment.run()
            total += experiment.pso.best.fitness
        return total / 10

    # Single-node network for the one-input functions.
    net_single = {
        "layer1": {
            "input_count": 1,
            "node_count": 1,
            "activations": []
        }
    }

    print("\nFinal Cubic")
    print("=======================")
    print("\nMse for final on cubic", _mean_fitness(net_single, "1in_cubic.txt"))

    params_pso["beta"] = 0.5
    params_pso["gamma"] = 3.6

    print("\nFinal Linear")
    print("=======================")
    print("\nMse for final on linear", _mean_fitness(net_single, "1in_linear.txt"))

    params_pso["beta"] = 0
    params_pso["gamma"] = 4.1

    print("\nFinal Sine")
    print("=======================")
    print("\nMse for final on Sine", _mean_fitness(net_single, "1in_sine.txt"))

    # 1-2-1 network for tanh.
    # NOTE(review): the "activations:" keys below (trailing colon) reproduce
    # the original dicts byte-for-byte — likely a typo, but changing the key
    # could alter how the network builder reads the config; confirm first.
    net_tanh = {
        "layer1": {
            "input_count": 1,
            "node_count": 2,
            "activations": []
        },
        "layer2": {
            "input_count": 2,
            "node_count": 1,
            "activations:": []
        }
    }

    params_pso["beta"] = 0
    params_pso["gamma"] = 4.1

    print("\nFinal Tanh")
    print("=======================")
    print("\nMse for final on Tanh", _mean_fitness(net_tanh, "1in_tanh.txt"))

    # 2-2-1 network for XOR.
    net_xor = {
        "layer1": {
            "input_count": 2,
            "node_count": 2,
            "activations": []
        },
        "layer2": {
            "input_count": 2,
            "node_count": 1,
            "activations:": []
        }
    }

    params_pso["beta"] = 0
    params_pso["gamma"] = 4.1

    print("\nFinal XOR")
    print("=======================")
    print("\nMse for final on XOR", _mean_fitness(net_xor, "2in_xor.txt"))

    print("\nFinal Complex")
    print("=======================")

    # 2-2-2-1 network for the complex function.
    net_complex = {
        "layer1": {
            "input_count": 2,
            "node_count": 2,
            "activations": []
        },
        "layer2": {
            "input_count": 2,
            "node_count": 2,
            "activations:": []
        },
        "layer3": {
            "input_count": 2,
            "node_count": 1,
            "activations:": []
        }
    }

    params_pso["beta"] = 2.05
    params_pso["gamma"] = 2.05

    print("\nMse for final on Complex", _mean_fitness(net_complex, "2in_complex.txt"))
コード例 #26
0
    # configurations = [(0.3, 1.0)]

    # In the meta file, we need:
    # - iteration number
    # - settings
    #    - path
    #    - s
    #    - t
    #    - score

    # Load all of the experiments
    for name in tqdm(args.names):
        exp = Experiment(name,
                         args.quantize,
                         None,
                         start_it=-1,
                         experiment_root=args.experiment_root)
        exp.load_experiment()
        sampler = exp.get_jitted_sampler()

        # Create the folder for the FID score
        experiment_fid_folder = os.path.join(fid_path, exp.experiment_name)
        pathlib.Path(experiment_fid_folder).mkdir(parents=True, exist_ok=True)

        # Extract the meta data
        meta_path = os.path.join(experiment_fid_folder, 'meta.yaml')
        if (os.path.exists(meta_path) == False):
            meta = {}
        else:
            # If the iteration number has changed, then we need to re-initialize
コード例 #27
0
ファイル: swarmsize.py プロジェクト: andywrussell/BioCW1
def _sweep_swarmsize(params_pso, net, path, outer_runs, echo_size=False):
    """Sweep PSO swarm sizes 10, 20, ..., 100 on one dataset.

    For each of ``outer_runs`` independent sweeps, runs an Experiment at
    every swarm size and tracks the size that produced the lowest
    ``pso.best.fitness``.  ``params_pso["swarmsize"]`` is mutated in place,
    exactly as the original inline loops did.

    Args:
        params_pso: shared PSO parameter dict (mutated: "swarmsize").
        net: network topology dict passed to Experiment.
        path: dataset file path passed to Experiment.
        outer_runs: number of independent sweeps to run.
        echo_size: if True, print each swarm size as it is tried
            (matches the original cubic sweep's extra output).

    Returns:
        (optimal_size, best_fitness) from the *last* sweep — the trackers
        are reset each outer run, matching the original behaviour.
    """
    optimal_size = 0
    best = None
    for run in range(outer_runs):
        params_pso["swarmsize"] = 0
        print("\nRun ", run)
        optimal_size = 0
        best = None
        for _ in range(10):
            params_pso["swarmsize"] += 10
            if echo_size:
                print(params_pso["swarmsize"])
            experiment = Experiment(params_pso,
                                    net,
                                    path=path,
                                    debugMode=False,
                                    sampleMode=True)
            experiment.run()
            fitness = experiment.pso.best.fitness
            if best is None or fitness < best:
                best = fitness
                optimal_size = params_pso["swarmsize"]
        print("\nRun ", run, "best size", optimal_size, " produced", best)
    return optimal_size, best


def run_swarmsize():
    """Find the best PSO swarm size for each benchmark dataset.

    Sweeps swarm sizes 10..100 over the cubic, linear, sine, tanh, XOR
    and complex datasets and prints the best size / fitness for each.
    The cubic sweep is repeated 10 times; the others once.
    """
    params_pso = {
        "swarmsize": 0,  # set by the sweep helper
        "alpha": 1,
        "beta": 2.05,
        "gamma": 2.05,
        "delta": 0,
        "jumpsize": 1,
        "act_bound": 5,
        "weight_bound": 10,
        "bound_strat": 1,
        "num_informants": 3,
        "vel_range": 1,
        "max_runs": 1000,
        "informants_strat": 2
    }

    # Single-neuron net for the 1-input regression datasets.
    net_single = {
        "layer1": {
            "input_count": 1,
            "node_count": 1,
            "activations": []
        }
    }

    # Headers originally used "\Swarmsize" — an invalid escape that printed
    # a literal backslash; "\n" was clearly intended.
    print("\nSwarmsize Cubic")
    print("=======================")
    cubic_optimal_size, cubic_best = _sweep_swarmsize(
        params_pso, net_single, "1in_cubic.txt", 10, echo_size=True)
    print("Cubic optimal size ", cubic_optimal_size, " produced", cubic_best)

    print("\nSwarmsize Linear")
    print("=======================")
    linear_optimal_size, linear_best = _sweep_swarmsize(
        params_pso, net_single, "1in_linear.txt", 1)
    print("linear optimal size ", linear_optimal_size, " produced",
          linear_best)

    print("\nSwarmsize Sine")
    print("=======================")
    sine_optimal_size, sine_best = _sweep_swarmsize(
        params_pso, net_single, "1in_sine.txt", 1)
    print("sine optimal size ", sine_optimal_size, " produced", sine_best)

    # Two-layer net used for both tanh and XOR sweeps.
    # NOTE(review): layer1 declares input_count 1 but is also used for the
    # 2-input XOR dataset below — looks suspicious, preserved as-is.
    net_layers = {
        "layer1": {
            "input_count": 1,
            "node_count": 2,
            "activations": []
        },
        "layer2": {
            "input_count": 2,
            "node_count": 1,
            # Fixed typo key "activations:" (stray colon) so it matches
            # layer1 — presumably Experiment reads "activations"; confirm.
            "activations": []
        }
    }

    print("\nSwarmsize Tanh")
    print("=======================")
    tanh_optimal_size, tanh_best = _sweep_swarmsize(
        params_pso, net_layers, "1in_tanh.txt", 1)
    print("tanh optimal size ", tanh_optimal_size, " produced", tanh_best)

    print("\nSwarmsize XOR")
    print("=======================")
    xor_optimal_size, xor_best = _sweep_swarmsize(
        params_pso, net_layers, "2in_xor.txt", 1)
    print("xor optimal size ", xor_optimal_size, " produced", xor_best)

    # Three-layer net for the complex 2-input dataset.
    net_complex = {
        "layer1": {
            "input_count": 2,
            "node_count": 2,
            "activations": []
        },
        "layer2": {
            "input_count": 2,
            "node_count": 2,
            "activations": []  # typo key "activations:" fixed here too
        },
        "layer3": {
            "input_count": 2,
            "node_count": 1,
            "activations": []  # typo key "activations:" fixed here too
        }
    }

    print("\nSwarmsize Complex")
    print("=======================")
    complex_optimal_size, complex_best = _sweep_swarmsize(
        params_pso, net_complex, "2in_complex.txt", 1)
    print("complex optimal size ", complex_optimal_size, " produced",
          complex_best)
コード例 #28
0
def main_worker(ngpus_per_node, args):
    """Train, evaluate and/or fine-tune a model for one experiment run.

    Builds the model, loss and SGD optimizer from ``args``, prepares the
    experiment's datasets and validation loader, then — depending on the
    flags in ``args`` — evaluates the loaded model, trains from scratch,
    and/or fine-tunes the classifier head.  Accuracy results go to
    ``<experiment>.csv``; artifacts are optionally copied to Google Drive.

    Args:
        ngpus_per_node: accepted for launcher API compatibility; unused here.
        args: parsed command-line namespace (gpu, lr, momentum, batch_size,
            evaluate/own/finetune/savemodel/savecsv flags, ...).
    """
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    experiment = Experiment(args)

    # Create the CSV that training/validation code appends accuracy rows to.
    filename = f'{experiment.name}.csv'
    print("Creating {} to store accuracy results".format(filename))
    # Context manager guarantees the handle is closed even if write fails
    # (original opened/closed manually).
    with open(filename, 'w+') as results:
        results.write("Epoch,Top1,Top5\n")

    # create model
    model = modelling.get_model(args, True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    cudnn.benchmark = True

    # Transformations and datasets come from the experiment definition.
    transform, additional_transform, validation_transform = experiment.get_transformation_set(
    )
    train_dataset, val_dataset = experiment.get_data_set(
        transform, additional_transform, validation_transform)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # Evaluate the loaded model and stop.
    if args.evaluate:
        modelling.validate(val_loader, model, criterion, args, filename)
        return

    # Train from scratch when no pre-trained/own model was supplied.
    if args.own is None:  # was `== None`
        modelling.train_model(model, train_dataset, val_loader,
                              args.start_epoch, args.epochs, optimizer,
                              criterion, filename, experiment.name, args)

    # Fine-tune: swap the classifier head to match this dataset's classes.
    if args.finetune:

        if args.arch.startswith('vgg'):
            # VGG keeps its final linear layer at classifier[6].
            number_of_features = model.classifier[6].in_features
            model.classifier[6] = nn.Linear(number_of_features,
                                            len(train_dataset.classes))

            model.classifier[6] = model.classifier[6].cuda()
        else:
            # Other architectures expose a single `fc` head (model is
            # presumably DataParallel-wrapped, hence `.module` — confirm).
            number_of_features = model.module.fc.in_features
            model.module.fc = nn.Linear(number_of_features,
                                        len(train_dataset.classes))

            model.module.fc = model.module.fc.cuda()

        # Only parameters that still require grad (the new head) are updated.
        optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad,
                                           model.parameters()),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)

        modelling.train_model(model, train_dataset, val_loader,
                              args.start_epoch, args.epochs, optimizer,
                              criterion, filename, experiment.name, args)

    # Optionally persist artifacts to Google Drive (Colab-style setup).
    if args.savemodel:
        drive_path = f'/content/gdrive/My Drive/{experiment.name}.pth.tar'
        shutil.copyfile(f'{experiment.name}.pth.tar', drive_path)
    if args.savecsv:
        drive_path = f'/content/gdrive/My Drive/{experiment.name}.csv'
        shutil.copyfile(f'{experiment.name}.csv', drive_path)
コード例 #29
0
# XOR network topology: 2 inputs -> 2 hidden nodes -> 1 output node.
net_layers = {
    "layer1": {
        "input_count": 2,
        "node_count": 2,
        "activations": []
    },
    "layer2": {
        "input_count": 2,
        "node_count": 1,
        # Fixed typo key "activations:" (stray colon) to match layer1 —
        # presumably Experiment reads "activations"; verify against its code.
        "activations": []
    }
}

# Run a single XOR experiment with the shared PSO parameters.
xorexp = Experiment(params_pso,
                    net_layers,
                    path="2in_xor.txt",
                    debugMode=False,
                    sampleMode=True)
xorexp.run()

## Single iterations of base experiments commented below
# net_single = {
#     "layer1": {
#         "input_count":1,
#         "node_count":1,
#         "activations": []
#     }
# }

# cubicexp = Experiment(params_pso, net_single, path="1in_cubic.txt", debugMode=False, sampleMode=True)
# cubicexp.run()
コード例 #30
0
        #agent.show_attrs()
    tf.reset_default_graph()


##############################################
##                   MAIN
##############################################
if __name__ == "__main__":
    # Parse command-line arguments into a plain dict.
    args = vars(parser.parse_args())

    if 'exp' in args['mode']:
        # Experiment mode: expand the named experiment into a list of
        # per-run argument dicts (see Experiments.py).
        exp_name = args['mode']
        experiment = Experiment(exp_name, args['parallel'])
        args_list = experiment.get_args_list()
    else:
        # Single-run mode: execute exactly one experiment with these args.
        args_list = [args]

    if args['parallel'] == 0:
        # Execute experiments sequentially.
        for args_ in args_list:
            execute_experiment(args_)
    else:
        # Execute experiments in parallel, one worker process each.
        from multiprocessing import Pool
        n_processes = args['parallel']
        with Pool(n_processes) as pool:
            # map, not starmap+zip: execute_experiment takes a single
            # argument, so wrapping each dict in a 1-tuple was needless.
            pool.map(execute_experiment, args_list)