Example no. 1
def main():
    # ===============
    # 1. config
    # ===============
    print('Loading config')
    config = get_config()
    print(config.model_file_name)

    # ===============
    # 2. data
    # ===============
    print('Reading data from %s' % config.in_path)
    data = Data(config=config)

    # ===============
    # 3. experiment
    # ===============
    exp = Experiment(data=data, config=config)
    print('Start training')
    exp.run(check_period=config.check_period,
            early_stop=config.early_stop,
            patience=config.patience)

    # ===============
    # 4. test
    # ===============
    if 'wn18' in config.in_path:  # Sanity test on wn18
        exp.show_link_prediction(h='06845599',
                                 t='03754979',
                                 r='_member_of_domain_usage',
                                 raw=True)
    if 'fb15k' in config.in_path:  # Sanity test on fb15k
        exp.show_link_prediction(
            h='/m/08966',
            t='/m/05lf_',
            r='/travel/travel_destination/climate./travel/travel_destination_monthly_climate/month',
            raw=True)
    if 'KG30C' in config.in_path:  # Sanity test
        exp.show_link_prediction(h='7F74B998',
                                 t='019EC1A3',
                                 r='paper_in_domain',
                                 raw=True)
    if 'KG94C' in config.in_path:  # Sanity test
        exp.show_link_prediction(h='7E52972F',
                                 t='80D75AD7',
                                 r='author_write_paper',
                                 raw=True)
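
The snippet above does not show its imports or entry point. A minimal sketch of how main() would typically be wired up follows; the module names used in the imports are assumptions for illustration, not taken from the original project.

# Hypothetical imports -- the original snippet does not show where these names come from.
from config import get_config      # assumed location of get_config()
from data import Data              # assumed location of Data
from experiment import Experiment  # assumed location of Experiment

if __name__ == '__main__':
    main()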
Example no. 2
    def test_cls_init(self):
        env = get_env()

        exp = Experiment(
            # random_seed=0,
            epochs=1,
            model_cls='models.transformers.JointBERT',
            model_params={
                'bert_model_path': env['bert_dir'] + '/bert-base-cased',
                'labels_count': 3,
            },
            loss_func_cls='torch.nn.BCELoss',  # loss,
            model_output_to_loss_input=lambda ys: ys.double(),
            data_helper_cls='wiki.data_helpers.JointBERTWikiDataHelper',
            data_helper_params={
                'wiki_relations_path': '../wiki/relations.csv',
                'wiki_articles_path': '../wiki/docs.pickle',
                'labels': ['employer', 'country_of_citizenship'],
                # 'employer' # 'capital' # 'country_of_citizenship' #'educated_at' # 'opposite_of'
                'label_col': 'relation_name',
                'negative_sampling_ratio': 1.,
                'train_test_split': 0.7,
                'max_seq_length': 512,
                'train_batch_size': 4,
                'test_batch_size': 4,
                'bert_model_path':
                '/Volumes/data/repo/data/bert/bert-base-cased',
                # 'bert_tokenizer_cls': '',
                'bert_tokenizer_params': {
                    'do_lower_case': False,
                },
                'df_limit': 3,
            },
            tqdm_cls='tqdm.tqdm',
            output_dir='../output',
        )

        assert isinstance(exp.model, JointBERT)
        assert isinstance(exp.data_helper, JointBERTWikiDataHelper)
        assert isinstance(exp.loss_func, BCELoss)
        assert tqdm == exp.tqdm_cls

        print(flatten(exp.to_dict()))

        exp.run()
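
Example no. 2 passes model_cls, loss_func_cls, data_helper_cls and tqdm_cls as dotted strings and then asserts on the resolved classes. A sketch of how such strings are commonly resolved follows; the helper name resolve_cls is made up for illustration, and the actual Experiment implementation may differ.

from importlib import import_module


def resolve_cls(dotted_path):
    # Split 'torch.nn.BCELoss' into module path and class name,
    # import the module, and return the class object.
    module_path, cls_name = dotted_path.rsplit('.', 1)
    return getattr(import_module(module_path), cls_name)


# e.g. resolve_cls('torch.nn.BCELoss')() builds a BCELoss instance.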
Example no. 3
        "node_count": 2,
        "activations": []
    },
    "layer2": {
        "input_count": 2,
        "node_count": 1,
        "activations:": []
    }
}

xorexp = Experiment(params_pso,
                    net_layers,
                    path="2in_xor.txt",
                    debugMode=False,
                    sampleMode=True)
xorexp.run()

## Single runs of the base experiments are kept commented out below
# net_single = {
#     "layer1": {
#         "input_count":1,
#         "node_count":1,
#         "activations": []
#     }
# }

# cubicexp = Experiment(params_pso, net_single, path="1in_cubic.txt", debugMode=False, sampleMode=True)
# cubicexp.run()

# linearexp = Experiment(params_pso, net_single, path="1in_linear.txt", debugMode=False, sampleMode=True)
# linearexp.run()
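
Example no. 3 starts mid-dictionary, so its PSO parameters and the first network layer are cut off. A hypothetical reconstruction of the truncated opening is sketched below; the key names mirror the dictionaries used in the later examples, but the exact parameter values used here are unknown.

# Hypothetical reconstruction of the truncated opening (values are placeholders).
params_pso = {
    "swarmsize": 40, "alpha": 1, "beta": 2.05, "gamma": 2.05, "delta": 0,
    "jumpsize": 1, "act_bound": 5, "weight_bound": 10, "bound_strat": 1,
    "num_informants": 3, "vel_range": 1, "max_runs": 1000, "informants_strat": 2
}

net_layers = {
    "layer1": {
        "input_count": 2,   # two inputs for the XOR problem
        "node_count": 2,
        "activations": []
    },
    "layer2": {
        "input_count": 2,
        "node_count": 1,
        "activations": []
    }
}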
Example no. 4
def run_beta():
    params_pso = {
        "swarmsize": 40,
        "alpha": 1,
        "beta": 0,
        "gamma": 4.1,
        "delta": 0,
        "jumpsize": 1,
        "act_bound": 5,
        "weight_bound": 10,
        "bound_strat": 1,
        "num_informants": 3,
        "vel_range": 1,
        "max_runs": 1000,
        "informants_strat": 2
    }

    # net_layers = {
    #     "layer1": {
    #         "input_count":1,
    #         "node_count":1,
    #         "activations": []
    #     }
    # }

    # net_layers = {
    #     "layer1": {
    #         "input_count":2,
    #         "node_count":2,
    #         "activations": []
    #     },
    #     "layer2": {
    #         "input_count":2,
    #         "node_count": 1,
    #         "activations:":[]
    #     }
    # }

    net_layers = {
        "layer1": {
            "input_count": 2,
            "node_count": 2,
            "activations": []
        },
        "layer2": {
            "input_count": 2,
            "node_count": 2,
            "activations": []
        },
        "layer3": {
            "input_count": 2,
            "node_count": 1,
            "activations": []
        }
    }

    # (beta, gamma) settings to compare; every pair sums to 4.1.
    # Note that (0.5, 3.6) and (0.0, 4.1) appear twice, so those settings
    # get two evaluations per run.
    beta_gamma_settings = [(0.0, 4.1), (0.5, 3.6), (1.0, 3.1), (1.5, 2.6),
                           (2.05, 2.05), (2.6, 1.5), (3.1, 1.0), (0.5, 3.6),
                           (0.0, 4.1)]

    best_gamma = 0
    best_beta = 0
    best_error = None

    for j in range(0, 10):
        run_best_beta = 0
        run_best_gamma = 4.1
        run_best = None

        for beta, gamma in beta_gamma_settings:
            params_pso["beta"] = beta
            params_pso["gamma"] = gamma

            experiment1 = Experiment(params_pso, net_layers, path="2in_complex.txt", debugMode=False, sampleMode=True)
            experiment1.run()

            if run_best is None or experiment1.pso.best.fitness < run_best:
                run_best = experiment1.pso.best.fitness
                run_best_beta = beta
                run_best_gamma = gamma

        # Keep the best setting seen across all runs for the final summary.
        if best_error is None or run_best < best_error:
            best_error = run_best
            best_beta = run_best_beta
            best_gamma = run_best_gamma

        print("\nRun ", j, " Beta: ", run_best_beta, " Gamma: ", run_best_gamma, " Error", run_best)

    print("\nOverall Beta: ", best_beta, " Gamma: ", best_gamma, " Error", best_error)
Example no. 5
from agents import RandomSearchAgent
from experiments import Experiment

import gym

seed = 16

env = gym.make('LunarLander-v2')
env.seed(seed)

ragent = RandomSearchAgent(name='RandomSearchAgent-1',
                           state_dim=env.observation_space.shape[0],
                           action_dim=env.action_space.n,
                           seed=seed,
                           stop_search_reward=210)
exp = Experiment(env, ragent, logdir="../log", verbose=True, num_episodes=1000)

exp.run()
Example no. 6
def run_swarmsize():
    print("\Swarmsize Cubic")
    print("=======================")

    params_pso = {
        "swarmsize": 0,
        "alpha": 1,
        "beta": 2.05,
        "gamma": 2.05,
        "delta": 0,
        "jumpsize": 1,
        "act_bound": 5,
        "weight_bound": 10,
        "bound_strat": 1,
        "num_informants": 3,
        "vel_range": 1,
        "max_runs": 1000,
        "informants_strat": 2
    }

    net_single = {
        "layer1": {
            "input_count": 1,
            "node_count": 1,
            "activations": []
        }
    }
    cubic_optimal_size = 0
    cubic_best = None
    for j in range(0, 10):
        params_pso["swarmsize"] = 0
        print("\nRun ", j)
        cubic_optimal_size = 0
        cubic_best = None
        for i in range(0, 10):
            params_pso["swarmsize"] += 10
            print(params_pso["swarmsize"])
            experiment1 = Experiment(params_pso,
                                     net_single,
                                     path="1in_cubic.txt",
                                     debugMode=False,
                                     sampleMode=True)
            experiment1.run()
            if (cubic_best == None
                    or experiment1.pso.best.fitness < cubic_best):
                cubic_best = experiment1.pso.best.fitness
                cubic_optimal_size = params_pso["swarmsize"]
        print("\nRun ", j, "best size", cubic_optimal_size, " produced",
              cubic_best)

    print("Cubic optimal size ", cubic_optimal_size, " produced", cubic_best)

    print("\Swarmsize Linear")
    print("=======================")
    linear_optimal_size = 0
    linear_best = None
    for j in range(0, 1):
        params_pso["swarmsize"] = 0
        print("\nRun ", j)
        linear_optimal_size = 0
        linear_best = None
        for i in range(0, 10):
            params_pso["swarmsize"] += 10
            experiment1 = Experiment(params_pso,
                                     net_single,
                                     path="1in_linear.txt",
                                     debugMode=False,
                                     sampleMode=True)
            experiment1.run()
            if (linear_best == None
                    or experiment1.pso.best.fitness < linear_best):
                linear_best = experiment1.pso.best.fitness
                linear_optimal_size = params_pso["swarmsize"]
        print("\nRun ", j, "best size", linear_optimal_size, " produced",
              linear_best)

    print("linear optimal size ", linear_optimal_size, " produced",
          linear_best)

    print("\Swarmsize Sine")
    print("=======================")
    sine_optimal_size = 0
    sine_best = None
    for j in range(0, 1):
        params_pso["swarmsize"] = 0
        print("\nRun ", j)
        sine_optimal_size = 0
        sine_best = None
        for i in range(0, 10):
            params_pso["swarmsize"] += 10
            experiment1 = Experiment(params_pso,
                                     net_single,
                                     path="1in_sine.txt",
                                     debugMode=False,
                                     sampleMode=True)
            experiment1.run()
            if (sine_best == None or experiment1.pso.best.fitness < sine_best):
                sine_best = experiment1.pso.best.fitness
                sine_optimal_size = params_pso["swarmsize"]
        print("\nRun ", j, "best size", sine_optimal_size, " produced",
              sine_best)

    print("sine optimal size ", sine_optimal_size, " produced", sine_best)

    net_layers = {
        "layer1": {
            "input_count": 1,
            "node_count": 2,
            "activations": []
        },
        "layer2": {
            "input_count": 2,
            "node_count": 1,
            "activations:": []
        }
    }

    print("\Swarmsize Tanh")
    print("=======================")
    tanh_optimal_size = 0
    tanh_best = None
    for j in range(0, 1):
        params_pso["swarmsize"] = 0
        print("\nRun ", j)
        tanh_optimal_size = 0
        tanh_best = None
        for i in range(0, 10):
            params_pso["swarmsize"] += 10
            experiment1 = Experiment(params_pso,
                                     net_layers,
                                     path="1in_tanh.txt",
                                     debugMode=False,
                                     sampleMode=True)
            experiment1.run()
            if (tanh_best == None or experiment1.pso.best.fitness < tanh_best):
                tanh_best = experiment1.pso.best.fitness
                tanh_optimal_size = params_pso["swarmsize"]
        print("\nRun ", j, "best size", tanh_optimal_size, " produced",
              tanh_best)

    print("tanh optimal size ", tanh_optimal_size, " produced", tanh_best)

    print("\Swarmsize XOR")
    print("=======================")
    xor_optimal_size = 0
    xor_best = None
    for j in range(0, 1):
        params_pso["swarmsize"] = 0
        print("\nRun ", j)
        xor_optimal_size = 0
        xor_best = None
        for i in range(0, 10):
            params_pso["swarmsize"] += 10
            experiment1 = Experiment(params_pso,
                                     net_layers,
                                     path="2in_xor.txt",
                                     debugMode=False,
                                     sampleMode=True)
            experiment1.run()
            if (xor_best == None or experiment1.pso.best.fitness < xor_best):
                xor_best = experiment1.pso.best.fitness
                xor_optimal_size = params_pso["swarmsize"]
        print("\nRun ", j, "best size", xor_optimal_size, " produced",
              xor_best)

    print("xor optimal size ", xor_optimal_size, " produced", xor_best)

    net_complex = {
        "layer1": {
            "input_count": 2,
            "node_count": 2,
            "activations": []
        },
        "layer2": {
            "input_count": 2,
            "node_count": 2,
            "activations:": []
        },
        "layer3": {
            "input_count": 2,
            "node_count": 1,
            "activations:": []
        }
    }

    print("\Swarmsize Complex")
    print("=======================")
    complex_optimal_size = 0
    complex_best = None
    for j in range(0, 1):
        params_pso["swarmsize"] = 0
        print("\nRun ", j)
        complex_optimal_size = 0
        complex_best = None
        for i in range(0, 10):
            params_pso["swarmsize"] += 10
            experiment1 = Experiment(params_pso,
                                     net_complex,
                                     path="2in_complex.txt",
                                     debugMode=False,
                                     sampleMode=True)
            experiment1.run()
            if (complex_best == None
                    or experiment1.pso.best.fitness < complex_best):
                complex_best = experiment1.pso.best.fitness
                complex_optimal_size = params_pso["swarmsize"]
        print("\nRun ", j, "best size", complex_optimal_size, " produced",
              complex_best)

    print("complex optimal size ", complex_optimal_size, " produced",
          complex_best)
Example no. 7
def run_final():
    print("\Final Cubic")
    print("=======================")

    params_pso = {
        "swarmsize": 40,
        "alpha": 1,
        "beta": 0,
        "gamma": 4.1,
        "delta": 0,
        "jumpsize": 1,
        "act_bound": 5,
        "weight_bound": 10,
        "bound_strat": 1,
        "num_informants": 3,
        "vel_range": 1,
        "max_runs": 1000,
        "informants_strat": 2
    }

    net_single = {
        "layer1": {
            "input_count": 1,
            "node_count": 1,
            "activations": []
        }
    }

    exp1 = 0
    for i in range(0, 10):
        print("\nRun ", i)
        experiment1 = Experiment(params_pso,
                                 net_single,
                                 path="1in_cubic.txt",
                                 debugMode=False,
                                 sampleMode=True)
        experiment1.run()
        exp1 += experiment1.pso.best.fitness

    print("\nMse for final on cubic", exp1 / 10)

    params_pso["beta"] = 0.5
    params_pso["gamma"] = 3.6

    print("\Final Linear")
    print("=======================")
    exp2 = 0
    for i in range(0, 10):
        print("\nRun ", i)
        experiment2 = Experiment(params_pso,
                                 net_single,
                                 path="1in_linear.txt",
                                 debugMode=False,
                                 sampleMode=True)
        experiment2.run()
        exp2 += experiment2.pso.best.fitness

    print("\nMse for final on linear", exp2 / 10)

    params_pso["beta"] = 0
    params_pso["gamma"] = 4.1

    print("\Final Sine")
    print("=======================")
    exp3 = 0
    for i in range(0, 10):
        print("Run ", i, "\n")
        experiment3 = Experiment(params_pso,
                                 net_single,
                                 path="1in_sine.txt",
                                 debugMode=False,
                                 sampleMode=True)
        experiment3.run()
        exp3 += experiment3.pso.best.fitness

    print("\nMse for final on Sine", exp3 / 10)

    net_layers = {
        "layer1": {
            "input_count": 1,
            "node_count": 2,
            "activations": []
        },
        "layer2": {
            "input_count": 2,
            "node_count": 1,
            "activations:": []
        }
    }

    params_pso["beta"] = 0
    params_pso["gamma"] = 4.1

    print("\nFinal Tanh")
    print("=======================")
    exp4 = 0
    for i in range(0, 10):
        print("Run ", i, "\n")
        experiment4 = Experiment(params_pso,
                                 net_layers,
                                 path="1in_tanh.txt",
                                 debugMode=False,
                                 sampleMode=True)
        experiment4.run()
        exp4 += experiment4.pso.best.fitness

    print("\nMse for final on Tanh", exp4 / 10)

    net_layers = {
        "layer1": {
            "input_count": 2,
            "node_count": 2,
            "activations": []
        },
        "layer2": {
            "input_count": 2,
            "node_count": 1,
            "activations:": []
        }
    }

    params_pso["beta"] = 0
    params_pso["gamma"] = 4.1

    print("\nFinal XOR")
    print("=======================")
    exp6 = 0
    for i in range(0, 10):
        print("\nRun ", i)
        experiment6 = Experiment(params_pso,
                                 net_layers,
                                 path="2in_xor.txt",
                                 debugMode=False,
                                 sampleMode=True)
        experiment6.run()
        exp6 += experiment6.pso.best.fitness

    print("\nMse for final on XOR", exp6 / 10)

    print("\nFinal Complex")
    print("=======================")

    net_complex = {
        "layer1": {
            "input_count": 2,
            "node_count": 2,
            "activations": []
        },
        "layer2": {
            "input_count": 2,
            "node_count": 2,
            "activations:": []
        },
        "layer3": {
            "input_count": 2,
            "node_count": 1,
            "activations:": []
        }
    }

    params_pso["beta"] = 2.05
    params_pso["gamma"] = 2.05

    exp5 = 0
    for i in range(0, 10):
        print("\nRun ", i, "\n")
        experiment5 = Experiment(params_pso,
                                 net_complex,
                                 path="2in_complex.txt",
                                 debugMode=False,
                                 sampleMode=True)
        experiment5.run()
        exp5 += experiment5.pso.best.fitness

    print("\nMse for final on Complex", exp5 / 10)
Example no. 8
def run_informant_strat():

    params_pso = {
        "swarmsize": 40,
        "alpha": 1,
        "beta": 0.805,
        "gamma": 3.295,
        "delta": 0,
        "jumpsize": 1,
        "act_bound": 5,
        "weight_bound": 10,
        "bound_strat": 1,
        "num_informants": 3,
        "vel_range": 1,
        "max_runs": 1000,
        "informants_strat": 0
    }

    net_simple = {
        "layer1": {
            "input_count": 1,
            "node_count": 1,
            "activations": []
        }
    }

    net_simple_2 = {
        "layer1": {
            "input_count": 2,
            "node_count": 2,
            "activations": []
        },
        "layer2": {
            "input_count": 2,
            "node_count": 1,
            "activations:": []
        }
    }

    exp2 = 0
    for i in range(0, 10):
        print("\nRun ", i)
        experiment2 = Experiment(params_pso,
                                 net_simple,
                                 path="1in_sine.txt",
                                 debugMode=False,
                                 sampleMode=True)
        experiment2.run()
        exp2 += experiment2.pso.best.fitness

    print("\nMse for strat 0 on linear", exp2 / 10)

    exp2 = 0
    params_pso["informants_strat"] = 1
    for i in range(0, 10):
        print("\nRun ", i)
        experiment2 = Experiment(params_pso,
                                 net_simple,
                                 path="1in_sine.txt",
                                 debugMode=False,
                                 sampleMode=True)
        experiment2.run()
        exp2 += experiment2.pso.best.fitness

    print("\nMse for strat 1 on linear", exp2 / 10)

    exp2 = 0
    params_pso["informants_strat"] = 2
    for i in range(0, 10):
        print("\nRun ", i)
        experiment2 = Experiment(params_pso,
                                 net_simple,
                                 path="1in_sine.txt",
                                 debugMode=False,
                                 sampleMode=True)
        experiment2.run()
        exp2 += experiment2.pso.best.fitness

    print("\nMse for strat 2 on linear", exp2 / 10)

    exp2 = 0
    params_pso["informants_strat"] = 3
    for i in range(0, 10):
        print("\nRun ", i)
        experiment2 = Experiment(params_pso,
                                 net_simple,
                                 path="1in_sine.txt",
                                 debugMode=False,
                                 sampleMode=True)
        experiment2.run()
        exp2 += experiment2.pso.best.fitness

    print("\nMse for strat 2 on linear", exp2 / 10)
Example no. 9
def run_informant_count():

    results = open("informantsresults.txt", "w+")
    results.write("\n\nTEST")

    params_pso = {
        "swarmsize": 40,
        "alpha": 1,
        "beta": 2.05,
        "gamma": 2.05,
        "delta": 0,
        "jumpsize": 1,
        "act_bound": 5,
        "weight_bound": 10,
        "bound_strat": 1,
        "num_informants": 0,
        "vel_range": 1,
        "max_runs": 1000,
        "informants_strat": 2
    }

    net_simple = {
        "layer1": {
            "input_count": 1,
            "node_count": 1,
            "activations": []
        }
    }

    params_pso["beta"] = 1.18
    params_pso["gamma"] = 2.92

    bestError = None
    bestNum = 0
    params_pso["num_informants"] = 0
    for i in range(40):
        params_pso["num_informants"] = i + 1
        error = 0
        for j in range(10):
            #  print("\nRun ", i)
            experiment2 = Experiment(params_pso,
                                     net_simple,
                                     path="1in_linear.txt",
                                     debugMode=False,
                                     sampleMode=True)
            experiment2.run()
            error += experiment2.pso.best.fitness
        avg = error / 10
        results.write("\nLinear - Avg error for " + str(i) + " informants: " +
                      str(avg))
        if (bestError == None or avg < bestError):
            bestError = avg
            bestNum = i + 1
    results.write("\nBest Informants for linear " + str(bestNum) +
                  " Informants : " + str(bestError))

    #Cubic
    params_pso["beta"] = 1.325
    params_pso["gamma"] = 2.775

    bestError = None
    bestNum = 0
    params_pso["num_informants"] = 0
    for i in range(40):
        params_pso["num_informants"] = i + 1
        error = 0
        for j in range(10):
            #  print("\nRun ", i)
            experiment2 = Experiment(params_pso,
                                     net_simple,
                                     path="1in_cubic.txt",
                                     debugMode=False,
                                     sampleMode=True)
            experiment2.run()
            error += experiment2.pso.best.fitness
        avg = error / 10
        results.write("\nCubic - Avg error for " + str(i) + " informants: " +
                      str(avg))
        if (bestError == None or avg < bestError):
            bestError = avg
            bestNum = i + 1
    results.write("\nBest Informants for cubic " + str(bestNum) +
                  " Informants : " + str(bestError))

    #Sine
    params_pso["beta"] = 0.91
    params_pso["gamma"] = 3.19

    bestError = None
    bestNum = 0
    params_pso["num_informants"] = 0
    for i in range(40):
        params_pso["num_informants"] = i + 1
        error = 0
        for j in range(10):
            #  print("\nRun ", i)
            experiment2 = Experiment(params_pso,
                                     net_simple,
                                     path="1in_sine.txt",
                                     debugMode=False,
                                     sampleMode=True)
            experiment2.run()
            error += experiment2.pso.best.fitness
        avg = error / 10
        results.write("\nSine - Avg error for " + str(i) + " informants: " +
                      str(avg))
        if (bestError == None or avg < bestError):
            bestError = avg
            bestNum = i + 1
    results.write("\nBest Informants for sine " + str(bestNum) +
                  " Informants : " + str(bestError))

    #Tanh
    net_layers = {
        "layer1": {
            "input_count": 1,
            "node_count": 2,
            "activations": []
        },
        "layer2": {
            "input_count": 2,
            "node_count": 1,
            "activations:": []
        }
    }

    params_pso["beta"] = 0.805
    params_pso["gamma"] = 3.295

    bestError = None
    bestNum = 0
    params_pso["num_informants"] = 0
    for i in range(40):
        params_pso["num_informants"] = i + 1
        error = 0
        for j in range(10):
            #  print("\nRun ", i)
            experiment2 = Experiment(params_pso,
                                     net_layers,
                                     path="1in_tanh.txt",
                                     debugMode=False,
                                     sampleMode=True)
            experiment2.run()
            error += experiment2.pso.best.fitness
        avg = error / 10
        results.write("\nTanh - Avg error for " + str(i) + " informants: " +
                      str(avg))
        if (bestError == None or avg < bestError):
            bestError = avg
            bestNum = i + 1
    results.write("\nBest Informants for tanh " + str(bestNum) +
                  " Informants : " + str(bestError))

    #xor
    net_layers = {
        "layer1": {
            "input_count": 2,
            "node_count": 2,
            "activations": []
        },
        "layer2": {
            "input_count": 2,
            "node_count": 1,
            "activations:": []
        }
    }

    params_pso["beta"] = 1.125
    params_pso["gamma"] = 2.975

    bestError = None
    bestNum = 0
    params_pso["num_informants"] = 0
    for i in range(40):
        params_pso["num_informants"] = i + 1
        error = 0
        for j in range(10):
            #  print("\nRun ", i)
            experiment2 = Experiment(params_pso,
                                     net_layers,
                                     path="2in_xor.txt",
                                     debugMode=False,
                                     sampleMode=True)
            experiment2.run()
            error += experiment2.pso.best.fitness
        avg = error / 10
        results.write("\nXor - Avg error for " + str(i) + " informants: " +
                      str(avg))
        if (bestError == None or avg < bestError):
            bestError = avg
            bestNum = i + 1
    results.write("\nBest Informants for XOR " + str(bestNum) +
                  " Informants : " + str(bestError))

    #Complex
    net_complex = {
        "layer1": {
            "input_count": 2,
            "node_count": 2,
            "activations": []
        },
        "layer2": {
            "input_count": 2,
            "node_count": 2,
            "activations:": []
        },
        "layer3": {
            "input_count": 2,
            "node_count": 1,
            "activations:": []
        }
    }

    params_pso["beta"] = 1.38
    params_pso["gamma"] = 2.72

    bestError = None
    bestNum = 0
    params_pso["num_informants"] = 0
    for i in range(40):
        params_pso["num_informants"] = i + 1
        error = 0
        for j in range(10):
            #  print("\nRun ", i)
            experiment2 = Experiment(params_pso,
                                     net_complex,
                                     path="2in_complex.txt",
                                     debugMode=False,
                                     sampleMode=True)
            experiment2.run()
            error += experiment2.pso.best.fitness
        avg = error / 10
        results.write("\nComplex - Avg error for " + str(i) + " informants: " +
                      str(avg))
        if (bestError == None or avg < bestError):
            bestError = avg
            bestNum = i + 1
    results.write("\nBest Informants for Complex " + str(bestNum) +
                  " Informants : " + str(bestError))
    results.close()
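
The six sweeps in Example no. 9 differ only in the network, data file, label and (beta, gamma) setting. A helper along the following lines (hypothetical, not part of the original module) would remove the duplication:

def sweep_informants(params_pso, net, path, label, results, runs=10, max_informants=40):
    # Sweep num_informants from 1 to max_informants, average the best PSO
    # fitness over `runs` repetitions, and log the best setting to `results`.
    best_error = None
    best_num = 0
    for n in range(1, max_informants + 1):
        params_pso["num_informants"] = n
        error = 0
        for _ in range(runs):
            exp = Experiment(params_pso, net, path=path,
                             debugMode=False, sampleMode=True)
            exp.run()
            error += exp.pso.best.fitness
        avg = error / runs
        results.write("\n" + label + " - Avg error for " + str(n) +
                      " informants: " + str(avg))
        if best_error is None or avg < best_error:
            best_error = avg
            best_num = n
    results.write("\nBest Informants for " + label + " " + str(best_num) +
                  " Informants : " + str(best_error))


# e.g. sweep_informants(params_pso, net_simple, "1in_linear.txt", "Linear", results)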
Example no. 10
    name='FullDQNAgent-1',
    state_dim=env.observation_space.shape[0],
    action_dim=env.action_space.n,
    epsdecay=0.975,
    buffersize=500000,
    samplesize=32,
    minsamples=1000,
    gamma=0.99,
    update_target_freq=600,
    nnparams={  # Improved DQN setting
        'hidden_layers': [(50, 'relu'), (40, 'relu')],
        'loss': hubert_loss,
        'optimizer': Adam(lr=0.0005),
        'target_network': True
    })

# Create an experiment with the LunarLander env and improved DQN agent for 500 train/test episodes
exp = Experiment(env, ragent, logdir="../log", verbose=True, num_episodes=500)

# Training trials
exp.run(testmode=False)

# Test trials
exp.run(testmode=True)

# Evaluate the agent in test trials
evaluate('../log/LunarLander_FullDQNAgent-1_test_data.csv',
         '../log/LunarLander_FullDQNAgent-1_test_evaluation.png', 'Experiment')

#plt.show()
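
Example no. 10 passes a hubert_loss function that is not shown in the snippet. The name suggests a Huber loss; the sketch below is written under that assumption and is not the repository's actual implementation.

import tensorflow as tf


def hubert_loss(y_true, y_pred, delta=1.0):
    # Huber loss: quadratic for small errors, linear for large ones.
    err = y_true - y_pred
    quadratic = 0.5 * tf.square(err)
    linear = delta * (tf.abs(err) - 0.5 * delta)
    return tf.reduce_mean(tf.where(tf.abs(err) <= delta, quadratic, linear))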
Example no. 11
if __name__ == '__main__':
    # TODO Use 'SimpleBaselineBinaryNetTanh' if you want labels [-1,1]
    baseline = 'SimpleBaselineNet'
    dataset = 'CIFAR_10'
    distortion_type = 'AE_shift'
    property = 'entropy'
    property_perc = [0.25, 0.5]
    classes = range(0, 10)
    distortion_mean = [2, 3]

    for p in property_perc:
        experiment = Experiment(baseline=baseline, dataset=dataset, distortion_type=distortion_type,
                                mean=0, std=0, random=False, boosting_perc=1.0,
                                property='cross_entropy', property_perc=p, most=True, alternate=False,
                                classes=classes, classes_to_distort=None)
        experiment.run()
        experiment = Experiment(baseline=baseline, dataset=dataset, distortion_type=distortion_type,
                                mean=0, std=0, random=False, boosting_perc=1.0,
                                property='cross_entropy', property_perc=p, most=False, alternate=False,
                                classes=classes, classes_to_distort=None)
        experiment.run()
        experiment = Experiment(baseline=baseline, dataset=dataset, distortion_type=distortion_type,
                                mean=0, std=0, random=True, boosting_perc=1.0,
                                property='cross_entropy', property_perc=p, most=True, alternate=False,
                                classes=classes, classes_to_distort=None)
        experiment.run()


    exit()
    #
    # for p in property_perc:
Example no. 12
import json
import logging
import os
import pickle
import sys
from importlib import import_module

logger = logging.getLogger(__name__)  # module-level logger assumed by this snippet


def run(output_dir, gpu_id: int, config_name, **override_config):
    """Run a predefined experiment config and save its outputs.

    :param output_dir: Base output directory (a sub-directory named after the config is created)
    :param gpu_id: GPU (-1 == CPU)
    :param config_name: Predefined experiment config
    :param override_config: Use kwargs to override config variables, e.g., --foo__bar=1 (nested dict with __)
    """
    output_dir = os.path.join(output_dir, config_name)

    logger.info(f'Starting... {config_name}')

    if os.path.exists(output_dir):
        logger.error(f'Output dir exists already: {output_dir}')
        sys.exit(1)

    # GPU
    if gpu_id < 0:
        logger.info('GPU is disabled')
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)  # check with nvidia-smi

        import torch

        if not torch.cuda.is_available():
            logger.error('CUDA is not available!')
            sys.exit(1)

    # Predefined configs
    config_name = 'experiments.predefined.' + config_name
    try:
        package, module_name = config_name.rsplit('.', 1)
        module = import_module(package)
        config = getattr(module, module_name)

        assert isinstance(config, dict)
    except ValueError:
        logger.error(f'Cannot load experiment config from: {config_name}')
        sys.exit(1)

    # Override config
    from experiments.predefined import update
    from experiments.utils import unflatten

    if override_config:
        override_config = unflatten(override_config)

        logger.info(f'Override config with: {override_config}')
        config = update(config, override_config)

    from experiments import Experiment

    exp = Experiment(**config)

    exp.run(mode=2)

    # save
    os.makedirs(output_dir)
    exp.output_dir = output_dir

    with open(os.path.join(output_dir, 'experiment.pickle'), 'wb') as f:
        # json.dump(exp.to_dict(), f)
        pickle.dump(exp.to_dict(), f)

    with open(os.path.join(output_dir, 'reports.json'), 'w') as f:
        json.dump(exp.reports, f)

    exp.save()

    logger.info('Done')
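
The docstring in Example no. 12 notes that kwargs such as --foo__bar=1 become nested config overrides via '__'. A minimal sketch of what the unflatten and update helpers presumably do follows; the real implementations live in experiments.utils and experiments.predefined and may differ.

def unflatten(flat, sep='__'):
    # {'foo__bar': 1} -> {'foo': {'bar': 1}}
    nested = {}
    for key, value in flat.items():
        parts = key.split(sep)
        node = nested
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        node[parts[-1]] = value
    return nested


def update(base, overrides):
    # Recursively merge overrides into base, replacing non-dict leaves.
    for key, value in overrides.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            update(base[key], value)
        else:
            base[key] = value
    return base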