Example #1
# Imports as used in Example #6 (same project)
import pyphi

from utils import Experiment
from master import WorkerFactory, start_master


def test_factory():

    # Generate the network & experiment.
    # This should generally be done in a separate script
    network = pyphi.examples.basic_network()
    state = (1, 0, 0)
    experiment = Experiment('test_factory', '1.0', network, state)
    experiment.initialize()

    port = 10030
    mechanisms = pyphi.utils.powerset(network.node_indices, nonempty=True)

    with WorkerFactory(experiment) as factory:
        start_master(experiment, mechanisms, port)

    # Load CES
    ces = experiment.load_ces()
    print(ces)

    # Check results
    reference_ces = pyphi.compute.ces(pyphi.Subsystem(network, state))
    assert ces.phis == reference_ces.phis
    assert ces.mechanisms == reference_ces.mechanisms

    print('All good!')
Example #2
# Imports as used in Example #6 (same project)
import subprocess

import pyphi

from utils import Experiment
from master import start_master


def test_simple():

    network = pyphi.examples.basic_network()
    state = (1, 0, 0)
    experiment = Experiment('test_simple', '1.0', network, state)
    experiment.initialize()

    mechanisms = pyphi.utils.powerset(network.node_indices, nonempty=True)

    print('Starting worker...')
    # Launch a Work Queue worker that finds the master by project name (-N)
    # and authenticates with the shared password file (-P).
    worker = subprocess.Popen([
        'work_queue_worker',
        '-N', experiment.project_name,
        '-P', experiment.password_file,
        # '-d', 'all'  # uncomment for full debug output
    ])

    try:
        print('Starting master...')
        start_master(experiment,
                     mechanisms,
                     port=10021,
                     timeout=0,
                     n_divisions=2)
    finally:
        print('Killing worker...')
        worker.kill()
        worker.wait()

    print('Done.')

    ces = experiment.load_ces()
    print(ces)

    reference_ces = pyphi.compute.ces(pyphi.Subsystem(network, state))
    assert ces.phis == reference_ces.phis
    assert ces.mechanisms == reference_ces.mechanisms

    print('All good!')
Example #3
import pathlib
import pprint

APEX_AVAILABLE = False
if args.fp16:
    try:
        from apex import amp
        APEX_AVAILABLE = True
    except ModuleNotFoundError:
        raise ImportError(
            "Please install apex from https://www.github.com/nvidia/apex to run this example."
        )
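# Once amp imports successfully, the model and optimizer are typically wrapped
# before training and losses are scaled during backward. A minimal sketch of
# the standard apex.amp pattern (the wrapping site and opt_level are
# assumptions; they are not shown in this snippet):
#
#     model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
#     ...
#     with amp.scale_loss(loss, optimizer) as scaled_loss:
#         scaled_loss.backward()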

rootdir = pathlib.Path(".")

from utils import Experiment
exp = Experiment(rootdir / "models" / args.name)
logging = exp.get_logger()  # note: shadows the stdlib logging module; used as a callable below

logging('=' * 60)
pprint.pprint(args.__dict__)
logging('=' * 60)

# tensorboard stuff
#writer = SummaryWriter()


def run_epoch(model, loader, opt=None, scheduler=None, _epoch=-1):
    # eval mode when no optimizer is given (validation); train mode otherwise
    model.eval() if opt is None else model.train()
    dev = next(model.parameters()).device
    batch_id = 0
    total_loss = 0  # loss for log interval
Example #4
    logger.info("INFO: Starting inference...")
    evaluator_engine.run(loader_test)

    save_predictions(evaluator_engine,
                     os.path.splitext(m_cp_path)[0],
                     accumulate_predictions.state_dict)


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument('-c',
                        '--config',
                        default=None,
                        type=str,
                        required=True,
                        help='config file path (default: None)')
    parser.add_argument('--checkpoint',
                        default=None,
                        type=str,
                        help='Checkpoint to use for test')
    args = parser.parse_args()

    config = Experiment.load_from_path(args.config)

    if args.checkpoint:
        config.checkpoint = args.checkpoint

    assert config, "Config could not be loaded."

    main(config)
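
# Typical invocation (the script and path names are hypothetical):
#   python test.py -c configs/experiment.json --checkpoint results/best_checkpoint.pth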
Example #5
        #print(inpt,'\n',sw,'\n',swN, '\n''------''\n')
        # Convert the binary state tuple to its integer row index in the TPM
        # (little-endian: node v contributes istate[v] * 2**v)
        V = 0
        for v in range(0, nN):
            V = V + istate[v] * 2**v
        tpm[int(V)] = tuple(swN)

    # Create the connectivity matrix (1 wherever there is a nonzero weight)
    cm = np.where(weights != 0, 1, 0)

    # Transpose our (receiving, sending) CM to use the PyPhi convention of (sending, receiving)
    cm = np.transpose(cm)

    # Create the network
    subsystem_labels = ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
                        'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T')
    network = pyphi.Network(tpm, cm, subsystem_labels)

    # In[5]:

    # Set state and create subsystem
    #A  B  C  D  E  F  G  H  I  J  K  L  M  N  O  P  Q  R  S  T#
    state = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
    subsystem = pyphi.Subsystem(network, state, range(network.size))
    A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T = subsystem.node_indices

    pyphi.config.REPR_VERBOSITY = 1

    experiment = Experiment('largepyr', '2.1', network, state)
    experiment.initialize()
Example #6
import itertools
import pyphi
from utils import Experiment
from master import WorkerFactory, start_master

if __name__ == '__main__':
    elements = list(range(20))
    state = [0 for x in elements]

    experiment = Experiment('iv_manet', '5.0', None, state)

    def mechanisms_for_order(elements, n):
        # All mechanisms of a given order, e.g. order 2 over [0, 1, 2]
        # yields [[0, 1], [0, 2], [1, 2]].
        return pyphi.utils.combs(elements, n).tolist()

    # fourth = mechanisms_for_order(elements, 4)
    # random.shuffle(fourth)

    mechanisms = itertools.chain(
        mechanisms_for_order(elements, 3),
        mechanisms_for_order(elements, 2),
        mechanisms_for_order(elements, 1),
        # fourth
    )

    port = 10010

    with WorkerFactory(experiment, memory=10024) as f:
        start_master(experiment, mechanisms, port=port, n_divisions=10)
Example #7
import torch
from torch import optim
from torchvision import transforms

# Project-local helpers assumed in scope: CNN, get_wiki_data_loader,
# Experiment, nll_loss; a sketch of update_lr_epoch follows the function.


def main(args):
    num_epochs = args.ne
    batch_size = args.bs

    transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(227),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    dataset_path = args.dataset_path
    json_labels_path = args.json_labels_path

    data_loader = get_wiki_data_loader(dataset_path,
                                       json_labels_path,
                                       transform,
                                       batch_size,
                                       shuffle=True,
                                       num_workers=args.n_workers)

    if args.out_dim == -1:
        out_dim = None
    else:
        out_dim = args.out_dim
    if args.ttn:
        cnn = CNN(args.n_topics,
                  args.n_kernels,
                  mixture_model=False,
                  cnn=args.cnn,
                  pretrained=args.pretrained)
    else:
        cnn = CNN(args.n_topics,
                  args.n_kernels,
                  out_dim=out_dim,
                  cnn=args.cnn,
                  pretrained=args.pretrained)

    if torch.cuda.is_available():
        cnn.cuda()
    cnn.train()

    optimizer = optim.SGD(cnn.parameters(), lr=args.lr, momentum=args.mm)
    # optimizer = optim.Adam(cnn.parameters())
    # optimizer = optim.RMSprop(cnn.parameters(), lr= args.lr, momentum=args.mm)

    exp = Experiment(args, args.exp_name)

    if args.ttn:
        loss_fn = torch.nn.modules.loss.BCEWithLogitsLoss(reduction='sum')
    else:
        loss_fn = nll_loss

    learning_rate = args.lr
    losses = []

    for epoch in range(num_epochs):
        learning_rate = update_lr_epoch(epoch, args, learning_rate, optimizer)

        for step, (images, ts) in enumerate(data_loader):
            if torch.cuda.is_available():
                images = images.cuda()
                ts = ts.cuda()
            cnn.zero_grad()

            if not args.ttn:
                alpha, sigma, mu = cnn(images)
                loss = loss_fn(alpha, sigma, mu, ts)
            else:
                out = cnn(images)
                loss = loss_fn(out, ts)
            loss.backward()
            if args.clipping != 0.:
                torch.nn.utils.clip_grad_norm_(cnn.parameters(), args.clipping)
            optimizer.step()

            losses.append(float(loss))
            exp.save_loss(epoch, step, loss)
            print(f'Epoch {epoch + 1}/{num_epochs}'
                  f' - Step {step + 1}/{len(data_loader)}'
                  f' - Loss: {float(loss)}')
        exp.save_loss_epoch(epoch, losses)
        losses = []
        if epoch % args.save_epoch == 0 and epoch > 0:
            exp.save_model(epoch, cnn)

    exp.save_model('last', cnn)
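
# update_lr_epoch is called above but not defined in this snippet. A minimal
# sketch, assuming plain step decay driven by hypothetical args.lr_decay_epoch
# and args.lr_decay fields:
def update_lr_epoch(epoch, args, learning_rate, optimizer):
    # Decay the learning rate every args.lr_decay_epoch epochs and push the
    # new value into the optimizer's parameter groups.
    if epoch > 0 and epoch % args.lr_decay_epoch == 0:
        learning_rate *= args.lr_decay
        for param_group in optimizer.param_groups:
            param_group['lr'] = learning_rate
    return learning_rate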
Example #8
    parser.add_argument('-c', '--config', default=None, type=str, required=True, help='config file path (default: None)')
    parser.add_argument('--checkpoint', default=None, type=str, help='Checkpoint tag to reload')    
    parser.add_argument('--checkpoint_dir', default=None, type=str, help='Checkpoint directory to reload')
    parser.add_argument('--suffix', default=None, type=str, help='Add to the name')
    parser.add_argument('--epochs', default=None, type=int, help='Number of epochs')
    parser.add_argument('--resume_from', default=None, type=int, help='Epoch to resume from, allows using checkpoints as initialisation')
    args = parser.parse_args()

    OVERLOADABLE = ['checkpoint', 'epochs', 'checkpoint_dir', 'resume_from']

    overloaded = {}
    for k, v in vars(args).items():
        if (k in OVERLOADABLE) and (v is not None):
            overloaded[k] = v

    config = Experiment.load_from_path(args.config, overloaded, args.suffix)

    assert config, "Config could not be loaded."

    # If resuming, load the saved config from the results dir, or throw an error if one doesn't exist
    if len(config.checkpoint) > 0:
        logger.warning("WARNING: --config specifies resuming, overriding config with exising experiment config.")
        # resume_config = Experiment(config.name, desc=config.desc, result_dir=config.result_dir).load()
        # assert resume_config is not None, "No experiment {} exists, cannot resume training".format(config.name)
        # config = resume_config
        assert config, "Config could not be loaded for resume"
    # If resume_from is set < 0 in the config (a fresh training run), throw an error if the results directory already exists
    elif config.overwrite is False:
        assert not config.exists(), "Results directory {} already exists! Please specify a new experiment name or remove the old files.".format(config.result_path)
    else:
        empty_folder(config.result_path)
Example #9
import logging
import os

import scipy.io as sio
import torch

# Project-local modules assumed in scope: Experiment, data_management,
# plotting_utils, OneStepDataSetComplex.


def main():
    ###########################################################################
    ### SPECIFY DATA PATHS
    P = '/local/meliao/projects/fourier_neural_operator/'
    DATA_DIR = os.path.join(P, 'data/')
    MODEL_DIR = os.path.join(P, 'experiments/31_different_activations/models')
    PLOTS_DIR = os.path.join(P, 'experiments/31_different_activations/plots/')
    RESULTS_DIR = os.path.join(P,
                               'experiments/31_different_activations/results')
    if not os.path.isdir(PLOTS_DIR):
        os.mkdir(PLOTS_DIR)

    ###########################################################################
    ### LOAD DATA

    FP_FMT = "2021-09-29_NLS_data_00_{}_test.mat"
    # DSET_KEYS = ['00', '01', '02', '03', '04']
    DSET_NAME_DD = {
        '00': 'Flat DFT Coeffs on [1, ..., 5]',
        '01': 'GRF Original',
        '02': 'GRF on [1, ..., 5]',
        '03': 'GRF high coefficient decay',
        '04': 'GRF low coefficient decay'
    }
    data_fp_dd = {
        i: os.path.join(DATA_DIR, FP_FMT.format(i))
        for i in DSET_NAME_DD.keys()
    }

    raw_data_dd = {k: sio.loadmat(v) for k, v in data_fp_dd.items()}

    data_dd = {
        k: OneStepDataSetComplex(v['output'], v['t'], v['x'])
        for k, v in raw_data_dd.items()
    }

    # data_dd = sio.loadmat(DATA_FP)
    # dset = OneStepDataSetComplex(data_dd['output'], data_dd['t'], data_dd['x'])

    ###########################################################################
    ### SET UP EXPERIMENT

    experiment = Experiment.MultiDataExperiment(name="31")
    experiment.register_data_variable(data_dd)

    ACTIVATIONS = ['tanh', 'sigmoid', 'relu', 'sin']
    # ACTIVATIONS = ['relu', 'sin']
    K_ACTIVATION = 'activation'
    experiment.register_new_variable(K_ACTIVATION, ACTIVATIONS)

    ###########################################################################
    ### LOAD MODELS

    MODEL_FMT = "time_10_dset_{dataset}_activation_{activation}_ep1000"
    experiment.load_models(MODEL_DIR, MODEL_FMT)

    ###########################################################################
    ### MAKE PREDICTIONS

    with torch.no_grad():
        experiment.run_prediction_and_errors(
            prediction_fn=data_management.prediction_fn_composed,
            data_spec_fn=data_management.data_spec_fn_fno,
            error_fn=data_management.error_fn_l2_normalized)

    sin_relu_dd = {'activation': {'sin', 'relu'}}
    for dset_key, dset_name in DSET_NAME_DD.items():
        logging.info(f"Plotting errors on dset {dset_key}")

        # make plot for all activation types
        plot_fp = os.path.join(PLOTS_DIR,
                               f"dset_{dset_key}_composed_errors_all.png")
        dset_vals = {dset_key}
        filter_dd = {'dataset': dset_vals}
        errors_dd = experiment.get_error_dd("{activation}",
                                            remove_first_row=True,
                                            **filter_dd)
        plotting_utils.plot_time_errors(errors_dd=errors_dd,
                                        title=f"Errors on dataset {dset_name}",
                                        fp=plot_fp)

        # make plot for sin and relu
        plot_fp_i = os.path.join(
            PLOTS_DIR, f"dset_{dset_key}_composed_errors_sin-relu.png")
        sin_relu_dd['dataset'] = dset_vals
        errors_dd_i = experiment.get_error_dd("{activation}",
                                              remove_first_row=True,
                                              **sin_relu_dd)
        plotting_utils.plot_time_errors(errors_dd=errors_dd_i,
                                        title=f"Errors on dataset {dset_name}",
                                        fp=plot_fp_i)

        # make predictions plot for sin and relu
        preds_dd = experiment.get_preds_dd("{activation}", **sin_relu_dd)
        for test_case in range(5):
            plot_fp_i = os.path.join(
                PLOTS_DIR,
                f'test_case_{test_case}_dset_{dset_key}_sin-relu.png')

            preds_dd_for_plt = {k: v[test_case] for k, v in preds_dd.items()}
            solns = experiment.dataset_dd[dset_key].X[test_case]
            plotting_utils.plot_one_testcase_panels(
                preds_dd=preds_dd_for_plt,
                solns=solns,
                show_n_timesteps=5,
                title=f"Testcase {test_case} on dataset {dset_name}",
                fp=plot_fp_i)
Example #10
import torch
from torch import distributions, optim
from torch.nn.utils import clip_grad_norm_

# Project-local helpers assumed in scope: Experiment, make_env,
# RecurrentBackpropamineAgent, ForwardBackpropamineAgent.


def main(args):
    experiment = Experiment(args)
    experiment.show_args()
    experiment.export_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    env = make_env(args.env_id, args.batch_size, seed=args.seed)

    if args.recurrent:
        agent = RecurrentBackpropamineAgent(
            env.obs_shape,
            feature_size=args.feature_size,
            hidden_size=args.hidden_size,
            action_size=env.action_size,
        ).to(device)
    else:
        agent = ForwardBackpropamineAgent(
            env.obs_shape,
            feature_size=args.feature_size,
            hidden_size=args.hidden_size,
            action_size=env.action_size,
        ).to(device)

    optimizer = optim.Adam(agent.parameters(), lr=args.lr)
    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=args.lr_decay_step_size,
                                          gamma=args.lr_decay_gamma)

    for lifetime in range(args.num_lifetimes):
        obs = env.reset().to(device)

        h = hebb = None

        # initialize previous actions and rewards
        prev_action = torch.zeros(args.batch_size, env.action_size).to(device)
        prev_reward = torch.zeros(args.batch_size, 1).to(device)

        lifetime_reward = []
        lifetime_action_log_prob = []
        lifetime_entropy = []
        lifetime_value_pred = []

        for step in range(args.lifetime_length):
            action_probs, value_pred, m, h, hebb = agent(
                obs, prev_action, prev_reward, h, hebb)
            pi = distributions.OneHotCategorical(probs=action_probs)
            action_one_hot = pi.sample()
            action_log_prob = pi.log_prob(action_one_hot).unsqueeze(1)
            entropy = pi.entropy().unsqueeze(1)

            action = torch.argmax(action_one_hot, dim=1).cpu()
            obs, reward = env.step(action)
            obs = obs.to(device)
            reward = reward.to(device)

            prev_action = action_one_hot
            prev_reward = reward

            lifetime_reward.append(reward)
            lifetime_action_log_prob.append(action_log_prob)
            lifetime_entropy.append(entropy)
            lifetime_value_pred.append(value_pred)

        # lifetime over! compute losses and train the agent

        policy_loss = 0.0
        value_loss = 0.0

        gae = 0.0  # generalized advantage estimation
        ret = 0.0  # return / utility

        lifetime_value_pred.append(0.0)  # assuming end of "episode"

        # Walk backward through the lifetime, accumulating the discounted
        # return and the generalized advantage estimate (GAE):
        #   ret_t    = r_t + gamma * ret_{t+1}
        #   td_err_t = r_t + gamma * V(s_{t+1}) - V(s_t)
        #   gae_t    = td_err_t + gamma * lam * gae_{t+1}
        for t in reversed(range(args.lifetime_length)):
            reward = lifetime_reward[t]
            value_next = lifetime_value_pred[t + 1]
            value = lifetime_value_pred[t]
            action_log_prob = lifetime_action_log_prob[t]
            entropy = lifetime_entropy[t]

            ret = args.gamma * ret + reward
            td_err = reward + args.gamma * value_next - value
            gae = args.gamma * args.lam * gae + td_err

            policy_loss -= (action_log_prob * gae.detach() +
                            args.entropy_coef * entropy)
            value_loss += 0.5 * (ret - value)**2

        policy_loss = policy_loss.mean(dim=0) / args.lifetime_length
        value_loss = value_loss.mean(dim=0) / args.lifetime_length

        loss = policy_loss + args.value_coef * value_loss
        mean_reward = sum(lifetime_reward).mean(dim=0).item()

        optimizer.zero_grad()
        loss.backward()
        clip_grad_norm_(agent.parameters(), args.max_grad_norm)
        optimizer.step()
        scheduler.step()

        if (lifetime + 1) % args.log_interval == 0:
            experiment.log(
                lifetime,
                policy_loss=policy_loss.item(),
                value_loss=value_loss.item(),
                total_loss=loss.item(),
                mean_reward=mean_reward,
            )

        if (lifetime + 1) % args.checkpoint_interval == 0:
            experiment.checkpoint(lifetime, agent, optimizer, loss)

        if (lifetime + 1) % args.update_best_interval == 0:
            experiment.update_best(agent, mean_reward)
Example #11
import logging
import os

import scipy.io as sio
import torch

# Project-local modules assumed in scope: Experiment, data_management,
# OneStepDataSetComplex, FreqScalingDataSet.


def main():
    ###########################################################################
    ### SPECIFY DATA PATHS
    logging.info("Beginning plotting")
    P = '/local/meliao/projects/fourier_neural_operator/'
    DATA_DIR = os.path.join(P, 'data/')
    MODEL_DIR = os.path.join(P, 'experiments/31_different_activations/models')
    PLOTS_DIR = os.path.join(P, 'experiments/32_scaling_for_freq/plots/')
    if not os.path.isdir(PLOTS_DIR):
        os.mkdir(PLOTS_DIR)

    ###########################################################################
    ### LOAD DATA

    FP_FMT = "2021-09-29_NLS_data_00_{}_test.mat"
    DSET_NAME_DD = {
        # '00': 'Flat DFT Coeffs on [1, ..., 5]',
        '01': 'GRF Original',
        # '02': 'GRF on [1, ..., 5]',
        # '03': 'GRF high coefficient decay',
        '04': 'GRF low coefficient decay'
    }
    data_fp_dd = {
        i: os.path.join(DATA_DIR, FP_FMT.format(i))
        for i in DSET_NAME_DD.keys()
    }

    raw_data_dd = {k: sio.loadmat(v) for k, v in data_fp_dd.items()}

    data_dd = {
        k: OneStepDataSetComplex(v['output'], v['t'], v['x'])
        for k, v in raw_data_dd.items()
    }

    # Frequency-scaled variants of dataset '04', with scale_param 1/2 and 1/3
    scaled_data_dd = {
        k: FreqScalingDataSet(raw_data_dd['04']['output'],
                              raw_data_dd['04']['t'],
                              raw_data_dd['04']['x'],
                              scale_param=1 / k)
        for k in [2, 3]
    }
    ###########################################################################
    ### SET UP COMPOSED PREDS EXPERIMENT AND MAKE PREDICTIONS

    experiment1 = Experiment.MultiDataExperiment(name="Standard data dd")
    experiment1.register_data_variable(data_dd)

    ACTIVATIONS = ['relu', 'sin']

    K_ACTIVATION = 'activation'
    experiment1.register_new_variable(K_ACTIVATION, ACTIVATIONS)

    MODEL_FMT = "time_10_dset_{dataset}_activation_{activation}_ep1000"
    experiment1.load_models(model_dir=MODEL_DIR, model_fmt=MODEL_FMT)

    with torch.no_grad():
        experiment1.run_prediction_and_errors(
            prediction_fn=data_management.prediction_fn_composed,
            data_spec_fn=data_management.data_spec_fn_fno,
            error_fn=data_management.error_fn_l2_normalized)
    ###########################################################################
    ### SET UP SCALING BY 2 EXPERIMENT

    experiment2 = Experiment.MultiDataExperiment(name='Scaling dd')
    experiment2.register_data_variable(scaled_data_dd)
    experiment2.register_new_variable(K_ACTIVATION, ['relu', 'sin'])
    experiment2.register_new_variable('train_dset', ['01', '03', '04'])
    MODEL_FMT = "time_10_dset_{train_dset}_activation_{activation}_ep1000"

    experiment2.load_models(model_dir=MODEL_DIR, model_fmt=MODEL_FMT)

    with torch.no_grad():
        experiment2.run_prediction_and_errors(
            prediction_fn=data_management.prediction_fn_scale_for_freq,
            data_spec_fn=data_management.data_spec_fn_fno,
            error_fn=data_management.error_fn_l2_normalized)
Example #12
import pandas as pd
from utils import read_stimuli, Experiment, Stimulus, Trial
from random import shuffle
import psychopy as psy

path = "C:\\Users\\Sophie\\Documents\\Formación y Trabajo\Werk\\PhD MPI\\2020-IMPRS-python-course\\lexical-decision"

# initiate the experiment
experiment = Experiment(window_size=[800, 600],
                        text_color=-1,
                        background_color=1)

# load the stimulus table and build a Stimulus object for each row
stimuli = read_stimuli()
stimulus_objects = [
    Stimulus(stim["stim_id"], stim["freq_category"], stim["word"], path)
    for _, stim in stimuli.iterrows()
]
trials = [
    Trial(experiment, stimulus, delay=0.0, max_target_time=2)
    for stimulus in stimulus_objects
]

# shuffle the stimuli
shuffle(trials)

# Dutch instructions shown to the participant: "Welcome! In this experiment
# you will listen to short fragments. Your task is to decide whether what you
# hear is a real word or not. Press Z for 'yes' and M for 'no'. The experiment
# takes about 10 minutes."
experiment.show_message(
    "Welkom!\n\nIn dit experiment ga je naar korte fragmenten luisteren. " +
    "Het is jouw taak om te beslissen of wat je hoort een echt woord is of niet.\n"
    + "Druk op Z voor \'ja\' en M voor \'nee\'.\n\n" +
    "Het experiment duurt ongeveer 10 minuten.\n" +
Example #13
    logger.info("INFO: Starting training...")
    trainer_engine.run(loader_train, max_epochs=config.epochs)


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument('-c',
                        '--config',
                        default=None,
                        type=str,
                        required=True,
                        help='config file path (default: None)')
    args = parser.parse_args()

    config = Experiment.load_from_path(args.config)

    assert config, "Config could not be loaded."

    # If resuming, load the saved config from the results dir, or throw an error if one doesn't exist
    if config.resume_from >= 0:
        logger.warning(
            "WARNING: --config specifies resuming, overriding config with exising experiment."
        )
        resume_config = Experiment(config.name,
                                   desc=config.desc,
                                   result_dir=config.result_dir).load()
        assert resume_config is not None, "No experiment {} exists, cannot resume training".format(
            config.name)
        config = resume_config
        assert config, "Config could not be loaded for resume"
Example #14
import numpy as np
from tqdm import tqdm
import os
import matplotlib.pyplot as plt

file = 'reports.pbz2'
open_reports = True
n_trials = 1000

if not os.path.isfile(file):  # if the saved experiments are not on disk
    probs = 1 / np.logspace(0, 4, 10)
    lengths = np.logspace(1, 5, 10, dtype=int)
    experiments = []
    for n in tqdm(lengths, ncols=100):
        for p in probs:
            current_experiment = Experiment(p, n, n_trials)
            current_experiment.make_report()
            experiments.append(current_experiment)

    reports = {repr(experiment): experiment.report for experiment in experiments}
    compressed_pickle(file, reports)
    open_reports = False
    del experiments

if open_reports:  # if the "reports" variable was not assigned above
    reports = decompress_pickle(file)

percentage_more_than_one = []

for name, report in reports.items():
    plot_confussion_matrix(report, title=name, save_as=f'cms/{name}')
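
# compressed_pickle and decompress_pickle are imported helpers that are not
# shown in this snippet. A minimal sketch of the bz2-backed pattern they
# appear to follow (an assumption based on the .pbz2 file name):
import bz2
import pickle


def compressed_pickle(file, data):
    # Write `data` to `file` as a bz2-compressed pickle.
    with bz2.BZ2File(file, 'wb') as f:
        pickle.dump(data, f)


def decompress_pickle(file):
    # Load an object back from a bz2-compressed pickle.
    with bz2.BZ2File(file, 'rb') as f:
        return pickle.load(f)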
Example #15
            gamma=1e-3,
            eps=1e-8,
            weight_decay=0,
            amsbound=False,
        )
    # Adaptive learning rate
    # scheduler_lr_decay_sent = torch.optim.lr_scheduler.MultiStepLR(sent_optmizer, milestones=[0,1], gamma=0.7)

    return adv_optimizer


###############################################################################
# --- Setting up (hyper)parameters ---
###############################################################################
# Experiment folder
exp = Experiment(tensorboard=args.tensorboard)
exp.args = args
momentum = 0.9
log_interval = 50

# -- Deterministic run--
# os.environ['PYTHONHASHSEED'] = str(args.seed)
# np.random.seed(args.seed)
# random.seed(args.seed)
# torch.backends.cudnn.enabled = False
# torch.manual_seed(args.seed)
# torch.cuda.manual_seed_all(args.seed)
# if args.gpu:
#     torch.cuda.manual_seed(args.seed)
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
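
# The commented-out block above can be wrapped in a helper so that runs opt in
# to determinism explicitly. A sketch assembled from those same lines:
import os
import random

import numpy as np
import torch


def set_deterministic(seed):
    # Seed every RNG the block above touches, then force deterministic cuDNN
    # kernels (at some cost in speed).
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False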
Example #16
import pyphi
import numpy as np
from utils import Experiment
import pickle
pyphi.config.PARTITION_TYPE = 'TRI'
pyphi.config.MEASURE = 'BLD'

# Weights matrix
with open('iv_manet5.0_network.pkl', 'rb') as f:
    network = pickle.load(f)

nN = network.cm.shape[0]
elements = list(range(nN))

cstate = [0 for x in elements]

nameformat = 'iv_manet'

experiment = Experiment(nameformat, '5.0', network, cstate)
experiment.initialize()

print('success!')
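
# Once initialized, the experiment's mechanisms are normally dispatched to
# workers (see Examples #1, #2 and #6). For a network small enough to handle
# locally, the same pyphi calls used in Example #1 would apply -- a sketch,
# not part of this script:
#
#     subsystem = pyphi.Subsystem(network, cstate)
#     ces = pyphi.compute.ces(subsystem)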