Example #1
def evaluate(args):
    utils.ensure_dir(args['save_dir'])
    model_text_file = os.path.join(args['save_dir'],
                                   'TEXT_' + args['save_name_suffix'] + '.pt')
    model_news_file = os.path.join(args['save_dir'],
                                   'NEWS_' + args['save_name_suffix'] + '.pt')
    pretrain = Pretrain(args['vec_file'], logger_name='evaluate')
    use_cuda = args['cuda'] and not args['cpu']

    def _eval(eval_name):
        if eval_name == 'text':
            model_file = model_text_file
            test_file = args['test_text_file']
            output_file = os.path.join(
                args['output_file_path'],
                'TEST_Text_' + args['save_name_suffix'] + '.conllu')
            gold_file = args['test_text_file']
        elif eval_name == 'news':
            model_file = model_news_file
            test_file = args['test_news_file']
            output_file = os.path.join(
                args['output_file_path'],
                'TEST_News_' + args['save_name_suffix'] + '.conllu')
            gold_file = args['test_news_file']
        else:
            raise ValueError('bad eval name')
        trainer = Trainer(pretrain=pretrain,
                          model_file=model_file,
                          use_cuda=use_cuda)
        loaded_args, vocab = trainer.args, trainer.vocab
        test_batch = DataLoader(test_file,
                                1000,
                                args,
                                pretrain,
                                vocab=vocab,
                                evaluation=True)
        preds = []
        if len(test_batch) > 0:
            print(f"{eval_name}: starting evaluation...")
            for b in test_batch:
                preds += trainer.predict(b)
        # flatten the per-sentence predictions into one token-level list
        test_batch.conll.set(['deps'], [y for x in preds for y in x])
        test_batch.conll.write_conll(output_file)
        if gold_file is not None:
            UAS, LAS = sdp_simple_scorer.score(output_file, gold_file)
            print(f"{eval_name}: Test Dataset Parser score:")
            print(
                f'{eval_name} LAS:{LAS * 100:.3f}\t{eval_name} UAS:{UAS * 100:.3f}'
            )

    _eval('text')
    _eval('news')
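
# A minimal usage sketch for evaluate() (commented out; the dict keys are
# the ones read above, every value here is a hypothetical placeholder, and
# DataLoader may read further keys from the same dict):
# evaluate({
#     'save_dir': 'saved_models',
#     'save_name_suffix': 'run1',
#     'vec_file': 'embeddings/vec.pkl',
#     'cuda': True,
#     'cpu': False,
#     'test_text_file': 'data/test_text.conllu',
#     'test_news_file': 'data/test_news.conllu',
#     'output_file_path': 'output',
# })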
Example #2

import argparse
import os

from particles import Particle
from library.SM import particles as SMP
from evolution import Universe
from common import UNITS, Params, utils

parser = argparse.ArgumentParser(description='Run simulation for given mass and mixing angle')
parser.add_argument('--mass', required=True)
parser.add_argument('--theta', required=True)
parser.add_argument('--tau', required=True)
parser.add_argument('--comment', default='')
args = parser.parse_args()

mass = float(args.mass) * UNITS.MeV
theta = float(args.theta)
lifetime = float(args.tau) * UNITS.s

folder = utils.ensure_dir(
    os.path.split(__file__)[0],
    "mass={:e}_tau={:e}_theta={:e}".format(mass / UNITS.MeV, lifetime / UNITS.s, theta)
    + args.comment
)

T_initial = 400. * UNITS.MeV
T_final = 0.0008 * UNITS.MeV
params = Params(T=T_initial,
                dy=0.05)

universe = Universe(params=params, folder=folder)

photon = Particle(**SMP.photon)

electron = Particle(**SMP.leptons.electron)
muon = Particle(**SMP.leptons.muon)
tau = Particle(**SMP.leptons.tau)
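
# The script would typically go on to register the particles and integrate
# down to T_final. A sketch only; the method names below are assumptions
# based on the pyBBN-style API used above, not confirmed by this snippet:
# universe.add_particles([photon, electron, muon, tau])
# universe.evolve(T_final)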
Example #3
def main():
    ## Uninteresting setup, start up the visu process,...
    logfile = make_logfile_name()
    ensure_dir(logfile)
    f_h = logging.FileHandler(logfile)
    f_h.setLevel(SUBDEBUG)
    d_h = logging.StreamHandler()
    d_h.setLevel(INFO)
    utils.configure_loggers(debug_handler=d_h, file_handler=f_h)
    parent_conn, child_conn = multiprocessing.Pipe()
    p = multiprocessing.Process(
        target=visualisation.visualisation_process_f,
        name="display_process", args=(child_conn, LOGGER))
    p.start()

    pynnn.setup(timestep=SIMU_TIMESTEP)
    init_logging("logfile", debug=True)
    LOGGER.info("Simulation started with command: %s", sys.argv)

    ## Network setup
    # First population
    p1 = pynnn.Population(100, pynnn.IF_curr_alpha,
                          structure=pynnn.space.Grid2D())
    p1.set({'tau_m':20, 'v_rest':-65})
    # Second population
    p2 = pynnn.Population(20, pynnn.IF_curr_alpha,
                          cellparams={'tau_m': 15.0, 'cm': 0.9})
    # Projection 1 -> 2
    prj1_2 = pynnn.Projection(
        p1, p2, pynnn.AllToAllConnector(allow_self_connections=False),
        target='excitatory')
    # I may need to make my own PyNN Connector class. Otherwise, this is
    # neat: the connection probability decays exponentially with distance.
    # Distance is computed from x and y only, which live on a toroidal
    # topology with boundaries at 0 and 500.
    connector = pynnn.DistanceDependentProbabilityConnector(
        "exp(-abs(d))",
        space=pynnn.Space(
            axes='xy', periodic_boundaries=((0,500), (0,500), None)))
    # Alternately, the powerful connection set algebra (python CSA
    # module) can be used.
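    # For instance (untested sketch; assumes the python csa package and a
    # CSAConnector in this PyNN build):
    # import csa
    # prj_csa = pynnn.Projection(
    #     p1, p2, pynnn.CSAConnector(csa.random(0.1)),
    #     target='excitatory')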
    weight_distr = pynnn.RandomDistribution(distribution='gamma',
                                            parameters=[1,0.1])
    prj1_2.randomizeWeights(weight_distr)

    # This one is in NEST but not in Brian:
    # source = pynnn.NoisyCurrentSource(
    #     mean=100, stdev=50, dt=SIMU_TIMESTEP, 
    #     start=10.0, stop=SIMU_DURATION, rng=pynnn.NativeRNG(seed=100)) 
    source = pynnn.DCSource(
        start=10.0, stop=SIMU_DURATION, amplitude=100) 
    source.inject_into(list(p1.sample(50).all()))

    p1.record(to_file=False)
    p2.record(to_file=False)

    ## Build and send the visualizable network structure
    adapter = pynn_to_visu.PynnToVisuAdapter(LOGGER)
    adapter.add_pynn_population(p1)
    adapter.add_pynn_population(p2)
    adapter.add_pynn_projection(p1, p2, prj1_2.connection_manager)
    adapter.commit_structure()
    
    parent_conn.send(adapter.output_struct)
    
    # Number of chunks to run the simulation:
    n_chunks = SIMU_DURATION // SIMU_TO_VISU_MESSAGE_PERIOD
    last_chunk_duration = SIMU_DURATION % SIMU_TO_VISU_MESSAGE_PERIOD
    # Run the simulator
    for visu_i in range(int(n_chunks)):
        pynnn.run(SIMU_TO_VISU_MESSAGE_PERIOD)
        parent_conn.send(adapter.make_activity_update_message())
        LOGGER.debug("real current p1 spike counts: %s",
                     p1.get_spike_counts().values())
    if last_chunk_duration > 0:
        pynnn.run(last_chunk_duration)
        parent_conn.send(adapter.make_activity_update_message())
    # Cleanup
    pynnn.end()
    # Wait for the visualisation process to terminate
    p.join(VISU_PROCESS_JOIN_TIMEOUT)
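
# Constants assumed by main() above; the real definitions live elsewhere in
# the project, so the values below are only illustrative:
# SIMU_TIMESTEP = 0.1                  # ms, simulator integration step
# SIMU_DURATION = 1000.0               # ms, total simulated time
# SIMU_TO_VISU_MESSAGE_PERIOD = 100.0  # ms between visualisation updates
# VISU_PROCESS_JOIN_TIMEOUT = 10       # s to wait for the display process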
Example #4
def train(args):
    utils.ensure_dir(args['save_dir'])
    model_text_file = os.path.join(args['save_dir'],
                                   'TEXT_' + args['save_name_suffix'] + '.pt')
    model_news_file = os.path.join(args['save_dir'],
                                   'NEWS_' + args['save_name_suffix'] + '.pt')

    # load pretrained vectors
    pretrain = Pretrain(args['vec_file'], args['logger_name'])
    # TensorboardX:
    summary_writer = SummaryWriter()

    # load data
    logger.critical(f"Loading data with batch size {args['batch_size']}...")
    train_batch = DataLoader(args['train_merge_file'],
                             args['batch_size'],
                             args,
                             pretrain,
                             evaluation=False)
    vocab = train_batch.vocab
    dev_text_batch = DataLoader(args['dev_text_file'],
                                1000,
                                args,
                                pretrain,
                                vocab=vocab,
                                evaluation=True)
    dev_news_batch = DataLoader(args['dev_news_file'],
                                1000,
                                args,
                                pretrain,
                                vocab=vocab,
                                evaluation=True)

    # pred and gold path
    output_text_file = os.path.join(
        args['output_file_path'],
        'DEV_Text_' + args['save_name_suffix'] + '.conllu')
    output_news_file = os.path.join(
        args['output_file_path'],
        'DEV_News_' + args['save_name_suffix'] + '.conllu')

    # skip training if the language does not have training or dev data
    if (len(train_batch) == 0 or len(dev_text_batch) == 0
            or len(dev_news_batch) == 0):
        logger.info("Skipping training because no data is available...")
        exit()

    logger.info("Training parser...")
    trainer = Trainer(args=args,
                      vocab=vocab,
                      pretrain=pretrain,
                      use_cuda=args['cuda'])

    global_step = 0

    using_amsgrad = False
    task2idx = {'text': 0, 'news': 1}
    last_best_step = [0] * 2
    # start training
    train_loss = 0
    last_best_LAS = [0] * 2
    last_best_UAS = [0] * 2
    last_best_LAS_step = [0] * 2
    last_best_UAS_step = [0] * 2

    def _dev(dev_name):
        nonlocal last_best_LAS, last_best_UAS, last_best_LAS_step, last_best_UAS_step
        nonlocal train_loss, last_best_step, dev_text_batch, dev_news_batch
        if dev_name == 'text':
            dev_batch = dev_text_batch
            output_file = output_text_file
            gold_file = args['dev_text_file']
            task_id = task2idx['text']
            model_file = model_text_file
        elif dev_name == 'news':
            dev_batch = dev_news_batch
            output_file = output_news_file
            gold_file = args['dev_news_file']
            task_id = task2idx['news']
            model_file = model_news_file
        else:
            raise ValueError('bad dev name')
        dev_preds = []
        for batch in dev_batch:
            preds = trainer.predict(batch, cuda_data=False)
            dev_preds += preds
            if args['cuda']:
                torch.cuda.empty_cache()
        dev_batch.conll.set(['deps'], [y for x in dev_preds for y in x])
        dev_batch.conll.write_conll(output_file)
        dev_uas, dev_score = sdp_simple_scorer.score(output_file, gold_file)
        logger.info(
            f"step:{global_step}; train_loss:{train_loss:0.5f}; dev_UAS:{dev_uas:.4f}; dev_LAS:{dev_score:.4f}"
        )
        summary_writer.add_scalars(f'data/eval_{dev_name}', {
            'ave_loss': train_loss,
            'dev_UAS': dev_uas,
            'dev_LAS': dev_score
        }, global_step)
        if dev_uas > last_best_UAS[task_id]:
            last_best_UAS[task_id] = dev_uas
            last_best_UAS_step[task_id] = global_step
        # save best model
        if dev_score > last_best_LAS[task_id]:
            last_best_LAS[task_id] = dev_score
            last_best_LAS_step[task_id] = global_step
            last_best_step[task_id] = global_step
            trainer.save(model_file)
            logger.info(
                f"{dev_name}: last_best_UAS:{last_best_UAS[task_id]:.4f} in step:{last_best_UAS_step[task_id]}"
            )
            logger.critical(
                f"{dev_name}: last_best_LAS:{last_best_LAS[task_id]:.4f} in step:{last_best_LAS_step[task_id]}"
            )
            logger.info(f"{dev_name}: new best model saved in {model_file}")

    while True:
        do_break = False
        for i, batch in enumerate(train_batch):
            global_step += 1
            loss = trainer.update(batch,
                                  global_step,
                                  cuda_data=False,
                                  eval=False)  # update step
            train_loss += loss
            if args['cuda']:
                torch.cuda.empty_cache()
            if global_step % args['log_step'] == 0:
                summary_writer.add_scalar('data/loss', loss, global_step)

            if global_step % args['eval_interval'] == 0:
                # eval on dev
                # logger.info("Evaluating on dev set...")
                # average training loss per batch over the eval interval
                train_loss = train_loss / args['eval_interval']
                _dev('text')
                _dev('news')
                train_loss = 0
            if global_step - max(last_best_step) >= args['max_steps_before_stop']:
                if not using_amsgrad:
                    logger.critical(
                        f"--->>> Optim:Switching to AMSGrad in step:{global_step}"
                    )
                    last_best_step = [global_step] * 2
                    using_amsgrad = True
                    trainer.optimizer = optim.Adam(trainer.model.parameters(),
                                                   amsgrad=True,
                                                   lr=args['lr'],
                                                   betas=(args['beta1'],
                                                          args['beta2']),
                                                   eps=args['eps'])
                else:
                    do_break = True
                    break

            if global_step >= args['max_steps']:
                do_break = True
                break

        if do_break:
            break

        train_batch.reshuffle()

    logger.critical(f"Training ended with {global_step} steps")
    logger.critical(
        f'Text: best dev LAS:{last_best_LAS[0]} in step:{last_best_LAS_step[0]}'
    )
    logger.critical(
        f'News: best dev LAS:{last_best_LAS[1]} in step:{last_best_LAS_step[1]}'
    )
    summary_writer.close()
Example #5
import argparse
import os

from particles import Particle
from library.SM import particles as SMP
from evolution import Universe
from common import UNITS, Params, utils, LogSpacedGrid
from interactions.four_particle.cpp.integral import CollisionIntegralKind

parser = argparse.ArgumentParser(description='Run simulation for given mass and mixing angle')
parser.add_argument('--mass', default=33.9)
parser.add_argument('--theta', default=0.031)
parser.add_argument('--comment', default='')
args = parser.parse_args()

mass = float(args.mass) * UNITS.MeV
theta = float(args.theta)

folder = utils.ensure_dir(
    os.path.split(__file__)[0],
    "output",
    "mass={:e}_theta={:e}".format(mass / UNITS.MeV, theta)
    + args.comment
)

T_initial = 50. * UNITS.MeV
T_final = 0.0008 * UNITS.MeV
params = Params(T=T_initial,
                dy=0.003125)

universe = Universe(params=params, folder=folder)

from common import LinearSpacedGrid
linear_grid = LinearSpacedGrid(MOMENTUM_SAMPLES=501, MAX_MOMENTUM=500*UNITS.MeV)
linear_grid_s = LinearSpacedGrid(MOMENTUM_SAMPLES=51, MAX_MOMENTUM=10*UNITS.MeV)

photon = Particle(**SMP.photon)
Example #6
import argparse
from os import path as op

from particles import Particle
from library.SM import particles as SMP
from evolution import Universe
from common import UNITS, Params, utils

parser = argparse.ArgumentParser(
    description='Run simulation for given mass and mixing angle')
parser.add_argument('--mass', required=True)
parser.add_argument('--theta', required=True)
parser.add_argument('--tau', required=True)
parser.add_argument('--Tdec', default=100)
parser.add_argument('--comment', default='')
args = parser.parse_args()

mass = float(args.mass) * UNITS.MeV
theta = float(args.theta)
lifetime = float(args.tau) * UNITS.s
Tdec = float(args.Tdec) * UNITS.MeV

folder = utils.ensure_dir(
    op.split(__file__)[0],
    'output', "mass={:e}_theta={:e}_Tdec={:e}_tau={:e}".format(
        mass / UNITS.MeV, theta, Tdec / UNITS.MeV, lifetime / UNITS.s) +
    args.comment)

T_initial = Tdec
T_interactions_freeze_out = 0.005 * UNITS.MeV
T_final = 0.0008 * UNITS.MeV
params = Params(T=T_initial, dy=0.003125)

universe = Universe(params=params, folder=folder)

photon = Particle(**SMP.photon)
electron = Particle(**SMP.leptons.electron)
muon = Particle(**SMP.leptons.muon)
neutrino_e = Particle(**SMP.leptons.neutrino_e)
neutrino_mu = Particle(**SMP.leptons.neutrino_mu)
Example #7
import argparse
import os

from particles import Particle
from library.SM import particles as SMP
from evolution import Universe
from common import CONST, UNITS, Params, utils, LinearSpacedGrid, HeuristicGrid
from interactions.four_particle.cpp.integral import CollisionIntegralKind

parser = argparse.ArgumentParser(
    description='Run simulation for given mass and mixing angle')
parser.add_argument('--mass', default=300)
parser.add_argument('--theta', default=0.001)
parser.add_argument('--comment', default='')
args = parser.parse_args()

mass = float(args.mass) * UNITS.MeV
theta = float(args.theta)

folder = utils.ensure_dir(
    os.path.split(__file__)[0], 'output',
    "mass={:e}_theta={:e}".format(mass / UNITS.MeV, theta) + args.comment)

T_initial = 50. * UNITS.MeV
T_final = 0.0008 * UNITS.MeV
params = Params(T=T_initial, dy=0.0003125)

universe = Universe(params=params, folder=folder)

linear_grid_s = LinearSpacedGrid(MOMENTUM_SAMPLES=51,
                                 MAX_MOMENTUM=20 * UNITS.MeV)
linear_grid = LinearSpacedGrid(MOMENTUM_SAMPLES=51,
                               MAX_MOMENTUM=50 * UNITS.MeV)
photon = Particle(**SMP.photon)

#electron = Particle(**SMP.leptons.electron)
Example #8
def train(args):
    utils.ensure_dir(args['save_dir'])
    model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
            else '{}/{}_parser.pt'.format(args['save_dir'], args['shorthand'])

    # load pretrained vectors
    vec_file = '../Embeds/sdp_vec.pkl'
    pretrain_file = '../save/sdp.pretrain.pt'
    pretrain = Pretrain(pretrain_file, vec_file)

    # load data
    print("Loading data with batch size {}...".format(args['batch_size']))
    train_batch = DataLoader(args['train_file'], args['batch_size'], args, pretrain, evaluation=False)
    vocab = train_batch.vocab
    dev_batch = DataLoader(args['eval_file'], args['batch_size'], args, pretrain, vocab=vocab, evaluation=True)

    # pred and gold path
    system_pred_file = args['output_file']
    gold_file = args['gold_file']

    # skip training if the language does not have training or dev data
    if len(train_batch) == 0 or len(dev_batch) == 0:
        print("Skip training because no data available...")
        exit()

    print("Training parser...")
    trainer = Trainer(args=args, vocab=vocab, pretrain=pretrain, use_cuda=args['cuda'])

    global_step = 0
    max_steps = args['max_steps']
    dev_score_history = []
    best_dev_preds = []
    current_lr = args['lr']
    global_start_time = time.time()
    format_str = '{}: step {}/{}, loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'

    using_amsgrad = False
    last_best_step = 0
    # start training
    train_loss = 0
    while True:
        do_break = False
        for i, batch in enumerate(train_batch):
            start_time = time.time()
            global_step += 1
            loss = trainer.update(batch, eval=False) # update step
            train_loss += loss
            if global_step % args['log_step'] == 0:
                duration = time.time() - start_time
                print(format_str.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), global_step,\
                        max_steps, loss, duration, current_lr))

            if global_step % args['eval_interval'] == 0:
                # eval on dev
                print("Evaluating on dev set...")
                dev_preds = []
                for batch in dev_batch:
                    preds = trainer.predict(batch)
                    dev_preds += preds

                dev_batch.conll.set(['head', 'deprel'], [y for x in dev_preds for y in x])
                dev_batch.conll.write_conll(system_pred_file)
                _, _, dev_score = scorer.score(system_pred_file, gold_file)

                train_loss = train_loss / args['eval_interval'] # avg loss per batch
                print("step {}: train_loss = {:.6f}, dev_score = {:.4f}".format(global_step, train_loss, dev_score))
                train_loss = 0

                # save best model
                if len(dev_score_history) == 0 or dev_score > max(dev_score_history):
                    last_best_step = global_step
                    trainer.save(model_file)
                    print("new best model saved.")
                    best_dev_preds = dev_preds

                dev_score_history += [dev_score]
                print("")

            if global_step - last_best_step >= args['max_steps_before_stop']:
                if not using_amsgrad:
                    print("Switching to AMSGrad")
                    last_best_step = global_step
                    using_amsgrad = True
                    trainer.optimizer = optim.Adam(trainer.model.parameters(), amsgrad=True, lr=args['lr'], betas=(.9, args['beta2']), eps=1e-6)
                else:
                    do_break = True
                    break

            if global_step >= args['max_steps']:
                do_break = True
                break

        if do_break: break

        train_batch.reshuffle()

    print("Training ended with {} steps.".format(global_step))

    best_f, best_eval = max(dev_score_history)*100, np.argmax(dev_score_history)+1
    print("Best dev F1 = {:.2f}, at iteration = {}".format(best_f, best_eval * args['eval_interval']))
Example #9
import argparse
from os import path as op

from evolution import Universe
from common import UNITS, Params, utils

parser = argparse.ArgumentParser(
    description='Run simulation for given mass and mixing angle')
parser.add_argument('--mass', required=True)
parser.add_argument('--theta', required=True)
parser.add_argument('--tau', required=True)
parser.add_argument('--Tdec', default=100)
parser.add_argument('--Twashout', default=0.1)
parser.add_argument('--comment', default='')
args = parser.parse_args()

mass = float(args.mass) * UNITS.MeV
theta = float(args.theta)
lifetime = float(args.tau) * UNITS.s
Tdec = float(args.Tdec) * UNITS.MeV
T_washout = float(args.Twashout) * UNITS.MeV

folder = utils.ensure_dir(
    op.split(__file__)[0],
    'output',
    "mass={mass:e}_tau={tau:e}_theta={theta:e}_Tdec={Tdec:e}_Twashout={Twashout:e}"
    .format(
        mass=mass / UNITS.MeV,
        tau=lifetime / UNITS.s, theta=theta,
        Tdec=Tdec / UNITS.MeV,
        Twashout=T_washout / UNITS.MeV
    ) + args.comment
)


T_initial = Tdec
T_weak_decoupling = 5 * UNITS.MeV
T_final = 0.0008 * UNITS.MeV
params = Params(T=T_initial,
                dy=0.003125*4)

universe = Universe(params=params, folder=folder)
Example #10
import os
os.environ['SPLIT_COLLISION_INTEGRAL'] = ''

from particles import Particle
from library.SM import particles as SMP, interactions as SMI
from library.NuMSM import particles as NuP, interactions as NuI
from interactions.four_particle.cpp.integral import CollisionIntegralKind
from evolution import Universe
from common import UNITS, Params, utils, LinearSpacedGrid, LogSpacedGrid
from scipy.integrate import simps
import numpy as np

mass = 150 * UNITS.MeV
theta = 1e-3

folder = utils.ensure_dir(
    os.path.split(__file__)[0], "output",
    "mass={:e}_theta={:e}_3p".format(mass / UNITS.MeV, theta))

T_initial = 5. * UNITS.MeV
T_final = 0.8 * UNITS.MeV
params = Params(T=T_initial, dy=0.003125)

universe = Universe(params=params, folder=folder)

linear_grid = LinearSpacedGrid(MOMENTUM_SAMPLES=1001,
                               MAX_MOMENTUM=500 * UNITS.MeV)
linear_grid_nu = LinearSpacedGrid(MOMENTUM_SAMPLES=1001,
                                  MAX_MOMENTUM=200 * UNITS.MeV)
linear_grid_s = LinearSpacedGrid(MOMENTUM_SAMPLES=51,
                                 MAX_MOMENTUM=20 * UNITS.MeV)
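
# The grids defined above are typically attached to individual particles;
# a sketch only, with the grid keyword assumed from the pyBBN-style API:
# neutrino_e = Particle(**SMP.leptons.neutrino_e, grid=linear_grid_nu)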
Example #11
import argparse
import os

from particles import Particle
from library.SM import particles as SMP
from evolution import Universe
from common import UNITS, Params, utils, HeuristicGrid, LogSpacedGrid

parser = argparse.ArgumentParser(
    description='Run simulation for given mass and mixing angle')
parser.add_argument('--mass', required=True)
parser.add_argument('--theta', required=True)
parser.add_argument('--tau', required=True)
parser.add_argument('--comment', default='')
args = parser.parse_args()

mass = float(args.mass) * UNITS.MeV
theta = float(args.theta)
lifetime = float(args.tau) * UNITS.s

folder = utils.ensure_dir(
    os.path.split(__file__)[0], "output",
    "mass={:e}_tau={:e}_theta={:e}".format(
        mass / UNITS.MeV, lifetime / UNITS.s, theta) + args.comment)

T_initial = 400. * UNITS.MeV
T_final = 0.0008 * UNITS.MeV
params = Params(T=T_initial, dy=0.05)

universe = Universe(params=params, folder=folder)

photon = Particle(**SMP.photon)

electron = Particle(**SMP.leptons.electron)
muon = Particle(**SMP.leptons.muon)
tau = Particle(**SMP.leptons.tau)

neutrino_e = Particle(**SMP.leptons.neutrino_e)
Example #12
import argparse
from os import path as op

from particles import Particle
from library.SM import particles as SMP
from evolution import Universe
from common import UNITS, Params, utils

parser = argparse.ArgumentParser(
    description='Run simulation for given mass and mixing angle')
parser.add_argument('--mass', required=True)
parser.add_argument('--theta', required=True)
parser.add_argument('--tau', required=True)
parser.add_argument('--Tdec', default=100)
parser.add_argument('--comment', default='')
args = parser.parse_args()

mass = float(args.mass) * UNITS.MeV
theta = float(args.theta)
lifetime = float(args.tau) * UNITS.s
Tdec = float(args.Tdec) * UNITS.MeV

folder = utils.ensure_dir(
    op.split(__file__)[0],
    'output',
    "mass={:e}_theta={:e}_Tdec={:e}_tau={:e}".format(
        mass / UNITS.MeV, theta, Tdec / UNITS.MeV, lifetime / UNITS.s
    ) + args.comment
)


T_initial = Tdec
T_interactions_freeze_out = 0.005 * UNITS.MeV
T_final = 0.0008 * UNITS.MeV
params = Params(T=T_initial,
                dy=0.003125)

universe = Universe(params=params, folder=folder)

photon = Particle(**SMP.photon)
electron = Particle(**SMP.leptons.electron)