Code example #1
0
# NOTE(review): the two lines after the `if` look de-indented by extraction —
# in sibling examples the argv parsing/printing sits inside the `if`. As
# written, `custom_path` is undefined at the print below when no CLI argument
# is supplied, raising NameError before the assert is ever reached — confirm
# against the original file.
if only_predict:
    custom_path = sys.argv[1]
shot_num = int(sys.argv[2])
print("predicting using path {} on shot {}".format(custom_path, shot_num))

# This script only supports prediction mode; a model path is mandatory.
assert(only_predict)

#####################################################
#                 NORMALIZATION                     #
#####################################################
# TODO(KGF): identical in at least 3x files in examples/
# make sure preprocessing has been run, and is saved as a file
if task_index == 0:
    # TODO(KGF): check tuple unpack
    (shot_list_train, shot_list_validate,
     shot_list_test) = guarantee_preprocessed(conf)
comm.Barrier()  # wait until rank 0 has finished writing the cached files
# second call: every MPI rank loads the already-preprocessed shot lists
(shot_list_train, shot_list_validate,
 shot_list_test) = guarantee_preprocessed(conf)

# Collect the single requested shot from whichever split contains it;
# `sum(..., ShotList())` concatenates the per-split filter results.
shot_list = sum([l.filter_by_number([shot_num])
                 for l in [shot_list_train, shot_list_validate,
                           shot_list_test]], ShotList())
assert(len(shot_list) == 1)
# for s in shot_list.shots:
# s.restore()


def chunks(l, n):
    """Return successive n-sized chunks from l as a list.

    The original docstring said "Yield", but the function materializes and
    returns a list (callers may index it), so the list form is kept.

    Parameters
    ----------
    l : sequence
        Sequence to split; the final chunk may be shorter than ``n``.
        (Parameter name kept for backward compatibility with keyword callers.)
    n : int
        Maximum chunk size; must be positive.
    """
    return [l[i:i + n] for i in range(0, len(l), n)]
Code example #2
0
File: mpi_learn.py  Project: ge-dong/plasma-python
# Optional CLI argument: a custom model/weights path. Supplying it switches
# the run into prediction-only mode.
only_predict = len(sys.argv) > 1
custom_path = None
if only_predict:
    custom_path = sys.argv[1]
    g.print_unique("predicting using path {}".format(custom_path))

#####################################################
#                 NORMALIZATION                     #
#####################################################
normalizer = Normalizer(conf)
if g.task_index == 0:
    # make sure preprocessing has been run, and results are saved to files
    # if not, only master MPI rank spawns thread pool to perform preprocessing
    (shot_list_train, shot_list_validate,
     shot_list_test) = guarantee_preprocessed(conf)
    # similarly, train normalizer (if necessary) w/ master MPI rank only
    normalizer.train()  # verbose=False only suppresses if purely loading
g.comm.Barrier()  # all ranks wait for rank 0's cached preprocessing output
g.print_unique("begin preprocessor+normalization (all MPI ranks)...")
# second call has ALL MPI ranks load preprocessed shots from .npz files
(shot_list_train, shot_list_validate,
 shot_list_test) = guarantee_preprocessed(conf, verbose=True)
# second call to normalizer training: disable recomputation so every rank
# loads the stats written by rank 0 instead of recomputing them
normalizer.conf['data']['recompute_normalization'] = False
normalizer.train(verbose=True)
# KGF: may want to set it back...
# normalizer.conf['data']['recompute_normalization'] = conf['data']['recompute_normalization']   # noqa
loader = Loader(conf, normalizer)
g.print_unique("...done")
Code example #3
0
# Seed the per-rank RNG so each MPI worker uses a distinct, reproducible
# random stream; only rank 0 echoes the configuration.
random.seed(task_index)
if task_index == 0:
    pprint(conf)

# Optional CLI argument: custom model path => prediction-only mode.
only_predict = len(sys.argv) > 1
custom_path = None
if only_predict:
    custom_path = sys.argv[1]
print("predicting using path {}".format(custom_path))

# NOTE(review): the print above runs before this check, so a missing
# argument prints "path None" before failing here.
assert(only_predict)
#####################################################
####################Normalization####################
#####################################################
if task_index == 0: #make sure preprocessing has been run, and is saved as a file
    shot_list_train,shot_list_validate,shot_list_test = guarantee_preprocessed(conf)
comm.Barrier()  # wait until rank 0 has written the cached files
# second call: every rank loads the already-preprocessed shot lists
shot_list_train,shot_list_validate,shot_list_test = guarantee_preprocessed(conf)

def chunks(l, n):
    """Return successive n-sized chunks from l as a list.

    The original docstring said "Yield", but the function materializes and
    returns a list (callers may index it), so the list form is kept.

    Parameters
    ----------
    l : sequence
        Sequence to split; the final chunk may be shorter than ``n``.
        (Parameter name kept for backward compatibility with keyword callers.)
    n : int
        Maximum chunk size; must be positive.
    """
    return [l[i:i + n] for i in range(0, len(l), n)]

def hide_signal_data(shot, t=0, sigs_to_hide=None):
    """Freeze selected signals of *shot* from time index ``t`` onward.

    Every row of each targeted signal at or after index ``t`` is overwritten
    (in place) with the row at index ``t``, so the signal appears constant
    from that point — simulating loss of live data.

    Parameters
    ----------
    shot : object
        Must expose ``signals`` (iterable of signal keys) and
        ``signals_dict`` (mapping signal -> 2D array supporting
        numpy-style slice assignment).
    t : int, default 0
        Time index from which the signal is frozen.
    sigs_to_hide : iterable or None, default None
        Signals to hide; ``None`` hides every signal in ``shot.signals``.
    """
    for sig in shot.signals:
        # Simplified from the original redundant test
        # `sigs_to_hide is None or (sigs_to_hide is not None and sig in sigs_to_hide)`:
        # the inner `is not None` is always true in the `or` branch.
        if sigs_to_hide is None or sig in sigs_to_hide:
            # Broadcast the row at time t over all later rows.
            shot.signals_dict[sig][t:, :] = shot.signals_dict[sig][t, :]

def create_shot_list_tmp(original_shot,time_points,sigs=None):
    shot_list_tmp = ShotList()
Code example #4
0
# Seed the per-rank RNG so each MPI worker uses a distinct, reproducible
# random stream; only rank 0 echoes the configuration.
random.seed(task_index)
if task_index == 0:
    pprint(conf)

# Wouter: I adapted this to make the first argument the config file.
# Hence the model path (prediction-only mode) is now the SECOND argument.
only_predict = len(sys.argv) > 2
custom_path = None
if only_predict:
    custom_path = sys.argv[2]
    print("predicting using path {}".format(custom_path))

#####################################################
####################Normalization####################
#####################################################
if task_index == 0:  #make sure preprocessing has been run, and is saved as a file
    shot_list_train, shot_list_validate, shot_list_test = guarantee_preprocessed(
        conf)
comm.Barrier()  # wait until rank 0 has written the cached files
# second call: every rank loads the already-preprocessed shot lists
shot_list_train, shot_list_validate, shot_list_test = guarantee_preprocessed(
    conf)

print("normalization", end='')
normalizer = Normalizer(conf)
normalizer.train()
loader = Loader(conf, normalizer)
print("...done")

# ensure training has a separate random seed for every worker
np.random.seed(task_index)
random.seed(task_index)
if not only_predict:
    mpi_train(conf, shot_list_train, shot_list_validate, loader)
Code example #5
0
from plasma.preprocessor.preprocess import guarantee_preprocessed
import random
import numpy as np

from plasma.conf import conf
from pprint import pprint
pprint(conf)

#####################################################
#                PREPROCESSING                      #
#####################################################
# Fixed seeds make any randomized preprocessing steps (e.g. shot-list
# shuffling/splitting) reproducible across runs.
np.random.seed(0)
random.seed(0)
# Run preprocessing of all configured shots (or verify the cached results
# already exist on disk), with progress output enabled.
guarantee_preprocessed(conf, verbose=True)
Code example #6
0
from plasma.preprocessor.preprocess import guarantee_preprocessed
import random
import numpy as np

from plasma.conf import conf
from pprint import pprint

pprint(conf)

#####################################################
#                PREPROCESSING                      #
#####################################################
# Fixed seeds make any randomized preprocessing steps (e.g. shot-list
# shuffling/splitting) reproducible across runs.
np.random.seed(0)
random.seed(0)
# Run preprocessing of all configured shots (or verify the cached results
# already exist on disk).
guarantee_preprocessed(conf)
Code example #7
0
from __future__ import print_function
import os
import sys 
import time
import datetime
import random
import numpy as np

from plasma.conf import conf
from pprint import pprint
pprint(conf)
# NOTE(review): import placed after pprint(conf) — importing plasma.conf
# first is presumably intentional (config side effects); confirm before
# reordering.
from plasma.preprocessor.preprocess import guarantee_preprocessed

#####################################################
####################PREPROCESSING####################
#####################################################
# Fixed seeds make any randomized preprocessing steps (e.g. shot-list
# shuffling/splitting) reproducible across runs.
np.random.seed(0)
random.seed(0)
# Run preprocessing of all configured shots (or verify the cached results
# already exist on disk).
guarantee_preprocessed(conf)