Example #1
    def init_params(self):
        
        if not hasattr(self.params, "__len__"):
            print("hyper-parameters not given, random initialization...")
            my_params = get_params(self.use_kernels,
                                   self.use_means)

            mean_params = my_params["means"]
            std_params = my_params["stds"]
            use_log = my_params["use_log"]

            self.params = list(np.zeros_like(mean_params))
            for k in range(len(self.params)):
                if use_log[k]:
                    self.params[k] = scipy.stats.lognorm.rvs(1,
                                                             loc=mean_params[
                                                                 k],
                                                             scale=std_params[k])
                else:
                    self.params[k] = scipy.stats.norm.rvs(loc=mean_params[k],
                                                          scale=std_params[k])
                    
            self.update_scales()
                    
            self.tune_hyperparameters()
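Examples #1, #6 and #22 all consume the same get_params(use_kernels, use_means) contract. A minimal, hypothetical stub of what that function might return, inferred only from the keys these snippets read (names, means, stds, bounds, use_log); every value below is a placeholder:

import numpy as np

def get_params(use_kernels, use_means):
    # Hypothetical stub: one entry per hyperparameter, placeholder values.
    return {
        "names": ["rbf_scale", "rbf_period", "mean_const"],
        "means": np.array([1.0, 10.0, 0.0]),                   # prior means
        "stds": np.array([1.0, 5.0, 1.0]),                     # prior std deviations
        "bounds": [(1e-6, None), (1e-6, None), (None, None)],  # for L-BFGS-B
        "use_log": [True, True, False],                        # lognormal vs. normal prior
    }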
Example #2
def deepvision_search(query_path, fetch_limit):
    print "query_path :: ", query_path
    # Get the mentioned params
    params = get_params()

    # Read image lists
    dimension = params['dimension']

    # Load features for the input image.
    E = Extractor(params)

    print "Extracting features for the input image."

    # Init empty np array of size 1 to store the input query features
    query_feats = np.zeros((1, dimension))

    # Extract raw feature from cnn
    feat = E.extract_feat_image(query_path).squeeze()

    # Compose single feature vector
    feat = E.pool_feats(feat)
    query_feats[0, :] = feat
    query_feats = normalize(query_feats)

    print "Computing distances"
    distances = get_distances(query_feats, db_feats)
    final_scores = distances
    print "Distances :: ", final_scores

    # Reading the db images to form a map of image and their respective scores
    with open(params['frame_list'], 'r') as f:
        database_list = f.read().splitlines()

    ranking = np.array(database_list)[np.argsort(final_scores)]
    return ranking[0][:int(fetch_limit)]
Example #3
def return_grad(test_params, input_x, truth_y):
    tmp = get_params(model_ft.models_stack[-1])
    set_params(model_ft.models_stack[-1], test_params)
    result = numpy.concatenate(
        [numpy.array(i).flatten() for i in fun_grad(input_x, truth_y)])
    set_params(model_ft.models_stack[-1], tmp)
    return result
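This snippet temporarily swaps test_params into the top model, evaluates the gradient, then restores the saved parameters; return_cost in Example #26 uses the same idiom. A framework-agnostic sketch of that swap/evaluate/restore pattern as a context manager (the getter/setter pair is passed in, since the get_params/set_params used here are specific to this codebase):

from contextlib import contextmanager

@contextmanager
def temporary_params(model, new_params, getter, setter):
    # Swap new_params into the model, yield, then always restore the originals.
    saved = getter(model)
    setter(model, new_params)
    try:
        yield model
    finally:
        setter(model, saved)

# Usage sketch:
# with temporary_params(model_ft.models_stack[-1], test_params, get_params, set_params):
#     result = fun_cost(input_x, truth_y)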
Example #4
def test_select(self):
    params = get_params("/data/zjy/csqa_data",
                        "/home/zhangjingyao/preprocessed_data_10k")

    # ls = LuceneSearch(params["lucene_dir"])
    # Load the knowledge base
    # try:
    #     print("loading...")
    #     wikidata = pickle.load(open('/home/zhangjingyao/data/wikidata.pkl','rb'))
    #     print("loading...")
    #     item_data = pickle.load(open('/home/zhangjingyao/data/entity_items','rb'))
    #     print("loading...")
    #     prop_data = None
    #     print("loading...")
    #     child_par_dict = pickle.load(open('/home/zhangjingyao/data/type_kb.pkl','rb'))
    # except:
    # wikidata, item_data, prop_data, child_par_dict = load_wikidata(params["wikidata_dir"])# data for entity ,property, type

    # Load the QA file set
    qa_set = load_qadata("/home/zhangjingyao/preprocessed_data_10k/demo")
    question_parser = QuestionParser(params, True)

    f = open("log.txt", 'w+')
    for qafile in qa_set.values():
        for qid in range(len(qafile["context"])):
            # Get one QA record
            q = {k: v[qid] for k, v in qafile.items()}

            # Parse the question
            qstring = q["context_utterance"]
            entities = question_parser.getNER(q)
            relations = question_parser.getRelations(q)
            types = question_parser.getTypes(q)

            # Get the operation sequence
            states = random.randint(1, 18)  # randomly generate an operation sequence
            seq2seq = Seq2Seq()
            symbolic_seq = seq2seq.simple(qstring, entities, relations, types,
                                          states)

            # Symbolic execution
            time_start = time.time()
            symbolic_exe = symbolics.Symbolics(symbolic_seq)
            answer = symbolic_exe.executor()

            print("answer is :", answer)
            if (type(answer) == dict):
                for key in answer:
                    print([v for v in answer[key]])

            time_end = time.time()
            print('time cost:', time_end - time_start)
            print(
                "--------------------------------------------------------------------------------"
            )

    print(0)
Example #5
def deepvision_search(query_path, fetch_limit):
    # Get the mentioned params
    params = get_params()
    global pca, db_feats

    # Read image lists
    dataset = params['dataset']
    image_path = params['database_images']
    dimension = params['dimension']
    pooling = params['pooling']
    N_QE = params['N_QE']
    stage = params['stage']

    # Distance type
    dist_type = params['distance']

    # Load features for the DB Images.
    db_feats = pickle.load(open(params['database_feats'], 'rb'))

    # Load features for the input image.
    E = Extractor(params)

    print "Extracting features for the input image."

    # Init empty np array of size 1 to store the input query features
    query_feats = np.zeros((1, dimension))

    # Extract raw feature from cnn
    feat = E.extract_feat_image(query_path).squeeze()

    # Compose single feature vector
    feat = E.pool_feats(feat)
    query_feats[0, :] = feat
    query_feats = normalize(query_feats)

    print "Computing distances"
    distances = get_distances(query_feats, db_feats)
    final_scores = distances
    print "Distances :: ", final_scores

    # Reading the db images to form a map of image and their respective scores
    with open(params['frame_list'], 'r') as f:
        database_list = f.read().splitlines()

    ranking = np.array(database_list)[np.argsort(final_scores)]

    #return ranking[0][:int(fetch_limit)]

    # Temporary fix done for resolving the issue incurred when the number of images is less than 512.
    modified_lst = []
    for image in ranking[0]:
        # To pad the image count up to 512, copies of images with a "copy"
        # suffix were created; the condition below filters them out.
        if "copy" in image:
            continue
        modified_lst.append(image)

    return modified_lst[:int(fetch_limit)]
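Examples #2 and #5 share one get_params() config. A hypothetical stub listing just the keys those two snippets read; all paths and values are invented placeholders:

def get_params():
    # Hypothetical stub covering the keys used in Examples #2 and #5.
    return {
        "dataset": "demo",                  # placeholder
        "database_images": "db/images/",    # placeholder path
        "database_feats": "db/feats.pkl",   # pickled DB feature matrix
        "frame_list": "db/frames.txt",      # one image name per line
        "dimension": 512,                   # feature dimensionality
        "pooling": "max",
        "N_QE": 10,                         # query-expansion depth
        "stage": "ranking",
        "distance": "cosine",
    }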
Example #6
    def update_scales(self, nyquist_freq=0.5):

        print("initial parameters updated with uniform draws "
              "within the Nyquist frequency")

        params = get_params(self.use_kernels,
                            self.use_means)

        for k in range(len(self.params)):
            curr_name = params["names"][k]
            if "period" in curr_name or "scale" in curr_name:
                freq = scipy.stats.uniform.rvs() * nyquist_freq
                self.params[k] = 1. / freq
Example #7
def main():
    params = get_params()

    datasetD = make_seq_2_seq_dataset(params)
    train_x = datasetD['train']['x']
    train_y = datasetD['train']['y']
    test_x = datasetD['test']['x']
    test_y = datasetD['test']['y']
    train_scenarios = datasetD['train']['scenarios']
    test_scenarios = datasetD['test']['scenarios']
    val_scenarios = datasetD['val']['scenarios']
    params.scaleD = datasetD['scaleD']

    input_window_samps = params.input_window_length_samples
    num_signals = params.num_signals
    output_window_samps = params.output_window_length_samples

    train_X = np.array(train_x, dtype=float)
    # train_X=np.reshape(train_X,(input_window_samps,-1))

    #
    train_Y = np.array(train_y, dtype=float)
    nsamples, nx, ny = train_Y.shape
    # print(nsamples)
    # print(nx)
    # print(ny)

    train_Y = np.reshape(train_Y,
                         (nsamples, output_window_samps * num_signals))
    print(train_X.shape)
    #print(len(train_X[0,:]))
    print(train_Y.shape)

    print(train_Y)

    # print(input_window_samps)
    # print(num_signals)
    # print(output_window_samps)
    # train_x = Reshape((input_window_samps,num_signals))(train_x)
    # print(train_x.shape)
    # print(train_x[0,:])
    #
    # train_y = Reshape((output_window_samps, num_signals))(train_y)
    # print(train_y.shape)
    # print(train_y[0,:])

    # NOTE: plain sklearn SVR is single-output; fitting a 2-D train_Y like this
    # raises an error (Example #33 wraps SVR in MultiOutputRegressor instead).
    model = SVR(kernel='rbf', degree=3)
    history = model.fit(train_X, train_Y)
    history_score = history.score(test_x, test_y)
    print("the score of the SVR is : %f" % history_score)
Example #8
    def __init__(self, dataset):
        self.dataset = dataset

        ###################
        # training params #
        ###################
        self.args = get_params(dataset)
        torch.manual_seed(self.args.random_seed)

        ###################
        # get dataloaders #
        ###################
        kwargs = {'num_workers': 8, 'pin_memory': True}
        self.train_loader, self.test_loader = get_dataloaders(
            dataset, **kwargs)

        ######################
        # Initialize Network #
        ######################
        self.net = get_classifier(dataset)
        if self.args.cuda:
            self.net = torch.nn.DataParallel(self.net, device_ids=[0])
            self.net = self.net.cuda()

        ########################
        # Initialize Optimizer #
        ########################
        self.optimizer = optim.SGD(self.net.parameters(),
                                   lr=self.args.learning_rate,
                                   momentum=self.args.momentum)

        #####################
        # Initialize Losses #
        #####################
        self.train_losses = []
        self.train_counter = []
        self.test_losses = []
        self.test_counter = [
            i * len(self.train_loader.dataset)
            for i in range(self.args.n_epochs + 1)
        ]

        ##########################
        # Checkpoint data Losses #
        ##########################
        self.curr_best = 0.0
        self.best_net_state = None
        self.best_optimizer_state = None
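Here get_params(dataset) evidently returns an argparse-style namespace (attribute access: args.random_seed, args.cuda, ...). A plausible sketch, assuming only the attributes this __init__ reads; the defaults are invented:

import argparse

def get_params(dataset):
    # Hypothetical stub: expose training hyperparameters as a namespace.
    parser = argparse.ArgumentParser(description="training params for " + dataset)
    parser.add_argument("--random_seed", type=int, default=1)
    parser.add_argument("--cuda", action="store_true", default=False)
    parser.add_argument("--learning_rate", type=float, default=0.01)
    parser.add_argument("--momentum", type=float, default=0.9)
    parser.add_argument("--n_epochs", type=int, default=10)
    return parser.parse_args([])  # parse defaults only, ignore sys.argv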
Example #9
def main():
    # tf.compat.v1.enable_v2_behavior()
    print("tensorflow version =", tf.__version__)
    # get and save params of this run
    params = get_params()

    # dataset = Seq2SeqDataset_copy(
    #     input_path=params.input_dir,
    #     input_window_length_samples =params.input_window_length_samples,
    #     output_window_length_samples=params.output_window_length_samples,
    # )

    # train_dataset = tf.data.Dataset.from_generator((train_x, train_y),output_types=(tf.float64,tf.float64))
    # train_dataset = train_dataset.shuffle(buffer_size=100000)
    # train_dataset = train_dataset.repeat()

    datasetD = make_seq_2_seq_dataset(params)

    train_x = datasetD['train']['x']
    train_y = datasetD['train']['y']
    test_x = datasetD['test']['x']
    test_y = datasetD['test']['y']
    val_x = datasetD['val']['x']
    val_y = datasetD['val']['y']

    train_scenarios = datasetD['train']['scenarios']
    test_scenarios = datasetD['test']['scenarios']
    val_scenarios = datasetD['val']['scenarios']
    params.scaleD = datasetD['scaleD']  # store scaleD in params_out.yml

    #model = create_model(params)
    #model.compile(optimizer=params.optimizer,
    #              loss=params.loss,
    #             metrics=get_metrics(params))
    input_window_samps = params.input_window_length_samples
    num_signals = params.num_signals
    output_window_samps = params.output_window_length_samples

    train_x = tf.reshape(train_x, [input_window_samps, num_signals])
    train_y = tf.reshape(train_y, [output_window_samps, num_signals])
    test_x = tf.reshape(test_x, [input_window_samps, num_signals])
    test_y = tf.reshape(test_y, [output_window_samps, num_signals])
    # NOTE: as in Example #7, plain SVR is single-output, and wrapping the
    # reshaped tensors in lists makes 3-D inputs, so this fit does not run as written.
    model = SVR(kernel='rbf', degree=3)
    history = model.fit([train_x], [train_y])
    history_score = history.score(test_x, test_y)
    print("the score of the SVR is : %f" % history_score)
Example #10
def corrbin(argv):
    """
    Function to correlate one source with all the receivers. 
    Usage: python CorrelBinsSingleRotation.py --help to display help message"
    """

    t0 = time.time()
    binfile = ''
    verbose = False
    params = get_params()
    nthreads = params['nthreads']
    corrType = params['corrType']

    try:
        opts, args = getopt.getopt(argv, "hp:d:s:c:t:v", [
            "help", "path2bin=", "date=", "station=", "components=",
            "threads=", "verbose"
        ])
    except getopt.GetoptError as e:
        print(e)
        print("correlbins.py -p <path2bin> -d <date> -s <sourceName> -c <components> -t <nthreads> -v <verbose> -h <help>")
        sys.exit(2)
Example #11
import tensorflow as tf
from collections import deque
from data import Data, Batch
from data_utils import *
from db_engine import DbEngine, QueryGenerator
from evaluation import evaluate
from itertools import chain
from memn2n.memn2n_dialog_generator import MemN2NGeneratorDialog
from operator import itemgetter
from params import get_params, print_params
from reward import calculate_reward
from six.moves import range, reduce
from sklearn import metrics
#from tqdm import tqdm

args = get_params()
glob = {}


class chatBot(object):
    def __init__(self):
        # Create Model Store Directory
        self.run_id = ("task" + str(args.task_id) + "_" +
                       args.data_dir.split('/')[-2] + "_lr-" +
                       str(args.learning_rate) + "_hops-" + str(args.hops) +
                       "_emb-size-" + str(args.embedding_size) + "_sw-" +
                       str(args.soft_weight) + "_wd-" +
                       str(args.word_drop_prob) + "_pw-" +
                       str(args.p_gen_loss_weight) + "_rlmode-" +
                       str(args.rl_mode) + "_idx-" + str(args.model_index) +
                       "_pi_b-" + str(args.pi_b))
Example #12
def main():
    """Balance of plant of a boiling water nuclear reactor.

    Attributes
    ----------
    end_time: float
        End of the flow time in SI unit.
    time_step: float
        Size of the time step between port communications in SI unit.
    use_mpi: bool
        If set to `True` use MPI otherwise use Python multiprocessing.

    """

    # Preamble

    end_time = 30.0 * unit.minute
    time_step = 30.0  # seconds
    show_time = (True, 5 * unit.minute)

    use_mpi = False  # True for MPI; False for Python multiprocessing
    plot_results = True  # True for enabling plotting section below
    params = get_params()  # parameters for BoP BWR

    #*****************************************************************************
    # Define Cortix system

    # System top level
    plant = Cortix(use_mpi=use_mpi, splash=True)

    # Network
    plant_net = plant.network = Network()

    params['start-time'] = 0.0
    params['end-time'] = end_time
    params['shutdown-time'] = 999.0 * unit.hour
    params['shutdown-mode'] = False
    #*****************************************************************************
    # Create reactor module
    reactor = BWR(params)

    reactor.name = 'BWR'
    reactor.save = True
    reactor.time_step = time_step
    reactor.end_time = end_time
    reactor.show_time = show_time
    reactor.RCIS = True

    # Add reactor module to network
    plant_net.module(reactor)

    #*****************************************************************************
    # Create turbine high pressure module
    params['turbine_inlet_pressure'] = 2
    params['turbine_outlet_pressure'] = 0.5
    params['high_pressure_turbine'] = True

    #params_turbine = reactor.params
    #params_turbine.inlet_pressure = 2
    #params.turbine_outlet_pressure = 0.5

    turbine_hp = Turbine(params)

    turbine_hp.name = 'High Pressure Turbine'
    turbine_hp.save = True
    turbine_hp.time_step = time_step
    turbine_hp.end_time = end_time

    # Add turbine high pressure module to network
    plant_net.module(turbine_hp)

    #*****************************************************************************
    # Create turbine low pressure module
    params['turbine_inlet_pressure'] = 0.5
    params['turbine_outlet_pressure'] = 0.005
    params['high_pressure_turbine'] = False
    params['steam flowrate'] = params['steam flowrate'] / 2

    turbine_lp1 = Turbine(params)

    turbine_lp1.name = 'Low Pressure Turbine 1'
    turbine_lp1.save = True
    turbine_lp1.time_step = time_step
    turbine_lp1.end_time = end_time

    plant_net.module(turbine_lp1)

    #*****************************************************************************
    # Create turbine low pressure module
    params['turbine_inlet_pressure'] = 0.5
    params['turbine_outlet_pressure'] = 0.005
    params['high_pressure_turbine'] = False

    turbine_lp2 = Turbine(params)

    turbine_lp2.name = 'Low Pressure Turbine 2'
    turbine_lp2.save = True
    turbine_lp2.time_step = time_step
    turbine_lp2.end_time = end_time

    plant_net.module(turbine_lp2)

    #*****************************************************************************
    # Create condenser module
    params['steam flowrate'] = params['steam flowrate'] * 2

    condenser = Condenser()

    condenser.name = 'Condenser'
    condenser.save = True
    condenser.time_step = time_step
    condenser.end_time = end_time

    plant_net.module(condenser)

    #*****************************************************************************
    params['RCIS-shutdown-time'] = 5 * unit.minute
    rcis = Cooler(params)
    rcis.name = 'RCIS'
    rcis.save = True
    rcis.time_step = time_step
    rcis.end_time = end_time

    plant_net.module(rcis)

    #*****************************************************************************
    # Create the BoP network connectivity
    plant_net.connect([reactor, 'coolant-outflow'], [turbine_hp, 'inflow'])
    plant_net.connect([turbine_hp, 'outflow-1'], [turbine_lp1, 'inflow'])
    plant_net.connect([turbine_hp, 'outflow-2'], [turbine_lp2, 'inflow'])
    plant_net.connect([turbine_lp1, 'outflow-1'], [condenser, 'inflow-1'])
    plant_net.connect([turbine_lp2, 'outflow-1'], [condenser, 'inflow-2'])
    plant_net.connect([condenser, 'outflow'], [reactor, 'coolant-inflow'])
    plant_net.connect([reactor, 'RCIS-outflow'], [rcis, 'coolant-inflow'])
    plant_net.connect([rcis, 'coolant-outflow'], [reactor, 'RCIS-inflow'])
    #plant_net.connect([rcis, 'signal-in'], [reactor, 'signal-out'])

    plant_net.draw(engine='dot', node_shape='folder')
    #*****************************************************************************
    # Run network dynamics simulation
    plant.run()

    #*****************************************************************************
    # Plot results

    if plot_results and (plant.use_multiprocessing or plant.rank == 0):

        # Reactor plots
        reactor = plant_net.modules[0]

        (quant, time_unit
         ) = reactor.neutron_phase.get_quantity_history('neutron-dens')
        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']')

        plt.grid()
        plt.savefig('startup-neutron-dens.png', dpi=300)

        (quant, time_unit
         ) = reactor.neutron_phase.get_quantity_history('delayed-neutrons-cc')
        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']')

        plt.grid()
        plt.savefig('startup-delayed-neutrons-cc.png', dpi=300)

        (quant, time_unit
         ) = reactor.coolant_outflow_phase.get_quantity_history('temp')

        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']')

        plt.grid()
        plt.savefig('startup-coolant-outflow-temp.png', dpi=300)

        (quant,
         time_unit) = reactor.reactor_phase.get_quantity_history('fuel-temp')

        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']')
        plt.grid()
        plt.savefig('startup-fuel-temp.png', dpi=300)

        # Turbine high pressure plots
        turbine_hp = plant_net.modules[1]

        (quant,
         time_unit) = turbine_hp.outflow_phase.get_quantity_history('power')

        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']',
                   title='High Pressure Turbine Power')
        plt.grid()
        plt.savefig('startup-turbine-hp-power.png', dpi=300)

        (quant,
         time_unit) = turbine_hp.outflow_phase.get_quantity_history('temp')

        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']',
                   title='High Pressure Turbine Outflow Temperature')
        plt.grid()
        plt.savefig('startup-turbine-hp-outflow-temp.png', dpi=300)

        # Turbine low pressure graphs
        turbine_lp1 = plant_net.modules[2]

        (quant,
         time_unit) = turbine_lp1.outflow_phase.get_quantity_history('power')

        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']',
                   title='Lower Pressure Turbine 1 Power')
        plt.grid()
        plt.savefig('startup-turbine-lp1-power.png', dpi=300)

        (quant,
         time_unit) = turbine_lp1.outflow_phase.get_quantity_history('temp')

        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']',
                   title='Lower Pressure Turbine 1 Outflow Temperature')
        plt.grid()
        plt.savefig('startup-turbine-lp1-outflow-temp.png', dpi=300)

        # Condenser graphs
        condenser = plant_net.modules[4]

        (quant,
         time_unit) = condenser.outflow_phase.get_quantity_history('temp')

        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']')
        plt.grid()
        plt.savefig('startup-condenser-outflow-temp.png', dpi=300)

    # Set up initial values for the follow-on shutdown simulation
    turbine1_outflow_temp = turbine_hp.outflow_phase.get_value(
        'temp', end_time)
    turbine1_chi = turbine_hp.outflow_phase.get_value('quality', end_time)
    turbine1_power = turbine_hp.outflow_phase.get_value('power', end_time)

    turbine2_outflow_temp = turbine_lp1.outflow_phase.get_value(
        'temp', end_time)
    turbine2_chi = turbine_lp1.outflow_phase.get_value('quality', end_time)
    turbine2_power = turbine_lp1.outflow_phase.get_value('power', end_time)

    condenser_runoff_temp = condenser.outflow_phase.get_value('temp', end_time)

    delayed_neutron_cc = reactor.neutron_phase.get_value(
        'delayed-neutrons-cc', end_time)
    n_dens = reactor.neutron_phase.get_value('neutron-dens', end_time)
    fuel_temp = reactor.reactor_phase.get_value('fuel-temp', end_time)
    coolant_temp = reactor.coolant_outflow_phase.get_value('temp', end_time)
    # Values loaded into params when they are needed (module instantiation)

    # Properly shutdown simulation
    plant.close()

    # Now we run shutdown as a separate simulation with starting parameters
    # equal to the ending values of the startup simulation

    #**************************************************************************************************

    # Preamble

    start_time = 0.0 * unit.minute
    end_time = 60 * unit.minute
    time_step = 30.0  # seconds
    show_time = (True, 5 * unit.minute)

    use_mpi = False  # True for MPI; False for Python multiprocessing
    plot_results = True  # True for enabling plotting section below
    params = get_params()  # clear params, just to be safe

    #*****************************************************************************
    # Define Cortix system

    # System top level
    plant = Cortix(use_mpi=use_mpi, splash=True)

    # Network
    plant_net = plant.network = Network()

    params['start-time'] = start_time
    params['end-time'] = end_time
    params['shutdown-time'] = 0.0
    params['shutdown-mode'] = True

    #*****************************************************************************
    # Create reactor module
    params['delayed-neutron-cc'] = delayed_neutron_cc
    params['n-dens'] = n_dens
    params['fuel-temp'] = fuel_temp
    params['coolant-temp'] = coolant_temp
    params['operating-mode'] = 'shutdown'
    reactor = BWR(params)

    reactor.name = 'BWR'
    reactor.save = True
    reactor.time_step = time_step
    reactor.end_time = end_time
    reactor.show_time = show_time
    reactor.RCIS = False

    # Add reactor module to network
    plant_net.module(reactor)

    #*****************************************************************************
    # Create turbine high pressure module
    params['turbine_inlet_pressure'] = 2
    params['turbine_outlet_pressure'] = 0.5
    params['high_pressure_turbine'] = True
    params['turbine-outflow-temp'] = turbine1_outflow_temp
    params['turbine-chi'] = turbine1_chi
    params['turbine-work'] = turbine1_power
    params['turbine-inflow-temp'] = coolant_temp

    #params_turbine = reactor.params
    #params_turbine.inlet_pressure = 2
    #params.turbine_outlet_pressure = 0.5

    turbine_hp = Turbine(params)

    turbine_hp.name = 'High Pressure Turbine'
    turbine_hp.save = True
    turbine_hp.time_step = time_step
    turbine_hp.end_time = end_time

    # Add turbine high pressure module to network
    plant_net.module(turbine_hp)

    #*****************************************************************************
    # Create turbine low pressure 1 module
    params['turbine_inlet_pressure'] = 0.5
    params['turbine_outlet_pressure'] = 0.005
    params['high_pressure_turbine'] = False
    params['steam flowrate'] = params['steam flowrate'] / 2
    params['turbine-outflow-temp'] = turbine2_outflow_temp
    params['turbine-inflow-temp'] = turbine1_outflow_temp
    params['turbine-chi'] = turbine2_chi
    params['turbine-work'] = turbine2_power

    turbine_lp1 = Turbine(params)

    turbine_lp1.name = 'Low Pressure Turbine 1'
    turbine_lp1.save = True
    turbine_lp1.time_step = time_step
    turbine_lp1.end_time = end_time

    plant_net.module(turbine_lp1)

    #*****************************************************************************
    # Create turbine low pressure 2 module
    params['turbine_inlet_pressure'] = 0.5
    params['turbine_outlet_pressure'] = 0.005
    params['high_pressure_turbine'] = False

    turbine_lp2 = Turbine(params)

    turbine_lp2.name = 'Low Pressure Turbine 2'
    turbine_lp2.save = True
    turbine_lp2.time_step = time_step
    turbine_lp2.end_time = end_time

    plant_net.module(turbine_lp2)

    #*****************************************************************************
    # Create condenser module
    params['steam flowrate'] = params['steam flowrate'] * 2
    params['condenser-runoff-temp'] = condenser_runoff_temp
    condenser = Condenser()

    condenser.name = 'Condenser'
    condenser.save = True
    condenser.time_step = time_step
    condenser.end_time = end_time

    plant_net.module(condenser)

    #*****************************************************************************
    params['RCIS-shutdown-time'] = -1 * unit.minute
    rcis = Cooler(params)
    rcis.name = 'RCIS'
    rcis.save = True
    rcis.time_step = time_step
    rcis.end_time = end_time

    plant_net.module(rcis)

    #*****************************************************************************
    # Create the BoP network connectivity
    plant_net.connect([reactor, 'coolant-outflow'], [turbine_hp, 'inflow'])
    plant_net.connect([turbine_hp, 'outflow-1'], [turbine_lp1, 'inflow'])
    plant_net.connect([turbine_hp, 'outflow-2'], [turbine_lp2, 'inflow'])
    plant_net.connect([turbine_lp1, 'outflow-1'], [condenser, 'inflow-1'])
    plant_net.connect([turbine_lp2, 'outflow-1'], [condenser, 'inflow-2'])
    plant_net.connect([condenser, 'outflow'], [reactor, 'coolant-inflow'])
    plant_net.connect([reactor, 'RCIS-outflow'], [rcis, 'coolant-inflow'])
    plant_net.connect([rcis, 'coolant-outflow'], [reactor, 'RCIS-inflow'])
    #plant_net.connect([rcis, 'signal-in'], [reactor, 'signal-out'])

    plant_net.draw(engine='dot', node_shape='folder')
    #*****************************************************************************
    # Run network dynamics simulation
    plant.run()

    #*****************************************************************************
    # Plot results

    if plot_results and (plant.use_multiprocessing or plant.rank == 0):

        # Reactor plots
        reactor = plant_net.modules[0]

        (quant, time_unit
         ) = reactor.neutron_phase.get_quantity_history('neutron-dens')
        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']')

        plt.grid()
        plt.savefig('shutdown-neutron-dens.png', dpi=300)

        (quant, time_unit
         ) = reactor.neutron_phase.get_quantity_history('delayed-neutrons-cc')
        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']')

        plt.grid()
        plt.savefig('shutdown-delayed-neutrons-cc.png', dpi=300)

        (quant, time_unit
         ) = reactor.coolant_outflow_phase.get_quantity_history('temp')

        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']')

        plt.grid()
        plt.savefig('shutdown-coolant-outflow-temp.png', dpi=300)

        (quant,
         time_unit) = reactor.reactor_phase.get_quantity_history('fuel-temp')

        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']')
        plt.grid()
        plt.savefig('shutdown-fuel-temp.png', dpi=300)

        # Turbine high pressure plots
        turbine_hp = plant_net.modules[1]

        (quant,
         time_unit) = turbine_hp.outflow_phase.get_quantity_history('power')

        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']',
                   title='High Pressure Turbine Power')
        plt.grid()
        plt.savefig('shutdown-turbine-hp-power.png', dpi=300)

        (quant,
         time_unit) = turbine_hp.outflow_phase.get_quantity_history('temp')

        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']',
                   title='High Pressure Turbine Outflow Temperature')
        plt.grid()
        plt.savefig('shutdown-turbine-hp-outflow-temp.png', dpi=300)

        # Turbine low pressure graphs
        turbine_lp1 = plant_net.modules[2]

        (quant,
         time_unit) = turbine_lp1.outflow_phase.get_quantity_history('power')

        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']',
                   title='Lower Pressure Turbine 1 Power')
        plt.grid()
        plt.savefig('shutdown-turbine-lp1-power.png', dpi=300)

        (quant,
         time_unit) = turbine_lp1.outflow_phase.get_quantity_history('temp')

        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']',
                   title='Lower Pressure Turbine 1 Outflow Temperature')
        plt.grid()
        plt.savefig('shutdown-turbine-lp1-outflow-temp.png', dpi=300)

        # Condenser graphs
        condenser = plant_net.modules[4]

        (quant,
         time_unit) = condenser.outflow_phase.get_quantity_history('temp')

        quant.plot(x_scaling=1 / unit.minute,
                   x_label='Time [m]',
                   y_label=quant.latex_name + ' [' + quant.unit + ']')
        plt.grid()
        plt.savefig('shutdown-condenser-outflow-temp.png', dpi=300)

    # Shutdown The Simulation
    plant.close()
Example #13
def simple_stack_bins(folder, compo='ZZ', stack_method='linear', df=20.):
    """ Stacking function to stack bins together
    The output file will be saved in bins/cc_average/
    and sorted by stations.

    :param folder: The folder where the cc bins are (daily?)
    :type folder: str
    :param compo: The component to compute
    :type compo: str
    :param stack_method: 'linear' or 'pws'
    :type stack_method: str
    :param df: sampling rate
    :type df: int, float
    """

    params = get_params()
    if not folder.endswith('/'):
        folder += '/'
    sourcesta = os.path.dirname(folder).split('/')[-1]
    fileout = params['corrType'] + '.' + sourcesta + '.' + compo
    folderout = '%s/bins/cc_average' % params['WORKDIR']
    out = os.path.join(folderout, fileout)
    print(folder)
    os.makedirs(folderout, exist_ok=True)

    bins = sorted(glob.glob(folder + '*%s*' % compo))
    print(len(bins),
          ' CC for component %s for station %s' % (compo, sourcesta))

    for ibin, bin in enumerate(bins):
        day = os.path.basename(bin)
        try:
            matrix = np.load(bin)
        except:
            continue
        if ibin == 0.:
            divider = np.ones(np.shape(matrix)[0])
            if glob.glob(out + '_*.npy') != []:
                print('>> One stack file already exist: continue.')
                stacker = np.load(bin, allow_pickle=True)
            else:
                print('>> New stacking...')
                stacker = matrix
            continue
        else:
            for icc, cc in enumerate(matrix):
                if np.all(cc == 0.):
                    continue
                else:
                    try:
                        stacker[icc] = stack(np.vstack((stacker[icc], cc)), \
                            stack_method=stack_method, df=df)
                    # divider[icc]+=1.
                    except Exception as e:
                        print(e)
        print(sourcesta, day)
    #for i in np.arange(len(stacker)):
    #    stacker[i] /= divider[i]

    np.save(out + '.%s' % ibin, stacker)

    del matrix, stacker
Example #14
# Simple script to test the condenser
from params import get_params
from condenser import Condenser

params = get_params()
params['start-time'] = 0
params['end-time'] = 0

condenser = Condenser(params)
condenser.tester()

Example #15
from load_dataset import load_dataset
from params import get_params, get_possible_configurations

import traceback

import tux

params, hyperparams_list = get_params()

configs = get_possible_configurations(hyperparams_list)

print(params)
print(hyperparams_list)

df = load_dataset(params["nb_yes"])
params["dataset"] = df

if len(configs) > 0:
    for config in configs:

        for k, v in config.items():
            params["hyperparams"][k] = v

        try:
            ml = tux.TuxML(**params)
        except Exception as e:
            print(traceback.format_exc())
            print(e)
        print("Starting")

        try:
Example #16
import sys
import logging

import torchvision.utils as vutils
from torch.autograd import Variable

## Import GANs ##
from GANs import *
## Import Classifiers ##
from classifiers import *
## Import utility functions ##
from utils import progress_bar, init_params, weights_init
from params import get_params
from dataset import get_dataset
from plotter import Plotter

## Hyper parameters ##
opt = get_params()

## Logger ##
logger = logging.getLogger()
file_log_handler = logging.FileHandler(opt.logfile)
logger.addHandler(file_log_handler)

stderr_log_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stderr_log_handler)

logger.setLevel('INFO')
formatter = logging.Formatter()
file_log_handler.setFormatter(formatter)
stderr_log_handler.setFormatter(formatter)

logger.info(opt)
Example #17
fun_grad = theano.function(
    [model_ft.varin, model_ft.models_stack[-1].vartruth],
    T.grad(
        model_ft.models_stack[-1].cost() +
        model_ft.models_stack[-1].weightdecay(weightdecay),
        model_ft.models_stack[-1].params))


def return_grad(test_params, input_x, truth_y):
    tmp = get_params(model_ft.models_stack[-1])
    set_params(model_ft.models_stack[-1], test_params)
    result = numpy.concatenate(
        [numpy.array(i).flatten() for i in fun_grad(input_x, truth_y)])
    set_params(model_ft.models_stack[-1], tmp)
    return result


p, g, numlinesearches = minimize(get_params(model_ft.models_stack[-1]),
                                 return_cost,
                                 return_grad,
                                 (train_x.get_value(), train_y.get_value()),
                                 logreg_epc,
                                 verbose=False)
set_params(model_ft.models_stack[-1], p)
save_params(
    model_ft,
    'ZLIN_4000_1000_4000_1000_4000_1000_4000_10_normhid_nolinb_cae1_dropout.npy'
)
print "***error rate: train: %f, test: %f" % (train_set_error_rate(),
                                              test_set_error_rate())

#############
# FINE-TUNE #
#############
Example #18
def main():
    param = get_params("/data/zjy/csqa_data", "/home/zhangjingyao/preprocessed_data_10k")
    pre_process(param)
Example #19
                print(f.name(), ':', f.stringValue(), ',  ')
                if (f.name() == "wiki_id"):
                    pids.append(f.stringValue())
            print('')
        print('-------------------------------------\n')
        return pids


if __name__ == "__main__":
    ls = LuceneSearch()
    # ls.search("United States")
    # ls.search("India")# Q15975440 Q2060630 Q1936198  Q668 Q274592
    # ls.search("River")# Q20862204 Q20863315 Q7322321 Q11240974 Q2784912
    pids = ls.search(
        "Yangtze")  # Q5099535 Q3674754 Q3447500 Q1364589 Q19601344
    params = get_params("/data/zjy/csqa_data",
                        "/home/zhangjingyao/preprocessed_data_10k")
    wikidata, item_data, prop_data, child_par_dict = load_wikidata(
        params["wikidata_dir"])  # data for entity ,property, type
    for pid in pids:
        if pid in wikidata:
            for prop in wikidata[pid]:
                if prop in prop_data:
                    print(pid + ":" + item_data[pid],
                          prop + ":" + prop_data[prop], [
                              e + ":" + item_data[e]
                              for e in wikidata[pid][prop]
                          ])

        else:
            print(pid, "not in wikidata")
Example #20
    return StackedBeams


def stackbeamfiles(list_beamfiles):
    for iff, f in enumerate(list_beamfiles):
        b = sio.loadmat(f)['beam']
        if iff == 0:
            superstack = b
        else:   
            superstack += b
    superstack /= iff+1

    return superstack

if __name__ == '__main__':
    p = get_params()
    import scipy.io as sio
    binfile = np.load(p['datdir']+'daily.NL.2015.330.20sps.Z.npy')
    get_new_binfile(binfile, p['stations'], p['receivers'])


    freq = [0.5]
    for f in freq:
        g = glob.glob('out/beam_DA.2016.*_%s.mat'%f)
    superstack = stackbeamfiles(g)
    a = sio.loadmat(g[0])
    theta = a['theta']
    freqs = a['freqs']
    slowness = a['slowness']
    sio.savemat('superstackbeam_%s.mat'%freq[0], {'beam':superstack, 'theta':theta, 
        'freqs':freqs, 'slowness':slowness, 'f':freq[0]})
Example #21


#===============================================================================
# Read the input and generate the plots
#===============================================================================


data = []
data2 = defaultdict(lambda: defaultdict(list))

for j, file in enumerate(files):

    print(file)
    id = int(file[-8:-4])
    params = get_params(id)
    if not params: continue

    run_id, nepochs, gamma_tot, \
    rE_true, closest_star_approach, rE_sample, num_samples = params

    try:
    #if 1:
        vals = loadtxt(file)    
        minL = argmin(vals[:,1])
        maxL = argmax(vals[:,1])
        b,e = vals[0], vals[-1]

        d = [rE_true,closest_star_approach,vals,b,e,minL,maxL]
        data2[nepochs][gamma_tot].append(d)
Example #22
def optimize_hyperparameters(self, out=""):
    
    my_params = get_params(self.use_kernels,
                           self.use_means)
    
    print("optimizing hyper-parameters (%i)..." % len(my_params['means']))
    
    mean_params = my_params["means"]
    std_params = my_params["stds"]
    bounds = my_params["bounds"]
    use_log = my_params["use_log"]
    
    init_params = self.params
    
    print("initialization of parameters :")
    print(my_params["names"])
    print(np.round(self.params, 2).tolist())
    
    
    def get_neg_log_likelihood(params,
                               *args,
                               **kwargs):
        
        self.params = list(params)
        
        mu = self.compute_mu(Xtesting=self.X_training())
        K = self.compute_K(XX=self.X_training())
        
        Ycentered = self.Y_training() - mu
        
        L = np.linalg.cholesky(K)
        log_det_K = 2 * np.sum(np.log(np.diag(L)))  # log|K| via Cholesky
        
        aux = np.linalg.solve(L, Ycentered.T)
        YcenteredTxK_inv = np.linalg.solve(L.T, aux).T
        
        log_likelihood = -0.5 * np.dot(YcenteredTxK_inv, Ycentered) - 0.5 * log_det_K
        
        neg_log_likelihood = -log_likelihood
        
        return neg_log_likelihood
    
    def get_neg_log_posterior(params,
                              *args,
                              **kwargs):
        
        log_likelihood = -np.array(get_neg_log_likelihood(params))

        log_prior = 0
        for i in range(len(params)):
            if use_log[i]:
                x = np.log(params[i])
                mu = np.log(mean_params[i])
                log_prior -= x
            else:
                x = params[i]
                mu = mean_params[i]
            
            log_prior += scipy.stats.norm.logpdf(x,
                                                 loc=mu,
                                                 scale=std_params[i])
        
        log_posterior = log_likelihood + log_prior
        
        neg_log_posterior = -log_posterior
        
        return neg_log_posterior
        
    if self.estimator.lower() == "mle":
        fitness_function = get_neg_log_likelihood
        
    elif self.estimator.lower() == "map":
        fitness_function = get_neg_log_posterior
    
    else:
        raise ValueError("%s estimator is not implemented: should be 'MLE' or 'MAP'" % self.estimator)

    theta = scipy.optimize.fmin_l_bfgs_b(fitness_function,
                                         init_params,
                                         approx_grad=True,
                                         bounds=bounds,
                                         maxiter=300,
                                         disp=1)
    
    params_found = theta[0]
    if self.estimator == "MLE":
        score = get_neg_log_likelihood(params_found)
    else:
        score = get_neg_log_posterior(params_found)
    
    print('done')
    print('Parameters found:')
    for k in range(len(my_params["names"])):
        print(my_params["names"][k] + " : " + str(params_found[k]))
    print("Score found : " + str(score))
    print('-' * 50)
    
    if out:
            
        with open(out, 'a', newline='') as csvfile:
            my_writer = csv.writer(csvfile, delimiter='\t',
                                   quoting=csv.QUOTE_MINIMAL)
            my_writer.writerow(['-' * 50])
            my_writer.writerow([self.use_kernels,
                                self.use_means,
                                self.variable,
                                self.estimator])
            my_writer.writerow(list(my_params["names"]))
            my_writer.writerow(list(np.round(params_found, 4)))
            my_writer.writerow([self.estimator,
                                score])
    
    return params_found.tolist()
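The use_log branch in get_neg_log_posterior implements a lognormal prior by change of variables: log lognorm_pdf(x; mu, s) = log norm_pdf(log x; mu, s) - log x, which is exactly where the log_prior -= x term (with x = log(params[i])) comes from. A quick numerical check of that identity:

import numpy as np
import scipy.stats

x, mu, s = 2.5, np.log(3.0), 0.7  # arbitrary test values
direct = scipy.stats.lognorm.logpdf(x, s, scale=np.exp(mu))
via_normal = scipy.stats.norm.logpdf(np.log(x), loc=mu, scale=s) - np.log(x)
assert np.isclose(direct, via_normal)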
Example #23
'''
dw = 0.02
w = arange(-5.0, 5.0, dw)

omega = 1.0

lamb = 1.0
alpha = sqrt(omega**2*lamb)
g = alpha/sqrt(omega)
idelta = 0.030j
dens = 0.7

SC = 1
'''

Nw, Nk, beta, iwm, vn, dw, w, omega, lamb, alpha, g, idelta, dens, SC = params.get_params(
    int(sys.argv[1]))

print('beta %1.3f' % beta)
print('alpha = %1.3f' % alpha)
print('omega = %1.3f' % omega)
print('lamb = %1.3f' % lamb)
print('g = %1.3f' % g)
print('lamb correct = %1.3f' % (2 * g**2 / (8.0 * omega)))
print('Nk = %d' % Nk)
print('delta = %1.3f' % idelta.imag)

folder = 'data2d/data_sc_renormalized_%db%d_lamb%1.1f_beta%1.1f_idelta%1.3f/' % (
    Nk, Nk, lamb, beta, idelta.imag)
if not os.path.exists(folder):
    os.mkdir(folder)
					print "cmd:",				 cmd

					# execute
					os.system(cmd)

					ap 												 = np.loadtxt("tmp.txt")
					dic_res[q_name+"_"+str(i)] = ap
					print ap
					print

					# append
					ap_list.append(ap)
			
		return ap_list

if __name__ == "__main__":

	params  = get_params()
	E 	    = Evaluator(params)
	ap_list = E.run_evaluation()
	
	print "\n\n"
	print "====="

	for ap in ap_list:
		print "ap:", ap
		
	print "====="
	print "mAP:", np.mean(ap_list)
	
	print "\n\n"
Example #25
import argparse
import logging
import configparser
import pandas as pd

# Import custom modules
import stt
import tts
import luis_scoring as luis
import params as pa
import helper as he
import evaluate as eval
''' COMMAND EXAMPLES '''
# python .\src\glue.py --do_synthesize --input input/scoringfile.txt

# Parse arguments
parser = argparse.ArgumentParser()
args = pa.get_params(parser)

# Set arguments
fname = args.input
audio_files = args.audio
do_synthesize = args.do_synthesize
do_scoring = args.do_scoring
do_transcribe = args.do_transcribe
do_evaluate = args.do_evaluate

# Get config from file
pa.get_config()

# Set logging level to INFO
logging.getLogger().setLevel(logging.INFO)
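Unlike most examples here, pa.get_params(parser) receives an existing ArgumentParser and returns the parsed args. A hedged sketch of what such a helper might look like, registering only the flags this script reads (input, audio, do_synthesize, do_scoring, do_transcribe, do_evaluate):

import argparse

def get_params(parser):
    # Hypothetical sketch: register the flags read above, then parse.
    parser.add_argument("--input", type=str, default=None)
    parser.add_argument("--audio", type=str, default=None)
    parser.add_argument("--do_synthesize", action="store_true")
    parser.add_argument("--do_scoring", action="store_true")
    parser.add_argument("--do_transcribe", action="store_true")
    parser.add_argument("--do_evaluate", action="store_true")
    return parser.parse_args()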
Example #26
def return_cost(test_params, input_x, truth_y):
    tmp = get_params(model_ft.models_stack[-1])
    set_params(model_ft.models_stack[-1], test_params)
    result = fun_cost(input_x, truth_y)
    set_params(model_ft.models_stack[-1], tmp)
    return result
Example #28
import numpy as np
import astropy.units as u

from cosmo import cosmo
from params import get_params
from schechterfn import SchechterLfn, SchechterMfn
from dVols import dVols


params = get_params()

def visibilecut(x, xlim, type='mag'):
  '''
  Function to return unity if M <= M_lim or L >= L_lim; otherwise, zero.
  '''

  if type == 'mag':
    return  np.piecewise(x, [x >  xlim, x <= xlim], [0., 1.])

  elif type == 'lum':
    return  np.piecewise(x, [x >= xlim, x <  xlim], [1., 0.])

  else:
    raise  ValueError("Requested type is not available")

def mlimitedM(z, mlim, M_standard=None, kcorr=True):
  ''' 
  Return M_lim (L_lim) in units of M_standard (L_standard) for given redshift 
  and apparent mag. limit.  Here, M_standard is e.g. M* for the Schechter fn. 
  '''
Example #29
def main():
    parser = argparse.ArgumentParser(description="Arg parser for Coal-HMM")
    parser.add_argument("-f",
                        action="store",
                        dest="f",
                        type=str,
                        required=False)
    parser.add_argument("-out",
                        action="store",
                        dest="out",
                        type=str,
                        required=True)
    parser.add_argument("-c",
                        action="store",
                        dest="chromosome",
                        type=str,
                        required=True)
    parser.add_argument("--rerun", action="store_true", required=False)
    parser.add_argument("--outgroup", action="store_true", required=False)

    args = parser.parse_args()
    intervals_file = args.out

    if args.rerun:
        sequences = get_multiple_alignments(args.chromosome)
    else:
        sequences = read_seqs(args.f)

    for align_index, alignments in enumerate(sequences):
        (s, u, v1, v2), init_ps, (a, b, c, a_t, b_t,
                                  c_t), mu = get_params(len(alignments[1][0]))
        print("Parameters: ", s, u, v1, v2, init_ps, (a, b, c, a_t, b_t, c_t))

        transition_probabilities = {
            "HC1": {
                "HC1": np.log(1 - 3 * s),
                "HC2": np.log(s),
                "HG": np.log(s),
                "CG": np.log(s),
            },
            "HC2": {
                "HC1": np.log(u),
                "HC2": np.log(1 - u - 2 * v1),
                "HG": np.log(v1),
                "CG": np.log(v1),
            },
            "HG": {
                "HC1": np.log(u),
                "HC2": np.log(v1),
                "HG": np.log(1 - u - v1 - v2),
                "CG": np.log(v2),
            },
            "CG": {
                "HC1": np.log(u),
                "HC2": np.log(v1),
                "HG": np.log(v2),
                "CG": np.log(1 - u - v1 - v2),
            },
        }
        initial_probabilities = dict(zip(STATES, init_ps))

        emission_probabilities = {}
        for i in range(4):
            st = STATES[i]
            if i == 0:
                emission_probabilities[st] = likelihood(i, a, b, c, mu)
            else:
                emission_probabilities[st] = likelihood(i, a_t, b_t, c_t, mu)

        # Viterbi
        sequence, p = viterbi(
            alignments[1],
            transition_probabilities,
            emission_probabilities,
            initial_probabilities,
        )
        intervals = find_intervals(sequence)
        # Write intervals
        with open(intervals_file, "a") as f:
            for i in range(4):
                f.write("%s\n" % (STATES[i]))
                f.write("\n".join([("(%d,%d)" % (start, end))
                                   for (start, end) in intervals[i]]))
                f.write("\n")
        print("{} Viterbi probability: {:.2f}".format(alignments[0], p))

        # Compute posterior probs
        F, likelihood_f, B, likelihood_b, R, R_combined = forward_backward(
            alignments[1],
            transition_probabilities,
            emission_probabilities,
            initial_probabilities,
        )
        # Save posterior probs
        # np.savetxt(
        #     "posteriors.{}.csv".format(align_index), R, delimiter=",", fmt="%.4e"
        # )
        print("{} Forward likelihood: {:.2f}".format(alignments[0],
                                                     likelihood_f))

        # get genealogy from divergent sites
        divergent_info = divergent_sites(alignments[1], args.outgroup)
        # Plot
        for i in range(4):
            plot(
                R[i],
                R_combined[i - 1],
                intervals,
                i,
                align_index,
                [int(i) for i in alignments[0][1:-1].split(",")],
                args.chromosome,
                divergent_info,
            )
Example #30
def main():
    recipe_file = "recipes/earth.json"

    params.get_params('params/params.ini', recipe_file)

    structure.structure(6371000.0)
Example #31
    if plot_nums:
        plot_title += '\n # trials plotted: ' + str(num_plotted)
    # Formatting...
    ax.set_xlabel('Time (ms)')
    ax.set_ylabel(y_label)
    plt.axvline(x=0, lw=0.5, color='0')
    plt.xlim((x[0], x[-1]))
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])
    ax.legend(bbox_to_anchor=(1, 1.04), frameon=False)
    plt.title(plot_title)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.savefig(make_path(plot_fname,
                          '.png',
                          out_dir=out_dir,
                          base_name=base_name),
                bbox_inches='tight',
                dpi=600)
    # TODO in the plot include info on how many trials were plotted.
    # TODO Plot a vertical line @ 0.


if __name__ == '__main__':
    params = params.get_params(sys.argv)
    fname = select_files('.pkl')[0]
    params['out_dir'] = os.path.dirname(fname)
    print(fname)
    e = load_pkl(fname)
    plot_conds(e, **params)
Example #32
compute_quantities = True  # True -> needs compute_EIF_output set to True
save_rate_mod = False  # True to save linear rate response functions,
# False is default to save memory
save_EIF_output = True
save_quantities = True
plot_filters = False  # True -> needs EIF_output
plot_quantities = False

# location for saving/loading
folder = os.path.dirname(os.path.realpath(__file__))  # directory for the files
# currently the same directory as for the script itself
output_filename = 'EIF_output_for_cascade.h5'
quantities_filename = 'quantities_cascade.h5'

# PREPARE ---------------------------------------------------------------------
params = get_params()  # loads default parameter dictionary

#params['t_ref'] = 0.0  # refractory period can be >0 (but not all reduced models
#                              in the paper support this)

# choose a plausible range of values for mu and sigma:
# (which range is plausible depends on the neuron model parameter values)
# e.g., for mu from -1 to 5 with spacing 0.025 mV/ms,
# for sigma from 0.5 to 5 with spacing 0.1 mV/sqrt(ms)
N_mu_vals = 350  #241
N_sigma_vals = 64
mu_vals = np.linspace(-1.0, 5.0, N_mu_vals)
sigma_vals = np.linspace(0.5, 5.0, N_sigma_vals)
# these values above were used to generate the files EIF_output_for_cascade.h5
# and quantities_cascade.h5 available on Github
Example #33
def main():
    start = time.perf_counter()
    params = get_params()

    datasetD = make_seq_2_seq_dataset(params)
    train_x = datasetD['train']['x']
    train_y = datasetD['train']['y']
    test_x = datasetD['test']['x']
    test_y = datasetD['test']['y']

    input_window_samps = params.input_window_length_samples
    num_signals = params.num_signals
    output_window_samps = params.output_window_length_samples
    train_X = np.array(train_x, dtype=float)
    train_Y = np.array(train_y, dtype=float)
    nsamples, nx = train_X.shape
    print(nsamples)
    print(nx)
    train_Y = np.reshape(train_Y,
                         (nsamples, output_window_samps * num_signals))

    test_X = np.array(test_x, dtype=float)
    print(test_X.shape)
    test_Y = np.array(test_y, dtype=float)
    test_sample, test_nx = test_X.shape
    test_Y = np.reshape(test_Y,
                        (test_sample, output_window_samps * num_signals))
    print(test_Y.shape)

    train_Y_SVR = train_Y[:, 8:]
    test_Y_SVR = test_Y[:, 8:]

    model_SVR = MultiOutputRegressor(SVR(kernel='rbf', degree=3))
    model_SVR.fit(train_X, train_Y_SVR)
    result_SVR = model_SVR.predict(test_X)
    score_SVR = metrics.mean_squared_error(test_Y_SVR, result_SVR)
    f = open(
        'C:/Users/HD1047208/OneDrive - Bose Corporation/Desktop/data/Hang_SVR.pickle',
        'wb')
    pickle.dump(model_SVR, f)
    f.close()
    print(score_SVR)
    plt.figure()

    plt.plot(np.arange(len(result_SVR)),
             result_SVR[:, 0],
             'r-',
             label='predict value_x0')
    plt.plot(np.arange(len(result_SVR)),
             result_SVR[:, 1],
             'b-',
             label='predict value_y0')
    plt.plot(np.arange(len(result_SVR)),
             result_SVR[:, 2],
             'g-',
             label='predict value_z0')
    plt.plot(np.arange(len(result_SVR)),
             result_SVR[:, 3],
             'y-',
             label='predict value_w0')
    plt.plot(np.arange(len(result_SVR)),
             test_Y_SVR[:, 0],
             'k-',
             label='test value_x0')
    plt.plot(np.arange(len(result_SVR)),
             test_Y_SVR[:, 1],
             'm-',
             label='test value_y0')
    plt.plot(np.arange(len(result_SVR)),
             test_Y_SVR[:, 2],
             'c-',
             label='test value_z0')
    plt.plot(np.arange(len(result_SVR)),
             test_Y_SVR[:, 3],
             color='orange',
             linestyle='-',
             label='test value_w0')  # 'k' already marks test value_x0, so use a distinct color
    plt.xlabel('length')
    plt.ylabel('result')
    plt.title('SVR prediction')
    plt.legend()
    plt.show()
    print(result_SVR.shape)
    print(test_Y.shape)
    print(time.perf_counter() - start)
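A note on the model choice above: SVR itself supports only a single target column, so MultiOutputRegressor clones the base estimator and fits one independent copy per output. A minimal, self-contained illustration (random data, not from the dataset used above):

import numpy as np
from sklearn.multioutput import MultiOutputRegressor
from sklearn.svm import SVR

rng = np.random.default_rng(0)
X = rng.random((100, 5))
Y = rng.random((100, 4))  # 4 target columns -> 4 independent SVR fits

model = MultiOutputRegressor(SVR(kernel='rbf')).fit(X, Y)
print(model.predict(X[:2]).shape)  # (2, 4)
print(len(model.estimators_))      # 4, one fitted SVR per column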
Example #34
from loadecog import load_ecog
from plotscatter import plot_scatter, plot_scatter_der
from plotline import plot_line

import os
import sys
import time

import numpy as np
import pickle
import scipy.io as spio
from scipy.io import loadmat

import matplotlib.pyplot as plots
# get_params, select_files and select_folder are assumed to come from the
# same project as loadecog/plotscatter/plotline

if __name__ == '__main__':
    t0 = time.time()
    params = get_params(sys.argv)
    all_channels = params['all_channels']

    #select files
    pupil_path_raw = select_files('Select Pupil Data')
    pupil_path = pupil_path_raw[0]

    if all_channels:
        ecog_path = select_files('Select ECoG Data')
        print(ecog_path)
    else:
        folder = select_folder(prompt='Select your folder')
        file_names = [
            f for f in os.listdir(folder) if ('mat' in f) and 'pupil' in f
        ]
        ecog_path = list()
Example #35

# gradient of (cost + weight decay) w.r.t. the top layer's parameters
fun_grad = theano.function(
    [model_ft.varin, model_ft.models_stack[-1].vartruth],
    T.grad(model_ft.models_stack[-1].cost() + model_ft.models_stack[-1].weightdecay(weightdecay),
           model_ft.models_stack[-1].params)
)
def return_grad(test_params, input_x, truth_y):
    # temporarily swap in the trial parameters, evaluate the gradient as one
    # flat vector, then restore the original parameters
    tmp = get_params(model_ft.models_stack[-1])
    set_params(model_ft.models_stack[-1], test_params)
    result = numpy.concatenate([numpy.array(i).flatten() for i in fun_grad(input_x, truth_y)])
    set_params(model_ft.models_stack[-1], tmp)
    return result
p, g, numlinesearches = minimize(
    get_params(model_ft.models_stack[-1]), return_cost, return_grad,
    (train_x.get_value(), train_y.get_value()), logreg_epc, verbose=False
)
set_params(model_ft.models_stack[-1], p)
save_params(model_ft, 'ZLIN_4000_1000_4000_1000_4000_1000_4000_10_normhid_nolinb_cae1_dropout.npy')
print "***error rate: train: %f, test: %f" % (
    train_set_error_rate(), test_set_error_rate()
)

#############
# FINE-TUNE #
#############

"""
print "\n\n... fine-tuning the whole network"
truth = T.lmatrix('truth')
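get_params/set_params are not shown on this page; the minimize call above requires them to flatten all parameter arrays of a layer into one vector, and to write such a vector back. A plausible sketch of that contract (names here are illustrative, not the source's API):

import numpy as np

def get_flat_params(param_arrays):
    # concatenate every parameter array into a single 1-D vector
    return np.concatenate([np.asarray(p).ravel() for p in param_arrays])

def set_flat_params(param_arrays, flat):
    # slice the flat vector back into the original shapes
    restored, offset = [], 0
    for p in param_arrays:
        shape = np.asarray(p).shape
        size = int(np.prod(shape))
        restored.append(flat[offset:offset + size].reshape(shape))
        offset += size
    return restored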
Example #36
import itertools
import os
import random
import sys
from collections import deque, namedtuple

import gym  # needed for gym.envs.make below
import numpy as np
import scipy
import tensorflow as tf

import params
import plotting
from nn_utils import attention

if "../" not in sys.path:
    sys.path.append("../")

_config = params.get_params()

_env = gym.envs.make(_config.env_name)

# VALID_ACTIONS = [0, 1, 2, 3]
VALID_ACTIONS = list(range(_env.action_space.n))

# Create directories
if not os.path.exists(_config.checkpoint_dir):
    os.makedirs(_config.checkpoint_dir)
if not os.path.exists(_config.monitor_path):
    os.makedirs(_config.monitor_path)

vocabulary = []
with open(_config.vocab_path, 'r') as f:
    for index, line in enumerate(f):
        vocabulary.append(line.strip())  # assumed: one vocabulary token per line
Example #38
import time

import symbolics  # project-local module providing the Symbolics executor
# get_params and QuestionParser are also project-local helpers


def test_file(root, f):
    params = get_params("/data/zjy/csqa_data",
                        "/home/zhangjingyao/preprocessed_data_10k")
    qa_path = root + f
    qa_file = open(qa_path)
    qa_result = open(qa_path[:-4] + "_result.txt", "w+")
    qa_result.truncate()
    question_parser = QuestionParser(params, True)
    sym_seq = []
    flag = 0
    qa_id = 0
    for line in qa_file:
        if line.startswith("symbolic_seq.append"):
            flag = 1
            key = line[line.find("{") +
                       1:line.find('}')].split(':')[0].replace('\"',
                                                               '').strip()
            val = line[line.find("{") + 1:line.find('}')].split(':')[1].strip()
            val = val.replace('[', '').replace(']', '').replace("\'",
                                                                "").split(',')

            sym_seq.append({key: val})
        if line.startswith("response_entities"):
            count = 0

            answer_entities = line.replace("response_entities:",
                                           '').strip().split("|")
        if line.startswith("orig_response"):
            orig_response = line.replace("orig_response:", '').strip()

        if (line.startswith("-----------") and flag == 1):
            time_start = time.time()
            symbolic_exe = symbolics.Symbolics(sym_seq)
            answer = symbolic_exe.executor()

            if isinstance(answer, dict):
                temp = []
                for key, value in answer.items():
                    if value:
                        temp.extend(list(value))
                answer = temp
            elif isinstance(answer, (list, set)):
                answer = sorted(list(answer))
            elif isinstance(answer, int):
                answer = [answer]
            else:
                answer = [answer]
            time_end = time.time()

            if (orig_response == "None") and answer == []:
                answer = ['None']
                answer_entities = ['None']

            if len(answer) > 500:
                print(("answer is :", list(answer)[:500]),
                      end="",
                      file=qa_result)
            else:
                print(("answer is :", list(answer)), end="", file=qa_result)
            print(('time cost:', time_end - time_start),
                  end="",
                  file=qa_result)
            for e in answer_entities:
                if (e in answer):
                    count += 1

            print(("orig:", len(answer_entities), "answer:", len(answer),
                   "right:", count),
                  end="",
                  file=qa_result)
            print('===============================', end="", file=qa_result)
            flag = 0
            sym_seq = []

        if ("response"
            ) in line or line.startswith("context_utterance") or line.replace(
                "\n", "").isdigit() or "state" in line:
            print((
                qa_result,
                line,
            ), end="", file=qa_result)
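To see what the brace-slicing in test_file extracts, here is a worked example on a made-up log line (the key and entity IDs are illustrative):

line = "symbolic_seq.append({\"A1\": ['Q148', 'Q30']})"
key = line[line.find("{") + 1:line.find('}')].split(':')[0].replace('"', '').strip()
val = line[line.find("{") + 1:line.find('}')].split(':')[1].strip()
val = val.replace('[', '').replace(']', '').replace("'", "").split(',')
print({key: val})  # {'A1': ['Q148', ' Q30']} -- note the unstripped space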
Example #39
import params
from ksptrack.siamese import train_autoencoder, train_dec, train_init_clst, train_siam


def main(cfg):
    train_autoencoder.main(cfg)
    train_init_clst.main(cfg)

    train_siam.main(cfg)

    # train_dec.main(cfg)

    train_siam.main(cfg)


if __name__ == "__main__":

    p = params.get_params()

    p.add('--out-root', required=True)
    p.add('--in-root', required=True)
    p.add('--train-dir', required=True)
    p.add('--run-dir', required=True)
    p.add('--init-cp-fname')

    cfg = p.parse_args()
    main(cfg)
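The p.add(...)/p.parse_args() calls suggest that params.get_params() returns a configargparse-style parser (in configargparse, add is an alias for add_argument). A hypothetical sketch under that assumption; the real ksptrack version defines many more options:

import configargparse

def get_params():
    p = configargparse.ArgParser()
    # example options only, not taken from the source
    p.add('--cuda', action='store_true')
    p.add('--epochs', type=int, default=100)
    return p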
Example #40
    # Do this so that we can have params.py in the current directory, but have
    # ml.py in a different one.
    sys.path.insert(0, ".")

    # ---------------------------------------------------------------------------
    # params.py is generated by run_ml.py. It contains all the parameter
    # combinations that will be explored. Each invocation of ml.py receives a
    # different id, which is passed to the get_params() function defined in
    # params.py to get the correct parameters.
    # ---------------------------------------------------------------------------
    # both forms below bind get_params from the generated params.py; the
    # second, dynamic form simply re-binds the same function
    from params import get_params

    get_params = __import__("params", globals(), locals(), ["get_params"], 0).get_params

    run_id, nepochs, gamma_tot, rE_true, closest_star_approach, rE_sample, num_samples = get_params(id)

    # ---------------------------------------------------------------------------
    # Global parameters.
    # ---------------------------------------------------------------------------

    beta = 0.20  # arcsec - Basis function normalization
    Nbases = 20  # sqrt(Number of basis functions)
    grid_phys = 2.0  # arcsec - Physical size across grid

    grid_radius = 35  # pixels
    grid_size = 2 * grid_radius + 1  # pixels

    cell_size = grid_phys / grid_size  # arcsec/pixel
    star_mask_size = 1 * cell_size  # arcsec
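As the comment block above explains, params.py is machine-generated. A hypothetical sketch of what run_ml.py might emit, with get_params(id) indexing into a pre-built grid (names and values are illustrative; the real file returns seven values, including closest_star_approach, rE_sample and num_samples):

# params.py (hypothetical, generated by run_ml.py)
import itertools

_grid = list(itertools.product(
    [500, 1000],        # nepochs
    [1.0e12, 5.0e12],   # gamma_tot
    [0.01, 0.02],       # rE_true
))

def get_params(run_id):
    nepochs, gamma_tot, rE_true = _grid[run_id]
    return run_id, nepochs, gamma_tot, rE_true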