Example #1
def mini_topsim():
    """
    Reads the Simulation parameters, starts the sim, plots and writes to file

    the first sys.argv[1] is the simulation time 
    the second sys.argv[2] is the timestep

    if no sys arguments are passed the simulation starts with tend=10 and dt=1
    creates a Surface Object and starts the simulation. 
    the correct timestep is calculated with the timestep function 
    from the advance module. 
    Writes all calculated datapoints to a file with the 
    filenname: basic_<tend>_<dt>.srf
    plots the simulation fpr t = 0 and t = tend

    """
    print('Running miniTopSim ...')

    if len(sys.argv) > 1:
        config_filename = sys.argv[1]
    else:
        config_filename = './config1.cfg'

    config_file = os.path.join(os.path.dirname(__file__), config_filename)

    if not config_file.endswith('.cfg'):
        print('Error: Incorrect config.')
        sys.exit()

    filename = config_file[:-4] + '.srf'

    if os.path.exists(filename):
        os.remove(filename)

    par.load_parameters(config_file)

    tend = par.TOTAL_TIME
    dt = par.TIME_STEP

    surface = Surface()
    time = 0

    while time < tend:
        surface.write(time, filename)
        dtime = timestep(dt, time, tend)
        advance(surface, dtime)
        time += dtime

    surface.write(time, filename)

    if par.PLOT_SURFACE:
        plot.plot(filename)
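The loop above relies on the timestep function from the advance module so the last step does not overshoot tend. A minimal sketch of such a helper, assuming its only job is to clamp the final step (the real module may apply further criteria):

def timestep(dt, time, tend):
    # Return the regular step size, or the remaining time if dt would overshoot tend.
    return dt if time + dt <= tend else tend - time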
Example #2
def mini_topsim():
    """
    Reads the Simulation parameters, starts the sim, plots and writes to file

    the first sys.argv[1] is the config file name.

    if no sys argument is passed the programm will stop.
    Writes all calculated datapoints to a file with the 
    filenname: <config_file_name>.srf

    """
    print('Running miniTopSim ...')

    if len(sys.argv) > 1:
        config_filename = sys.argv[1]
    else:
        print("Error: No config passed.")
        sys.exit()

    config_file = os.path.join(os.path.dirname(__file__), config_filename)

    if not config_file.endswith('.cfg'):
        print('Error: Incorrect config.')
        sys.exit()

    filename = os.path.splitext(config_file)[0] + '.srf'

    if os.path.exists(filename):
        os.remove(filename)

    par.load_parameters(config_file)

    tend = par.TOTAL_TIME
    dt = par.TIME_STEP

    surface = Surface()
    time = 0

    while time < tend:
        surface.write(time, filename)
        dtime = timestep(dt, time, tend)
        advance(surface, dtime)
        time += dtime

    surface.write(time, filename)

    if par.PLOT_SURFACE:
        plot.plot(filename)
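A minimal way to run this from the command line, assuming the function lives in a script such as mini_topsim.py (the name is illustrative) and is guarded in the usual way:

if __name__ == '__main__':
    mini_topsim()   # e.g.: python mini_topsim.py config1.cfg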
Example #3
def main(_):
  config=parameters.load_parameters()
  eval_config= copy.deepcopy(config)
  eval_config.batch_size=1
  config_json = json.dumps(vars(config), indent=4, sort_keys=True)
  #print ("config",config_json) 

  Train,Dev,Test,vocab = reader.file2seqid(config)
  pretrain_embedding = generate_embeddings(config.glove_path,vocab, config.glove_dir) 
  with tf.Graph().as_default():
    #initializer = tf.contrib.layers.xavier_initializer(uniform=True,seed=None,dtype=tf.float32)
    initializer = tf.random_uniform_initializer(-config.init_scale,config.init_scale)
    with tf.name_scope("Train"):
      with tf.variable_scope("Model", reuse=None, initializer=initializer):
        m = MyModel(is_training=True, config=config,pretrain_embedding=pretrain_embedding)
    
    with tf.name_scope("Valid"):
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mvalid = MyModel(is_training=False,config=eval_config,pretrain_embedding=pretrain_embedding)

    with tf.name_scope("Test"):
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mtest = MyModel(is_training=False, config=eval_config,pretrain_embedding=pretrain_embedding)
      
 
    sv = tf.train.Supervisor()  # logdir is used to store checkpoints and summaries
    with sv.managed_session() as session:
      print ("model params",np.sum([np.product([xi.value for xi in x.get_shape()]) for x in tf.trainable_variables()]))

      model_existed=False
      if config.restore_model !=None:
        print ("restore model from: %s "%(config.restore_model))
        sv.saver.restore(session, config.restore_model)
        model_existed=True
      else:
        #ckpt = tf.train.get_checkpoint_state(config.save_path)
        ckpt_file = os.path.join(config.log_path, config.save_path) 
        ckpt = tf.train.get_checkpoint_state(ckpt_file)
        if ckpt and ckpt.model_checkpoint_path: 
          print ("restore model from: %s "%(ckpt.model_checkpoint_path))
          sv.saver.restore(session, ckpt.model_checkpoint_path)
          init_step = int(ckpt.model_checkpoint_path.rsplit('-',1)[1])
          model_existed=True
        
       #dev_acc,dev_loss,_,_,dev_pred,dev_label,dev_pred_total,dev_true_total= run_epoch(session,data=Dev,model=mvalid,config=eval_config)
       #print("Epoch: %d dev_acc: %.3f dev_loss %.3f" % (i + 1, dev_acc,dev_loss))
      if model_existed==True:
        test_acc,test_loss,_,_,test_pred_label,test_true_label = run_epoch(session, data=Test,model=mtest,config=eval_config)
        print("\n\n-----------------------testing!!!-----------------------" )
        print("test_acc: %.3f test_loss %.3f" % (test_acc,test_loss))
        print("\nthe confuse_matrix of test:\n")
        print(confuse_matrix(true_label_total=test_true_label,pred_label_total=test_pred_label,config=config))
        #label_input(true_label=test_true_label,
        #      pred_label=test_pred_label,
        #      input_file=config.test_file,
        #      output_file=FLAGS.label_out_file)   

      else:
        print ("pretrained model does not exist")
Example #4
def read_parameters(alternative=False):
    global attribute_skill_dict
    global armor_names
    global attribute_names
    global character_names
    global notes_names
    global parameter_names
    param_dict = load_parameters(alternative=alternative)
    attribute_skill_dict = param_dict["skill"]
    armor_names = param_dict["armor"]
    attribute_names = param_dict["attrib"]
    character_names = param_dict["character"]
    notes_names = param_dict["notes"]
    parameter_names = param_dict["param"]
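A short usage sketch, assuming load_parameters returns a dict with exactly the keys accessed above and that callers read the module-level names afterwards:

read_parameters()                  # populate the module-level name lists
print(armor_names)                 # inspect e.g. the armor names that were loaded
read_parameters(alternative=True)  # reload from the alternative parameter set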
Example #5
import numpy as np
import re
import random
import json
import collections
import parameters as params
import pickle

FIXED_PARAMETERS = params.load_parameters()

LABEL_MAP = {"entailment": 0, "neutral": 1, "contradiction": 2, "hidden": 0}

PADDING = "<PAD>"
UNKNOWN = "<unk>"


def load_nli_data(path, snli=False, lower=True):
    """
    Load MultiNLI or SNLI data.
    If the "snli" parameter is set to True, a genre label of snli will be assigned to the data. 
    """
    data = []
    with open(path) as f:
        for line in f:
            loaded_example = json.loads(line)
            if lower:
                loaded_example['sentence1_binary_parse'] = loaded_example[
                    'sentence1_binary_parse'].lower()
                loaded_example['sentence2_binary_parse'] = loaded_example[
                    'sentence2_binary_parse'].lower()
            if loaded_example["gold_label"] not in LABEL_MAP:
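For orientation, each input line is expected to be a single JSON object containing at least the fields accessed above. A hypothetical, abbreviated line (not taken from any corpus) could look like this:

example_line = json.dumps({
    "gold_label": "entailment",
    "sentence1_binary_parse": "( a ( dog ( runs . ) ) )",
    "sentence2_binary_parse": "( an ( animal ( moves . ) ) )",
})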
Example #6
def main(_):
  config=parameters.load_parameters()
  eval_config= copy.deepcopy(config)
  eval_config.batch_size=1
  config_json = json.dumps(vars(config), indent=4, sort_keys=True)

  if not os.path.exists(config.log_path):
    os.makedirs(config.log_path)

  if config.test:
    logpath = "{}/{}".format(config.log_path, config.save_path) + "_test.log"
  else:
    logpath = "{}/{}".format(config.log_path, config.save_path) + ".log"
	
  logger = Logger(logpath)

  ckpt_file = os.path.join(config.log_path, config.save_path) + "/model"


  logger.Log ('config: %s'%(config_json,)) 

  Train,Dev,Test,vocab = reader.file2seqid(config)
  pretrain_embedding = generate_embeddings(config.glove_path,vocab, config.glove_dir) 
  tf.set_random_seed(config.seed)
  with tf.Graph().as_default():
    #initializer = tf.contrib.layers.xavier_initializer(uniform=True,seed=None,dtype=tf.float32)
    initializer = tf.random_uniform_initializer(-config.init_scale,config.init_scale)
    with tf.name_scope("Train"):
      with tf.variable_scope("Model", reuse=None, initializer=initializer):
        m = MyModel(is_training=True, config=config,pretrain_embedding=pretrain_embedding)
    
    with tf.name_scope("Valid"):
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mvalid = MyModel(is_training=False,config=eval_config,pretrain_embedding=pretrain_embedding)

    with tf.name_scope("Test"):
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mtest = MyModel(is_training=False, config=eval_config,pretrain_embedding=pretrain_embedding)
    
    #sv = tf.train.Supervisor(logdir=config.save_path)
    sv = tf.train.Supervisor()
    with sv.managed_session() as session:
      logger.Log("\n\nmodel params:%s"%(np.sum([np.product([xi.value for xi in x.get_shape()]) for x in tf.trainable_variables()])))
      t0=time.time()
 
      best_accuracy = config.best_accuracy
      best_val_epoch = config.best_val_epoch
      last_change_epoch = 0

      best_test_acc=0
      best_test_epoch=0
      
      #global train_acc
      for i in range(config.MAXITER):
        start_time=time.time()
        train_acc,train_loss,train_global_step,learning_rate,train_pred_total,train_true_total= run_epoch(session,data=Train, model=m,config=config, eval_op=m.optim, verbose=True)
        logger.Log("Epoch: %d train_acc: %.3f train_loss %.3f train_global_step:%s" % (i,train_acc,train_loss,train_global_step))

        dev_acc,dev_loss,_,_,dev_pred_total,dev_true_total= run_epoch(session,data=Dev,model=mvalid,config=eval_config)
        logger.Log("Epoch: %d dev_acc: %.3f dev_loss %.3f" % (i, dev_acc,dev_loss))

      
        sys.stdout.flush()
        # if <= then update 
        if best_accuracy <= dev_acc:
          best_accuracy = dev_acc
          best_val_epoch = i
          if config.save_path:
            logger.Log("Saving model %d to %s." % (i,ckpt_file))
            
            sv.saver.save(session,ckpt_file, global_step=train_global_step)

        if (i - best_val_epoch > config.update_learning)and(i-last_change_epoch>config.change_epoch):
          if learning_rate>config.min_lr:
            lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
            new_learning_rate = config.learning_rate * lr_decay
            last_change_epoch= i
            logger.Log("learning_rate-->change!Dang!Dang!Dang!-->%.10f"%(new_learning_rate))
            m.assign_lr(session,new_learning_rate)

          logger.Log (time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()))

        
        end_time=time.time()
        logger.Log("-------- all_training time: %s one_epoch time: %s\n " % ((end_time-t0)//60, (end_time-start_time)//60))
        if i - best_val_epoch > config.early_stopping:
          logger.Log ("best_val_epoch:%d  best_val_accuracy:%.3f"%(best_val_epoch,best_accuracy))
          logging.info("Normal Early stop")
          logger.Log (time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()))
          break        
        elif i == config.MAXITER-1:
          logger.Log ("best_val_epoch:%d  best_val_accuracy:%.3f"%(best_val_epoch,best_accuracy))
          logging.info("Finishe Training")

      logger.Log("\n\n-----------------------testing!!!-----------------------" )
      #### evaluate on the test set
      #1. restore the best parameters
      #2. test on the test set and logger.Log confusion matrix 
      #ckpt = tf.train.get_checkpoint_state(config.save_path)
      ckpt = tf.train.get_checkpoint_state(ckpt_file)
      if ckpt and ckpt.model_checkpoint_path: 
        #init_step = int(ckpt.model_checkpoint_path.rsplit('-',1)[1])
        logger.Log ("restore best model:%s for testing:"%(ckpt.model_checkpoint_path))
        sv.saver.restore(session, ckpt.model_checkpoint_path)

      test_acc,test_loss,_,_,test_pred_total,test_true_total = run_epoch(session, data=Test,model=mtest,config=eval_config)
      logger.Log ("best_test_accuracy:%.3f test_loss %.3f "%(test_acc,test_loss))
      logger.Log("\nthe confuse_matrix of test:\n")
      logger.Log(confuse_matrix(true_label_total=test_true_total,pred_label_total=test_pred_total,config=config))
      logger.Log (time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()))
Example #7
def mini_topsim(config_file=None):
    """
    Loads parameters from config_file, starts the sim, plots and writes to file

    :param config_file: config_file with simulation parameters


    Loads parameters from config_file.   
    If no config_file is passed passed, None is returned.
    Creates a Surface Object and starts the simulation. 
    The correct timestep is calculated with the timestep function 
    from the advance module. 
    
    If a *.srf_save file with the same filename exists, the plot function with
    both surfaces is called.

    """
    print('Running miniTopSim ...')

    if config_file is None:
        if len(sys.argv) > 1:
            config_filename = sys.argv[1]
        else:
            sys.exit('No Config file passed')
            #config_filename = 'cosine.cfg'

        config_file = config_filename

    if not config_file.endswith('.cfg'):
        print('Error: Incorrect config.')
        sys.exit()

    filename = os.path.splitext(config_file)[0] + '.srf'

    if os.path.exists(filename):
        os.remove(filename)

    par.load_parameters(config_file)
    dir_path = os.path.dirname(os.path.realpath(config_file))
    par.INITIAL_SURFACE_FILE = os.path.join(dir_path, par.INITIAL_SURFACE_FILE)

    tend = par.TOTAL_TIME
    dt = par.TIME_STEP

    surface = Surface()

    sputter.init_sputtering()
    time = 0
    start_simulation_time = currenttime()

    while time < tend:
        surface.write(time, filename)
        dtime = timestep(dt, time, tend)
        advance(surface, dtime)
        surface.eliminate_overhangs()
        time += dtime

    stop_simulation_time = currenttime()
    simulation_time = stop_simulation_time - start_simulation_time
    print('The Simulation took: {}s'.format(float(simulation_time)))
    surface.write(time, filename)

    filename_save = filename + '_save'

    if par.PLOT_SURFACE:
        if os.path.exists(filename_save):
            print('*.srf_save file exists... plotting both!')
            plot.plot(filename, filename_save)
        else:
            plot.plot(filename)
Example #8
#!/usr/bin/python

from pprint import pprint
import yaml

import parameters
import update_rules

print(yaml.dump(update_rules.RProp()))

p = parameters.load_parameters('mnist-vanilla.yaml')

print(p.dataset)
Example #9
            time = np.fromstring(data[3*n+1])
            omega = np.fromstring(data[3*n+2])
            u_set.add(param[0])
            V_set.add(param[1])
            Cr_set.add(param[2])
            k_set.add(param[3])
            Ce_set.add(param[4])
            lamb_set.add(param[5])
            time_omega = np.transpose(np.vstack((time, omega)))
            tab[len(u_set)-1, len(V_set)-1, len(Cr_set)-1, len(k_set)-1, len(Ce_set)-1, len(lamb_set)-1] = time_omega
    return tab, u_array, V_array, Cr_array, k_array, Ce_array, lamb_array


if __name__ == '__main__':
    C_e_command = 20
    _J, _C_r, _R, _k, _V, _C_e, _lamb = parameters.load_parameters()
    om_t = simulation.om_theo(_C_r, _R, _k, 9, C_e_command, _lamb)

    # ============| Data display |=================
    _N_samples = 1000
    _V_array = np.array([9] * _N_samples)

    t, v, o, c, p = simulation.euler(simulation.f, 0, 20, _N_samples, 0, _V_array, C_e=C_e_command)

    k = ind_reg_perm(o, om_t)
    print(f"Indice du régime permanent : {k}")
    print(f"Temps auquel regime permanent atteint : {t[k]:.3f}")
    print(f"Comparaison des valeurs : {om_t:.4f} vs {o[k]:.4f}")

    save_o, save_t = val_sauv_om_t(o, t, k, 100)
    print("Nombre de valeurs retenues : ", len(save_o))
Example #10
import matplotlib.pyplot as plt


config_filename="test_overhangs.cfg"
config_file = os.path.join(os.path.dirname(__file__), config_filename)

if not config_file.endswith('.cfg'):
    print('Error: Incorrect config.')
    sys.exit()

filename = os.path.splitext(config_file)[0] + '.srf'

if os.path.exists(filename):
    os.remove(filename)

par.load_parameters(config_file)

#Example Surface
surface = Surface()
surface.xvals=[0.,1.,2.,1.,1.5,3.,4.,5.,4.,5.,6.,7.,8.,7.,7.5,8.,9.]
surface.yvals=[10.,10.,10.,15.,17.,18.,18.,18.,12.,13.,13.,12.,11.,10.,9.,8.,8.]

plt.title('Eliminate Overhangs Test')
plt.ylabel('Surface Y values')
plt.xlabel('Surface X values')
plt.plot(surface.xvals, surface.yvals, 'b-', label='With Overhangs')

surface.eliminate_overhangs()

plt.plot(surface.xvals, surface.yvals, 'r-', label='Without Overhangs')
plt.legend()
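The snippet above only exercises eliminate_overhangs; the method itself is not shown. One simple way to remove overhangs is to clip the x-values with a running maximum so the profile never folds back on itself; a minimal sketch, not necessarily the project's actual implementation:

import numpy as np

def eliminate_overhangs_sketch(xvals, yvals):
    # Force the x-values to be non-decreasing; the y-values are kept unchanged.
    return np.maximum.accumulate(np.asarray(xvals, dtype=float)), list(yvals)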
Example #11
from PyQt5.QtWidgets import (QPushButton, QWidget, QAction, QSpacerItem, QSizePolicy, QVBoxLayout, QHBoxLayout, QLabel,
                             QScrollArea, QLineEdit, QCheckBox, QMenu, QComboBox, QTextEdit, QTableWidget, QHeaderView,
                             QToolButton)
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QValidator, QColor, QDoubleValidator
# from parameters import translate, translate
from parameters import load_parameters, translate, is_types, is_type
from item_classes import BaseObject

param_dict = load_parameters()
armor_names = param_dict["armor"]


line_edit_style = "QLineEdit { background: rgba(255, 255, 255, 100); border-width: 0px;\
 alternate-background-color: rgba(200,200,200,50); font: bold 10px; margin: 0px;}"


class ScrollContainer(QWidget):
    item_created = pyqtSignal(str, object)
    item_equipped = pyqtSignal(str, object, bool)
    item_removed = pyqtSignal(str, object)
    item_edited = pyqtSignal(str, object)

    def __init__(self, name, button_text, content_widget, popup=None, label=None, **kwargs):
        QWidget.__init__(self)
        # self.parent = parent
        self.name = name
        self.kwargs = kwargs
        self.layout = QVBoxLayout()
        self.label = QLabel(translate(name))
Example #12
import parameters
from six.moves import xrange
import numpy as np
_PAD="_PAD"
_UNK= "_UNK"
_GO= "_GO"
_EOS= "_EOS"
_START_VOCAB=[_PAD,_UNK,_GO,_EOS]
LABEL_MAP={"neutral":0,"entailment":1,"entails":1,"contradiction":2,"hidden":-1,"NEUTRAL":0,"ENTAILMENT":1,"CONTRADICTION":2}

PAD_ID=0
UNK_ID=1
GO_ID =2
EOS_ID =3
#config=Config.load_parameters() #return config from a function of Config.py
config=parameters.load_parameters() #return config from a function of parameters.py
def filter_length(seq,maxlen):
  if len(seq)>maxlen:
    new_seq=seq[:maxlen]
  else:
    new_seq=seq
  return new_seq

def load_data(train,vocab,labels=LABEL_MAP):
    X,Y,Z=[],[],[]
    #f_l=open("seq.txt","w+")
    for p,h,l in train:
        p=map_to_idx(tokenize(p),vocab)+ [EOS_ID]
        h=[GO_ID]+map_to_idx(tokenize(h),vocab)+ [EOS_ID]
        p=filter_length(p,config.xmaxlen)
        h=filter_length(h,config.ymaxlen)
Example #13
import re
import random
import json
import gzip
import pickle
import collections
import numpy as np
from tqdm import tqdm
import nltk
from nltk.corpus import wordnet as wn
import os
import multiprocessing
import parameters

config = parameters.load_parameters()


def generate_embeddings(pretrain_embedding_path, word_indices,
                        embedding_file_name):
    embedding_dir = config.embedding_dir
    if not os.path.exists(config.embedding_dir):
        os.makedirs(embedding_dir)

    #embedding_path = os.path.join(embedding_dir,embedding_file_name)
    embedding_path = embedding_file_name

    #logger.Log("embedding path:%s %r"%(embedding_path, os.path.exists(embedding_path)))
    print("embedding path:%s %r" %
          (embedding_path, os.path.exists(embedding_path)))
    if os.path.exists(embedding_path) and config.rebuiltGloveEmb == False:
Example #14
    """Profiling to get insight in performance issues."""
    import cProfile, pstats, io
    pr = cProfile.Profile()
    pr.enable()
    generate_commands()
    pr.disable()
    s = io.StringIO()
    ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
    ps.print_stats()
    logging.info(s.getvalue())
    s.close()


# Initialize the game.
game = Game()
load_parameters(game)
logging.info(param)

# Define some globals for convenience.
me = game.me
game_map = game.game_map

MapData(game, None)
while(time.time() - bot_start < 9.7 and DistanceCalculator.needs_precompute()):
    DistanceCalculator.precompute()

# Play the game.
game.ready("TeamSchildpad")
while True:
    game.update_frame()
    start = time.time()
Example #15
def mini_topsim_timing():
    """
    Reads the Simulation parameters, starts the sim, plots and writes to file

    the first sys.argv[1] is the config file name.

    if no sys argument is passed the programm will stop.
    Writes all calculated datapoints to a file with the
    filenname: <config_file_name>.srf

    """
    print('Running miniTopSim ...')

    if len(sys.argv) > 1:
        config_filename = sys.argv[1]
    else:
        print("Error: No config passed.")
        sys.exit()

    config_file = os.path.join(os.path.dirname(__file__), config_filename)

    if not config_file.endswith('.cfg'):
        print('Error: Incorrect config.')
        sys.exit()

    filename = os.path.splitext(config_file)[0] + '.srf'

    if os.path.exists(filename):
        os.remove(filename)

    par.load_parameters(config_file)

    tend = par.TOTAL_TIME
    dt = par.TIME_STEP
    par.DELTA_X = 10
    simulation_time_array = np.empty((2, 0))

    while par.DELTA_X > 0.2:
        # print(par.DELTA_X)
        surface = Surface()
        time = 0
        start_simulation_time = currenttime()
        while time < tend:
            surface.write(time, filename)
            dtime = timestep(dt, time, tend)
            advance(surface, dtime)
            time += dtime

        stop_simulation_time = currenttime()
        simulation_time = stop_simulation_time - start_simulation_time
        simulation_time_array = np.append(simulation_time_array,
                                          np.array(
                                              (int(100 / par.DELTA_X),
                                               simulation_time)).reshape(2, 1),
                                          axis=1)
        # print('The Simulation took: {}s'.format(float(simulation_time)))
        # print(np.array((int(100/par.DELTA_X))))
        # print(par.DELTA_X)
        surface.write(time, filename)
        par.DELTA_X = par.DELTA_X - 0.6
        # print(par.DELTA_X)

    plt.title('Simulation time vs. number of points')
    plt.plot(simulation_time_array[0], simulation_time_array[1], 'b+-')
    plt.xscale('log')
    plt.yscale('log')
    plt.xlabel('Number of points')
    plt.ylabel('Time in Seconds')
    plt.grid(which='both')
    plt.show()
    if par.PLOT_SURFACE:
        plot.plot(filename)
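To put a number on the scaling suggested by the log-log plot, one could fit a power law to the collected points; an optional sketch, assuming simulation_time_array holds (number of points, seconds) pairs as built above:

n_points, seconds = simulation_time_array
slope, _ = np.polyfit(np.log(n_points), np.log(seconds), 1)
print('Runtime scales roughly as N^{:.2f}'.format(slope))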