def main(global_config, **main_settings):
    from {{cookiecutter.project_slug}}.lib.factories.root import RootFactory

    init_settings(read_config(global_config['__file__']))
    init_sqlalchemy()

    auth_settings = settings['auth']
    authentication_policy = DefaultAuthenticationPolicy(
        secret=auth_settings['secret'],
        timeout=auth_settings.get('timeout'),
        reissue_time=auth_settings.get('reissue_time'),
        callback=get_principals,
        http_only=True,
        hashalg='sha512'
    )

    config = Configurator(
        authentication_policy=authentication_policy,
        authorization_policy=ACLAuthorizationPolicy(),
        root_factory=RootFactory,
        settings=settings
    )
    config.scan('{{cookiecutter.project_slug}}.handlers')
    config.add_request_method(get_authenticated_user, 'user', reify=True)

    init_cache(settings['app:main'])
    return config.make_wsgi_app()
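# Hedged sketch: the shape of the module-level `settings` mapping that main()
# above reads. The key names come from the code; the values are illustrative
# placeholders, not taken from the template.
settings = {
    'auth': {
        'secret': 'change-me',    # cookie-signing secret for the auth policy
        'timeout': 3600,          # optional: seconds until the auth ticket expires
        'reissue_time': 360,      # optional: seconds before the ticket is reissued
    },
    'app:main': {},               # passed through to init_cache()
}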
def __init__(self):
    self.config = read_config()
    self.move_list = self.config["move_list"]
    self.start = self.config["start"]
    self.goal = self.config["goal"]
    self.walls = self.config["walls"]
    self.pits = self.config["pits"]
    self.map_size = self.config["map_size"]
    # config data for mdp
    self.threshold_diff = self.config["threshold_difference"]
    self.max_iteration = self.config["max_iterations"]
    self.reward_step = self.config["reward_for_each_step"]
    self.reward_wall = self.config["reward_for_hitting_wall"]
    self.reward_goal = self.config["reward_for_reaching_goal"]
    self.reward_pit = self.config["reward_for_falling_in_pit"]
    self.discount_factor = self.config["discount_factor"]
    self.learning_factor = 0.001
    # initialize the map
    self.map = self.init_map_structure()
    self.robot_mover = RobotMover(self.map)
    self.compute_map_policy()
    self.policy_list = self.flatten_map(self.map)
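# Hedged sketch: this constructor (and several snippets below) assumes
# read_config() returns a mapping with a fixed set of keys. The key names are
# collected from the code; the values here are illustrative only.
EXAMPLE_CONFIG = {
    "move_list": [[0, 1], [0, -1], [1, 0], [-1, 0]],  # E, W, S, N
    "map_size": [3, 4],
    "start": [0, 0],
    "goal": [2, 3],
    "walls": [[1, 1]],
    "pits": [[1, 3]],
    "reward_for_each_step": -1,
    "reward_for_hitting_wall": -2,
    "reward_for_reaching_goal": 10,
    "reward_for_falling_in_pit": -10,
    "discount_factor": 0.9,
    "max_iterations": 100,
    "threshold_difference": 0.001,
    "prob_move_forward": 0.8,
    "prob_move_left": 0.1,
    "prob_move_right": 0.1,
    "prob_move_backward": 0.0,
}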
def main(h_tab, start_t, end_t):
    # if date == None:
    #     date = datetime.datetime.now().strftime('%Y-%m-%d')
    db_config = read_config('db_config')
    db = pymysql.connect(host=db_config["host"], user=db_config["user"],
                         password=db_config["password"], database=db_config["database"])
    cursor = db.cursor()  # use cursor() to obtain a cursor for executing SQL statements
    sql = "select distinct stock_id, stock_name from stock_trade_data " \
          "where stock_id like '%{}'".format(h_tab)
    cursor.execute(sql)
    stock_id_list = cursor.fetchall()
    # date_time = datetime.datetime.strptime(date, '%Y-%m-%d')
    # start_t = (date_time - datetime.timedelta(days=90)).strftime('%Y-%m-%d')
    # stock_id_list = (('600121', '郑州煤电'),)
    for ids_tuple in stock_id_list:
        ids = ids_tuple[0]
        print('ids:', ids)
        if ids[0:3] == '300' or ids[0:3] == '688':  # skip ChiNext / STAR Market codes
            continue
        stock_name = ids_tuple[1]
        # trade_code = re.sub('-', '', date[0:10]) + id
        sql = "SELECT trade_date FROM stock_trade_data " \
              "WHERE trade_date >= '{0}' AND trade_date <= '{1}' AND stock_id = '{2}' " \
              "AND increase >= 9.75".format(start_t, end_t, ids)
        cursor.execute(sql)
        date_res = cursor.fetchall()
        print('date_res:', date_res)
        if len(date_res) < 3:
            continue
        date_list = []
        for date in date_res:
            # print('date:', type(date[0]))
            date_list.append(date[0])
        date_list.sort(reverse=True)
        # date_list = [datetime.datetime(2019, 5, 31, 0, 0), datetime.datetime(2019, 3, 25, 0, 0), datetime.datetime(2019, 2, 21, 0, 0), datetime.datetime(2018, 11, 16, 0, 0), datetime.datetime(2018, 11, 15, 0, 0)]
        print('date_list:', date_list)
        i = 0
        delta = datetime.timedelta(days=3)
        count = 0
        while i < (len(date_list) - 2):
            date = date_list[i]
            date_second = date_list[i + 1]
            print('gap:', date, date_second)
            if date - date_second > delta:
                print('flag:', date_list[i].strftime("%Y%m%d"))
                if count >= 3:
                    trade_code = date_list[i].strftime("%Y%m%d") + ids
                    # trade_code = re.sub('-', '', date_res[i][0:10]) + ids
                    save(db, trade_code, ids, stock_name,
                         date_list[i].strftime("%Y-%m-%d"), count)
                count = 0  # reset the streak either way
                i += 1
            else:
                count += 1
                i += 1
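# Hedged sketch: the queries above splice values into SQL with str.format,
# which breaks on quoting and invites injection. The same first query using
# pymysql's parameter binding (%s is pymysql's paramstyle, not Python
# formatting):
sql = ("SELECT DISTINCT stock_id, stock_name FROM stock_trade_data "
       "WHERE stock_id LIKE %s")
cursor.execute(sql, ('%' + h_tab,))
stock_id_list = cursor.fetchall()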
def __init__(self):
    rospy.init_node("robot")
    # read the shared config once and pull out what the node needs
    config = read_config()
    self.move_list = config["move_list"]
    self.mapSize = config["map_size"]
    self.start = config["start"]
    self.goal = config["goal"]
    self.walls = config["walls"]
    self.pits = config["pits"]
    self.cost = config["reward_for_each_step"]
    rospy.sleep(1)

    self.pathPub = rospy.Publisher("/results/path_list", AStarPath, queue_size=10)
    self.completePub = rospy.Publisher("/map_node/sim_complete", Bool, queue_size=10)
    self.mdpPub = rospy.Publisher("/results/policy_list", PolicyList, queue_size=10)
    rospy.sleep(3)

    pathList = astar(self.move_list, self.mapSize, self.start, self.goal,
                     self.walls, self.pits, self.cost)
    print pathList
    for item in pathList:
        print item
        self.pathPub.publish(item)
        rospy.sleep(1)
    print "should publish"

    self.mdp = mdp()
    self.mdp.looping()
    self.QlearningEpsilon = QLearningEpislon()  # class name misspelled in the source; kept to match its definition
    self.QlearningLValue = QLearningLValue()
    self.QlearningWithUncertainty = QLearningWithUncertainty()
    self.QlearningEpsilon.learning()
    self.QlearningLValue.learning()
    self.QlearningWithUncertainty.learning()

    # publish each learner's policy in the original order
    for learner in (self.mdp, self.QlearningEpsilon,
                    self.QlearningLValue, self.QlearningWithUncertainty):
        self.mdpPub.publish(learner.convertToList())
        rospy.sleep(1)

    self.completePub.publish(True)
    rospy.sleep(1)
    rospy.signal_shutdown("finish")
def __init__(self):
    config = read_config()
    self.move_list = config["move_list"]
    self.mapSize = config["map_size"]
    self.start = config["start"]
    self.goal = config["goal"]
    self.walls = dict((tuple(el), 0) for el in config["walls"])
    self.pits = dict((tuple(el), 0) for el in config["pits"])
    self.cost = config["reward_for_each_step"]
    self.goalReward = config["reward_for_reaching_goal"]
    self.pitReward = config["reward_for_falling_in_pit"]
    self.wallReward = config["reward_for_hitting_wall"]
    self.factor = config['discount_factor']
    self.alpha = 0.2  # learning rate
    self.c = 15.0
    # self.move_foward = config["prob_move_forward"]
    # self.move_left = config["prob_move_left"]
    # self.move_right = config["prob_move_right"]
    # self.move_backward = config["prob_move_backward"]
    self.iteration = config["max_iterations"]
    # self.diff = config["threshold_difference"]
    self.QLearningMap = []
    self.moveString = {"E": (0, 1), "W": (0, -1), "S": (1, 0), "N": (-1, 0)}
    slot.initialMap(self.walls, self.pits, self.goal)
    for row in range(0, self.mapSize[0]):        # mapSize is [rows, cols]
        tempRow = []
        for column in range(0, self.mapSize[1]):
            tempRow.append(slot(row, column))
        self.QLearningMap.append(tempRow)
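# Hedged sketch: the constructor above wires up a learning rate (alpha = 0.2)
# and a discount factor but not the update itself. This is the standard
# tabular Q-learning backup such a setup usually drives; it is generic, not
# the class's actual learning() method.
def q_update(Q, state, action, reward, next_state, alpha, gamma):
    # Q maps state -> {action: value}
    best_next = max(Q[next_state].values())
    Q[state][action] += alpha * (reward + gamma * best_next - Q[state][action])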
command += f + " " os.system(command) print "\n. Results are packaged into results.tar.gz" ###################################################################### # # main # splash() """If the user gives a configpath, then it's referenced data will be imported into the database.""" configpath = ap.getOptionalArg("--configpath") if configpath != False: ap.params = read_config( configpath ) if False == validate_config(ap.params): print "\n. Error: something is wrong with your configuration file." print ". Look at previous errors for more detail." exit(0) """If the user didn't give a configpath, then we at least need a dbpath (to load a prior database).""" dbpath = ap.getOptionalArg("--dbpath") if configpath == False: print "\n. Error, you need to specify a configuration path, using --configpath" exit() """Regardless of which options are being executed, the DB gets
command += f + " " os.system(command) print "\n. Results are packaged into results.tar.gz" ###################################################################### # # main # splash() """If the user gives a configpath, then it's referenced data will be imported into the database.""" configpath = ap.getOptionalArg("--configpath") if configpath != False: ap.params = read_config(configpath) if False == validate_config(ap.params): print "\n. Error: something is wrong with your configuration file." print ". Look at previous errors for more detail." exit(0) """If the user didn't give a configpath, then we at least need a dbpath (to load a prior database).""" dbpath = ap.getOptionalArg("--dbpath") if configpath == False: print "\n. Error, you need to specify a configuration path, using --configpath" exit() """Regardless of which options are being executed, the DB gets built, or rebuilt, depending on its status.""" con = build_db(dbpath=dbpath)
import sys

from read_config import *
from observables import *
from dmft_cycle import *
from csc_flow import csc_flow_control
import toolset as toolset

# timing information (mpi and timer are provided by the framework imports above)
if mpi.is_master_node():
    global_start = timer()

# reading configuration for calculation
general_parameters = {}
solver_parameters = {}
if mpi.is_master_node():
    if len(sys.argv) > 1:
        print 'reading the config file ' + str(sys.argv[1])
        general_parameters, solver_parameters = read_config(str(sys.argv[1]))
        general_parameters['config_file'] = str(sys.argv[1])
    else:
        print 'reading the config file dmft_config.ini'
        general_parameters, solver_parameters = read_config('dmft_config.ini')
        general_parameters['config_file'] = 'dmft_config.ini'

    print '-------------------------- \n General parameters:'
    for key, value in general_parameters.iteritems():
        print "{0: <20}".format(key) + "{0: <4}".format(str(value))
    print '-------------------------- \n Solver parameters:'
    for key, value in solver_parameters.iteritems():
        print "{0: <20}".format(key) + "{0: <4}".format(str(value))

# broadcast the configuration to all MPI ranks
solver_parameters = mpi.bcast(solver_parameters)
general_parameters = mpi.bcast(general_parameters)
def __init__(self):
    config = read_config()
    self.move_list = config["move_list"]
    self.mapSize = config["map_size"]
    self.start = config["start"]
    self.goal = config["goal"]
    self.walls = config["walls"]
    self.pits = config["pits"]
    self.cost = config["reward_for_each_step"]
    self.goalReward = config["reward_for_reaching_goal"]
    self.pitReward = config["reward_for_falling_in_pit"]
    self.wallReward = config["reward_for_hitting_wall"]
    self.factor = config['discount_factor']
    self.move_foward = config["prob_move_forward"]
    self.move_left = config["prob_move_left"]
    self.move_right = config["prob_move_right"]
    self.move_backward = config["prob_move_backward"]
    self.iteration = config["max_iterations"]
    self.diff = config["threshold_difference"]
    self.MDPmap = []
    self.moveString = {(0, 1): "E", (0, -1): "W", (1, 0): "S", (-1, 0): "N"}
    # build a mapSize[0] x mapSize[1] grid of [value, label] cells
    for row in range(0, self.mapSize[0]):
        tempRow = []
        for column in range(0, self.mapSize[1]):
            tempRow.append([float(0), "Empty"])
        self.MDPmap.append(tempRow)
    # stamp the special cells with their rewards
    self.MDPmap[self.goal[0]][self.goal[1]] = [self.goalReward, "GOAL"]
    for wall in self.walls:
        self.MDPmap[wall[0]][wall[1]] = [self.wallReward, "WALL"]
    for pit in self.pits:
        self.MDPmap[pit[0]][pit[1]] = [self.pitReward, "PIT"]
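# Hedged sketch: the max_iterations / threshold_difference parameters above
# suggest value iteration over the [reward, label] grid. This simplified sweep
# folds the prob_move_* noise down to a deterministic move for brevity, so it
# is an illustration of the technique, not the class's real update.
def value_iteration_sweep(V, MDPmap, move_list, gamma, step_cost):
    # One synchronous Bellman backup; V is a grid of floats the same shape as MDPmap.
    rows, cols = len(MDPmap), len(MDPmap[0])
    newV = [row[:] for row in V]
    for r in range(rows):
        for c in range(cols):
            if MDPmap[r][c][1] != "Empty":
                continue  # GOAL / PIT / WALL cells keep their fixed reward
            best = float("-inf")
            for dr, dc in move_list:
                nr, nc = r + dr, c + dc
                # off-grid or wall moves bounce the robot back to (r, c)
                if not (0 <= nr < rows and 0 <= nc < cols) or MDPmap[nr][nc][1] == "WALL":
                    nr, nc = r, c
                # terminal neighbours contribute their stamped reward
                target = MDPmap[nr][nc][0] if MDPmap[nr][nc][1] in ("GOAL", "PIT") else V[nr][nc]
                best = max(best, step_cost + gamma * target)
            newV[r][c] = best
    return newV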
#!/usr/bin/env python
# read the config again and make libsvm's python bindings importable
from read_config import *

config = read_config("./experiment.config")
svm_dir = config["libsvm_path"]

import os
current_dir = os.path.abspath("./")
os.chdir(svm_dir)  # svmutil must be imported from inside the libsvm tree
import svmutil
os.chdir(current_dir)
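# Hedged alternative to the chdir dance above: extending sys.path reaches the
# same module without touching the working directory, assuming svm_dir points
# at libsvm's python/ folder.
import sys
sys.path.insert(0, svm_dir)  # make libsvm's python bindings importable in place
import svmutil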
import sys
from typing import Mapping, Any

from pyspark.sql import SparkSession

from {{cookiecutter.project_slug}}.config import read_config, setup_mlflow

spark: SparkSession = SparkSession.builder.appName('{{cookiecutter.project_name}}').getOrCreate()
conf: Mapping[str, Any] = read_config('config.yaml', sys.argv[1])
setup_mlflow(**conf)

# Add your training code here
import expandcoor
import read_config
import random_cluster
import energy
import time
from expandcoor import addcoor
from read_config import read_config, read_config1, read_config2
from random_cluster import random_cluster

if __name__ == '__main__':
    # t_start = time.time()
    # read the configuration file
    AL = read_config1('mp-2254.txt')
    O = read_config2('mp-2254.txt')
    aa = read_config('mp-2254.txt')
    alpha = aa['alpha']
    beta = aa['beta']
    gama = aa['gamma']
    a = aa['a']
    b = aa['b']
    c = aa['c']
    # the real-space 3-D coordinates
    AL_real = []
    O_real = []
    for i in AL:
        x = i[0] * a
        y = i[1] * b
        z = i[2] * c
        AL_real.append([x, y, z])
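    # Hedged continuation: the snippet ends before O_real is filled, so the O
    # sites presumably get the same scaling. Note that converting fractional to
    # Cartesian coordinates by a, b, c alone is only exact for an orthogonal
    # cell; a cell with general alpha/beta/gamma needs the full lattice matrix.
    for i in O:
        O_real.append([i[0] * a, i[1] * b, i[2] * c])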
def main(args):
    # read in config parameters and validate against spec file
    config_file = 'config.ini'
    configspec_file = 'configspec.ini'
    params = read_config(config_file, configspec_file)

    # generate the LA phonon eid kernel if required
    if params['run_params'].run_phonon_eid:
        laphononeid.eidkernel(params)

    # choose objective function from objective function dictionaries
    obj_gates = {
        'twolevel': obj_twolevel,
        'twolevel_dm': obj_twolevel_dm,
        'threelevel': obj_threelevel,
        'onedot': obj_onedot,
        'twodot': obj_twodot,
        'threedot': obj_threedot,
        'crot': obj_crot
    }
    obj_func = obj_gates[params['run_params'].gate]

    # choose amplitude and phase mask functions from the mask function dictionaries
    ampmasks = {
        'ampmask_chen': chenAmpMask,
        'mask_slmcc': slmccAmpMask,
        'ampmask_none': noAmpMask,
        'dichrome': dichromaticMask
    }
    phasemasks = {
        'phasemask_cos': cosinePhaseMask,
        'phasemask_poly': polyPhaseMask,
        'mask_slmcc': slmccPhaseMask,
        'phasemask_none': noPhaseMask
    }
    key = params['run_params'].run_ampmask
    if key in ampmasks:
        ampmaskfunc = ampmasks[key]
    else:
        print "amp mask not valid"
    key = params['run_params'].run_phasemask
    if key in phasemasks:
        phasemaskfunc = phasemasks[key]
    else:
        print "phase mask not valid"

    # read in configuration file and convert parameters to atomic Rydberg units
    x = read_params(config_file)
    x = convert_aru(x, config_file, TO_ARU)

    # run the optimization if requested, then compute the time dependence
    if params['run_params'].optimize == True:
        x = nloptimize(obj_func, ampmaskfunc, phasemaskfunc, params, config_file)
    fidelity = timedep(obj_func, ampmaskfunc, phasemaskfunc, params, config_file)

    # clean up directory
    os.system("rm *.pyc")
def show_read_config(self):
    self.read_config_widget = read_config()
    self.read_config_widget.show()
from {{cookiecutter.repo_name}}.inference import InferenceStage
# check_args_num, set_random_seed and read_config are assumed to come from the
# project's helper modules; they are not imported in this snippet.


def read_inp_file(filepath):
    raise NotImplementedError


def write_output(out, filepath):
    raise NotImplementedError


def apply_tfms(dataset, tfm_list):
    raise NotImplementedError


if __name__ == "__main__":
    _, config_file, *input_files, output_file, inference_file = \
        check_args_num(5, strict=False)
    set_random_seed()

    inp = [read_inp_file(inp_file) for inp_file in input_files]
    config = read_config(config_file)
    tfm_list = config.get('tfm_list')

    out = apply_tfms(inp, tfm_list)
    write_output(out, output_file)

    inference_stage = InferenceStage(input_list=input_files, params=config,
                                     transformers=tfm_list)
    inference_stage.save(inference_file)
from read_config import *

a = read_config()["pits"]
# pit = dict((tuple(el), 0) for el in read_config()["goal"])
wall = dict((tuple(el), 0) for el in read_config()["walls"])
wall[(5, 9)] = 1

print max(wall, key=wall.get)
print wall
if (5, 9) in wall:
    print True