def get_buffer(buffer_args: Config):
    """Instantiate the replay buffer described by `buffer_args`.

    :param buffer_args: config providing 'buffer_size', 'batch_size', 'type'
        and a per-type sub-config (read via buffer_args[_type].to_dict).
    :return: a buffer instance, or None when the algorithm is on-policy or
        needs no external data buffer.
    """
    if buffer_args.get('buffer_size', 0) <= 0:
        # BUG FIX: corrected typos in the log message ('sepecify'/'oustside').
        logger.info(
            'This algorithm does not need to specify a data buffer outside the model.'
        )
        return None
    # Imports stay lazy on purpose: only the selected implementation loads.
    _type = buffer_args.get('type', None)
    if _type == 'ER':
        logger.info('ER')
        from utils.replay_buffer import ExperienceReplay as Buffer
    elif _type == 'PER':
        logger.info('PER')
        from utils.replay_buffer import PrioritizedExperienceReplay as Buffer
    elif _type == 'NstepER':
        logger.info('NstepER')
        from utils.replay_buffer import NStepExperienceReplay as Buffer
    elif _type == 'NstepPER':
        logger.info('NstepPER')
        from utils.replay_buffer import NStepPrioritizedExperienceReplay as Buffer
    elif _type == 'EpisodeER':
        logger.info('EpisodeER')
        from utils.replay_buffer import EpisodeExperienceReplay as Buffer
    else:
        logger.info('On-Policy DataBuffer')
        return None
    return Buffer(batch_size=buffer_args['batch_size'],
                  capacity=buffer_args['buffer_size'],
                  **buffer_args[_type].to_dict)
def load_hmd_model(log, conf, team_id, svc_type):
    """
    Load HMD model
    :param log: Logger
    :param conf: Config
    :param team_id: TEAM_ID
    :param svc_type: SVC_TYPE
    :return: HMD model
    """
    log.info('--> Load HMD model')
    hmd_conf = Config()
    hmd_conf.init('brain-ta.conf')
    model_dir = hmd_conf.get('brain-ta.hmd.model.dir')
    team_model_path = '{0}/{1}__0.hmdmodel'.format(model_dir, team_id)
    if os.path.exists(team_model_path):
        # A team-specific model on disk takes precedence.
        hmd_model_name = team_id
    else:
        # Otherwise fall back to the per-service-type default model.
        if svc_type == 'H':
            hmd_model_name = conf.hmd_home_model_name
        elif svc_type == 'M':
            hmd_model_name = conf.hmd_mobile_model_name
        else:
            raise Exception(
                "Wrong format svc_type('H' or 'M') [{0}]".format(svc_type))
        fallback_path = '{0}/{1}__0.hmdmodel'.format(model_dir, hmd_model_name)
        if not os.path.exists(fallback_path):
            raise Exception('Not existed HMD model [{0}]'.format(fallback_path))
    return hmd.load_hmd_model(conf.hmd_cate_delimiter, hmd_model_name)
def buildCorrectedData(header, spot_data, gathering_data):
    """Align camera lines with XPS gathering samples and filter bad lines.

    Returns (positions, intensities) as numpy arrays, keeping only lines
    inside the FP travel window, moving forward, and within the configured
    intensity clamp limits.
    """
    position = gathering_data[:, 0]
    speed = gathering_data[:, 1]
    # factor to fix frequency difference between xps and tdi cam
    alpha = (10000 / header['LineFreq'])
    aligned_size = min(int((len(position) / alpha)), len(spot_data))
    spot_data = spot_data[1:aligned_size, :]
    kept_positions = []
    kept_rows = []
    for row_idx, intensities in enumerate(spot_data):
        try:
            # Map camera line index onto the XPS sample grid; a missing
            # sample means we ran past the gathering data — stop there.
            current_position = getFrequencyAlignedData(row_idx, alpha, position)
            current_speed = getFrequencyAlignedData(row_idx, alpha, speed)
        except IndexError:
            break
        # Keep only forward travel inside the FP window, and drop lines whose
        # intensity range falls outside the clamp limits (outliers).
        if Config.get('FP_START') <= current_position <= Config.get('FP_END') \
                and current_speed > 0 \
                and np.min(intensities) < Config.get('CLAMP_MAX_INTENSITY') \
                and np.max(intensities) >= Config.get('CLAMP_MIN_INTENSITY'):
            kept_positions.append(current_position)
            kept_rows.append(intensities)
    return np.array(kept_positions), np.array(kept_rows)
def __init__(self):
    """Open a listening TCP socket for the camera and start the demo program."""
    logger.debug("camera: started")
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listen_address = (Config.get("CAM_HOST"), Config.get("CAM_PORT"))
    self.socket.bind(listen_address)
    # Backlog of 10 pending connections.
    self.socket.listen(10)
    self.program = Neunkdemo()
class NlpClient(object): def __init__(self, args): self.args = args self.conf = Config() self.json_printer = JsonPrinter() self.conf.init('brain-ta.conf') if args.engine.lower() == 'nlp1': self.remote = 'localhost:{0}'.format(self.conf.get('brain-ta.nlp.1.kor.port')) channel = grpc.insecure_channel(self.remote) self.stub = nlp_pb2_grpc.NaturalLanguageProcessingServiceStub(channel) elif args.engine.lower() == 'nlp2': self.remote = 'localhost:{0}'.format(self.conf.get('brain-ta.nlp.2.kor.port')) channel = grpc.insecure_channel(self.remote) self.stub = nlp_pb2_grpc.NaturalLanguageProcessingServiceStub(channel) elif args.engine.lower() == 'nlp3': self.remote = 'localhost:{0}'.format(self.conf.get('brain-ta.nlp.3.kor.port')) channel = grpc.insecure_channel(self.remote) self.stub = nlp_pb2_grpc.NaturalLanguageProcessingServiceStub(channel) else: print 'Not existed Engine' raise Exception('Not existed Engine') def analyze(self, target_text): in_text = nlp_pb2.InputText() try: in_text.text = target_text except Exception: target_text = unicode(target_text, 'euc-kr').encode('utf-8') in_text.text = target_text in_text.lang = lang_pb2.kor in_text.split_sentence = True in_text.use_tokenizer = False in_text.use_space = self.args.space in_text.level = 0 in_text.keyword_frequency_level = 0 ret = self.stub.Analyze(in_text) # Result to Json format # json_text = json_format.MessageToJson(ret, True, True) # data = json.loads(json_text) # self.json_printer.pprint(data) result_list = list() for idx in range(len(ret.sentences)): nlp_word = str() morph_word = str() # text = ret.sentences[idx].text analysis = ret.sentences[idx].morps for ana_idx in range(len(analysis)): if analysis[ana_idx].type in ['VV', 'VA', 'VX', 'VCP', 'VCN']: nlp_word += ' {0}다'.format(analysis[ana_idx].lemma) morph_word += ' {0}다/{1}'.format(analysis[ana_idx].lemma, analysis[ana_idx].type) else: nlp_word += ' {0}'.format(analysis[ana_idx].lemma) morph_word += ' {0}/{1}'.format(analysis[ana_idx].lemma, 
analysis[ana_idx].type) nlp_sent = nlp_word.encode('utf-8').strip() morph_sent = morph_word.encode('utf-8').strip() result_list.append((target_text, nlp_sent, morph_sent)) return result_list
def __connect(self):
    """Open and return a TCP connection to the camera.

    :raises RuntimeError: when the connection cannot be established.
    """
    logger.debug("client: connect to camera")
    try:
        connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        connection.connect(
            (Config.get("CAM_HOST"), Config.get("CAM_PORT")))
    except Exception as e:
        logger.error("client: cannot connect to camera")
        raise RuntimeError(e)
    return connection
def __init__(self):
    """Attach to the already-running camera control program via pywinauto."""
    Timings.Slow()  # relax pywinauto timings for the slow camera UI
    self.base_path = Config.get("CAM_RESULT_PATH")
    # win32 backend: connect by the executable path of the running process.
    self.app = Application(backend="win32").connect(
        path=Config.get("CAM_PROGRAM_PATH"))
    self.dlg = self.app.Test9k
    # NOTE(review): assumes the buddy control of Spinner4 exposes the line
    # frequency as text — confirm against the actual dialog layout.
    self.frequency = self.dlg.Spinner4.get_buddy_control().text_block()
    return
def move_and_gather(self):
    """Run the configured number of FP sweeps while the XPS gathers data.

    :return: True on success.
    :raises RuntimeError: on any XPS error.
    """
    logger.debug("client: move fp %s times from %s to %s",
                 Config.get("ITERATIONS"), Config.get("FP_START"),
                 Config.get("FP_END"))
    self.init_event()
    try:
        self.xps.GatheringReset(self.socket_id)
        logger.debug("client: reset xps data")
        self.xps.EventExtendedStart(self.socket_id)
        for _ in range(Config.get("ITERATIONS")):
            # One full sweep: go to the start position, then to the end.
            for endpoint in ("FP_START", "FP_END"):
                self.xps.GroupMoveAbsolute(self.socket_id,
                                           Config.get("FP_GROUP"),
                                           [Config.get(endpoint)])
        self.xps.GatheringStopAndSave(self.socket_id)
        logger.debug("client: save xps data")
    except Exception as e:
        logger.error("client: XPS move error")
        logger.debug(e)
        raise RuntimeError(e)
    return True
def __init__(self):
    """Connect to the XPS motion controller and run the homing sequence.

    :raises RuntimeError: when the TCP connection cannot be established.
    """
    self.xps = XPS()
    self.socket_id = -1
    try:
        logger.debug("client: connect to xps %s:%s",
                     Config.get("XPS_HOST"), Config.get("XPS_PORT"))
        # 10-second connect timeout.
        self.socket_id = self.xps.TCP_ConnectToServer(
            Config.get("XPS_HOST"), Config.get("XPS_PORT"), 10)
        logger.debug("client: xps socket id: %s", self.socket_id)
    except Exception as e:
        # BUG FIX: the message contained a dangling %s with no argument.
        logger.error("client: cannot connect to xps")
        raise RuntimeError(e)
    self.__homing()
def test_money_list(self):
    """money_list endpoint: build the request, send it and verify the result."""
    # Step 1: URL
    wallet_list = Config(url_file).get('money_list')
    self.url = wallet_list.get(self.date_index)
    My_http.set_url(self.url)
    print('第一步:设置url:' + self.url)
    # BUG FIX: `token` was left undefined (NameError) for any flag other
    # than '0'/'1'; every non-'0' flag now behaves like the old '1' branch.
    if self.token == '0':
        token = Get_token().get_token()
    else:
        token = None
    # Step 2: headers
    headers = Config().get('headers')
    headers['jcobToken'] = token
    My_http.set_headers(headers)
    print('第二步:设置header(token等)')
    print(headers)
    # Step 3: params
    params = Config().get('params')
    My_http.set_params(params)
    print('第三步:设置params')
    print(params)
    # Step 4: send the request; extract the HTTP method from repr(request),
    # which looks like "<PreparedRequest [GET]>".
    self.return_json = My_http.get()
    print(self.return_json.json())
    request_repr = str(self.return_json.request)
    method = request_repr[request_repr.find('[') + 1:request_repr.find(']')]
    print("第四步:发送请求\n\t\t请求方法:" + method)
    # Step 5: verify
    self.check_result()
    print('第五步:检查结果')
def json_focus():
    """Build the 'focus' run description: 9 steps along the Y axis in 0.5 mm
    increments starting at y=5.0.

    :return: dict with 'id', 'desc' and a 'runs' list; only the first run
        carries 'velocity' and 'frequency'.
    """
    y = 5.0
    runs = {}
    runs['id'] = 'focus'
    runs['desc'] = 'Iteration entlang der Y-Achse zur Fokus-Kalibrierung'
    runs['runs'] = []
    for _ in range(0, 9):
        run = {}
        if len(runs['runs']) < 1:
            # Only the first run sets velocity and camera line frequency.
            run['velocity'] = Config.get('FP_VELOCITY')
            run['frequency'] = 9615
        # BUG FIX: a dead assignment of the full XYZ position list was
        # immediately overwritten here; only the Y group moves per run.
        run['position'] = [
            ('CAM_Y_GROUP', [y]),
        ]
        runs['runs'].append(run)
        y += .5
    return runs
class HmdClient(object):
    """gRPC client for the HMD classifier front service."""

    def __init__(self):
        self.conf = Config()
        self.conf.init('brain-ta.conf')
        remote = 'localhost:{0}'.format(self.conf.get('brain-ta.hmd.front.port'))
        channel = grpc.insecure_channel(remote)
        self.stub = hmd_pb2_grpc.HmdClassifierStub(channel)

    def set_model(self, model_name, target_list):
        """Upload an HMD model built from `target_list`.

        :param model_name: model identifier sent to the service
        :param target_list: iterable of dicts with 'category' (several
            categories packed into one string, separated by the literal
            '!@#$') and 'rule' keys
        """
        model = hmd_pb2.HmdModel()
        model.lang = lang_pb2.kor
        model.model = model_name
        rules_list = list()
        for item_dict in target_list:
            category = item_dict['category']
            rule = item_dict['rule']
            # '!@#$' is the ad-hoc delimiter packing multiple categories.
            category_list = category.split('!@#$')
            hmd_client = hmd_pb2.HmdRule()
            hmd_client.rule = rule
            hmd_client.categories.extend(category_list)
            rules_list.append(hmd_client)
        model.rules.extend(rules_list)
        self.stub.SetModel(model)
        # NOTE(review): model_key is constructed but never sent anywhere —
        # possibly a leftover from a removed GetModel/DeleteModel call;
        # confirm before removing.
        model_key = hmd_pb2.ModelKey()
        model_key.lang = lang_pb2.kor
        model_key.model = model_name
class NlpClient(object): def __init__(self, args): self.args = args self.conf = Config() self.conf.init('brain-ta.conf') if args.engine.lower() == 'nlp1': self.remote = 'localhost:{0}'.format( self.conf.get('brain-ta.nlp.1.kor.port')) channel = grpc.insecure_channel(self.remote) self.stub = nlp_pb2_grpc.NaturalLanguageProcessingServiceStub( channel) elif args.engine.lower() == 'nlp2': self.remote = 'localhost:{0}'.format( self.conf.get('brain-ta.nlp.2.kor.port')) channel = grpc.insecure_channel(self.remote) self.stub = nlp_pb2_grpc.NaturalLanguageProcessingServiceStub( channel) elif args.engine.lower() == 'nlp3': self.remote = 'localhost:{0}'.format( self.conf.get('brain-ta.nlp.3.kor.port')) channel = grpc.insecure_channel(self.remote) self.stub = nlp_pb2_grpc.NaturalLanguageProcessingServiceStub( channel) else: print 'Not existed Engine' raise Exception('Not existed Engine') def analyze(self, target_text): in_text = nlp_pb2.InputText() try: in_text.text = target_text except Exception: in_text.text = unicode(target_text, 'euc-kr').encode('utf-8') in_text.lang = lang_pb2.kor in_text.split_sentence = True in_text.use_tokenizer = False in_text.use_space = False in_text.level = 1 in_text.keyword_frequency_level = 0 try: ret = self.stub.Analyze(in_text) if ret: return True else: return False except Exception: return False
class HmdClient(object):
    """gRPC client for the HMD classifier front service (Python 2).

    Loads serialized models from disk and uploads rule files to the service.
    """

    def __init__(self, args):
        self.args = args
        self.conf = Config()
        self.conf.init('brain-ta.conf')
        remote = 'localhost:{0}'.format(
            self.conf.get('brain-ta.hmd.front.port'))
        channel = grpc.insecure_channel(remote)
        self.stub = hmd_pb2_grpc.HmdClassifierStub(channel)

    def load_hmd_model(self, model_name):
        """Read and parse a serialized HmdModel protobuf from the model dir.

        :raises Exception: when the file is missing or cannot be parsed.
        """
        model_path = '{0}/{1}__0.hmdmodel'.format(
            self.conf.get('brain-ta.hmd.model.dir'), model_name)
        if not os.path.exists(model_path):
            raise Exception(
                '[ERROR] Not existed HMD model. [{0}]'.format(model_path))
        try:
            in_file = open(model_path, 'rb')
            hm = hmd_pb2.HmdModel()
            hm.ParseFromString(in_file.read())
            in_file.close()
            return hm
        except Exception:
            raise Exception(traceback.format_exc())

    def set_model(self, model_name, target_file_path):
        """Build an HmdModel from a delimited rule file and upload it.

        Each line is split on args.file_delimiter; the last field is the
        rule, every preceding field a category. Lines with fewer than two
        fields are reported and skipped.
        """
        model = hmd_pb2.HmdModel()
        model.lang = lang_pb2.kor
        model.model = model_name
        rules_list = list()
        with open(target_file_path) as target_file:
            for line in target_file:
                line = line.strip()
                line_list = line.split(self.args.file_delimiter)
                if len(line_list) < 2:
                    print '[ERROR] Line field count at least two [{0}]'.format(
                        line)
                    continue
                hmd_client = hmd_pb2.HmdRule()
                hmd_client.rule = line_list[-1]
                hmd_client.categories.extend(line_list[:-1])
                rules_list.append(hmd_client)
        model.rules.extend(rules_list)
        self.stub.SetModel(model)
        # NOTE(review): model_key is constructed but never used — possibly a
        # leftover from a removed GetModel call; confirm before removing.
        model_key = hmd_pb2.ModelKey()
        model_key.lang = lang_pb2.kor
        model_key.model = model_name
def test_wallet_list(self):
    """wallet_list endpoint: build the request (GET or POST) and verify it."""

    def request_method():
        # repr(request) looks like "<PreparedRequest [GET]>"; extract the verb.
        request_repr = str(self.return_json.request)
        return request_repr[request_repr.find('[') + 1:request_repr.find(']')]

    # Step 1: URL
    wallet_list = Config(url_file).get('wallet_list')
    self.url = wallet_list.get(self.date_index)
    My_http.set_url(self.url)
    print('第一步:设置url:' + self.url)
    # BUG FIX: `token` was left undefined (NameError) for any flag other
    # than '0'/'1'; every non-'0' flag now behaves like the old '1' branch.
    if self.token == '0':
        token = Get_token().get_token()
    else:
        token = None
    # Step 2: headers
    headers = Config().get('headers')
    headers['jcobToken'] = token
    My_http.set_headers(headers)
    print('第二步:设置header(token等)')
    print(headers)
    # Step 3: params
    params = Config().get('params')
    My_http.set_params(params)
    print('第三步:设置params')
    print(params)
    if self.method == 'post':
        # Step 4 (POST): request body
        data = {
            'dateIndex': self.dateIndex,
            'gameType': self.gameType,
            'qiuFlag': self.qiuFlag
        }
        My_http.set_data(data)
        print(data)
        print('第四步:设置data')
        # Step 5: send
        self.return_json = My_http.postWithJson()
        print(self.return_json.json())
        print("第五步:发送请求\n\t\t请求方法:" + request_method())
        # Step 6: verify
        self.check_result()
        print('第六步:检查结果')
    elif self.method == 'get':
        # Step 4 (GET): send
        self.return_json = My_http.get()
        print(self.return_json.json())
        print("第四步:发送请求\n\t\t请求方法:" + request_method())
        # Step 5: verify
        self.check_result()
        print('第五步:检查结果')
def __init__(self, config: Config):
    '''
    Input:
        gym_env_name: gym training environment id, i.e. CartPole-v0
        n: environment number
        render_mode: mode of rendering, optional: first, last, all, random_[num] -> i.e. random_2, [list] -> i.e. [0, 2, 4]
    '''
    self.n = config['env_num']  # environments number
    seed = config['env_seed']
    render_mode = config.get('render_mode', 'first')

    def get_env(config: Config):
        # Build one wrapped gym environment from `config`. Wrapper order
        # matters: noop-reset -> action-skip -> gray/resize -> scale -> stack.
        gym_env_name = config['env_name']
        action_skip = bool(config.get('action_skip', False))
        skip = int(config.get('skip', 4))
        obs_stack = bool(config.get('obs_stack', False))
        stack = int(config.get('stack', 4))
        noop = bool(config.get('noop', False))
        noop_max = int(config.get('noop_max', 30))
        obs_grayscale = bool(config.get('obs_grayscale', False))
        obs_resize = bool(config.get('obs_resize', False))
        resize = config.get('resize', [84, 84])
        obs_scale = bool(config.get('obs_scale', False))
        env = gym.make(gym_env_name)
        env = BaseEnv(env)
        # No-op resets only apply to image observations (3-D Box space).
        if noop and isinstance(env.observation_space, Box) and len(
                env.observation_space.shape) == 3:
            env = NoopResetEnv(env, noop_max=noop_max)
        if action_skip:
            env = SkipEnv(env, skip=skip)
        if isinstance(env.observation_space, Box):
            if len(env.observation_space.shape) == 3:
                if obs_grayscale or obs_resize:
                    env = GrayResizeEnv(env,
                                        resize=obs_resize,
                                        grayscale=obs_grayscale,
                                        width=resize[0],
                                        height=resize[-1])
                if obs_scale:
                    env = ScaleEnv(env)
            if obs_stack:
                env = StackEnv(env, stack=stack)
        else:
            # Non-Box observations are one-hot encoded.
            env = OneHotObsEnv(env)
        if isinstance(env.action_space, Box) and len(
                env.action_space.shape) == 1:
            env = BoxActEnv(env)
        return env

    # NOTE(review): one extra env is built just to initialize shared state —
    # confirm _initialize does not keep it alive.
    self._initialize(env=get_env(config))
    self.envs = [get_env(config) for _ in range(self.n)]
    self.seeds = [seed + i for i in range(self.n)]  # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    [env.seed(s) for env, s in zip(self.envs, self.seeds)]
    self._get_render_index(render_mode)
def __init__(self, logger_name='framework'):
    """Configure a named logger from the 'log' section of the config.

    Missing or falsy config entries fall back to built-in defaults (same
    behavior as before: empty-string values also trigger the fallback).
    """
    self.logger = logging.getLogger(logger_name)
    logging.root.setLevel(logging.NOTSET)
    c = Config().get('log')

    def _opt(key, default):
        # FIX: one lookup instead of the repeated
        # `c.get(x) if c and c.get(x) else default` double-get pattern.
        value = c.get(key) if c else None
        return value if value else default

    self.log_file_name = _opt('file_name', 'test.log')  # log file name
    self.backup_count = _opt('backup', 5)  # number of rotated logs to keep
    # output levels
    self.console_output_level = _opt('console_level', 'WARNING')
    self.file_output_level = _opt('file_level', 'DEBUG')
    # output format
    pattern = _opt('pattern',
                   '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    self.formatter = logging.Formatter(pattern)
def __homing(self):
    """Initialize and home-search every configured XPS positioner group.

    :raises RuntimeError: when any group fails to initialize or home.
    """
    try:
        logger.debug('client: xps homing')
        groups = [
            Config.get("CAM_X_GROUP"),
            Config.get("CAM_Y_GROUP"),
            Config.get("CAM_Z_GROUP"),
            Config.get("FP_GROUP"),
        ]
        for group in groups:
            logger.debug('client: init xps %s', group)
            self.xps.GroupInitialize(self.socket_id, group)
            logger.debug('client: home-search %s', group)
            self.xps.GroupHomeSearch(self.socket_id, group)
    except Exception as e:
        logger.error("client: init error")
        raise RuntimeError(e)
def __init__(self, logger_name='JMTool Autotest'):
    """Configure the autotest logger; the log file name is timestamped per run.

    Missing or falsy 'log' config entries fall back to built-in defaults
    (same behavior as before: empty-string values also trigger the fallback).
    """
    self.logger = logging.getLogger(logger_name)
    logging.root.setLevel(logging.NOTSET)
    c = Config().get('log')

    def _opt(key, default):
        # FIX: one lookup instead of the repeated
        # `c.get(x) if c and c.get(x) else default` double-get pattern.
        value = c.get(key) if c else None
        return value if value else default

    # log file: one file per run, named by the start timestamp
    self.log_file_name = time.strftime("%Y-%m-%d-%H-%M-%S") + '_test.log'
    self.backup_count = _opt('backup', 5)  # number of rotated logs to keep
    # output levels
    self.console_output_level = _opt('console_level', 'WARNING')
    self.file_output_level = _opt('file_level', 'DEBUG')
    # output format
    pattern = _opt('pattern',
                   '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    self.formatter = logging.Formatter(pattern)
class Oracle(object):
    """Thin cx_Oracle wrapper configured from biz.conf (host/port/sid DSN)."""

    def __init__(self):
        # Force UTF-8 so Korean text round-trips correctly.
        os.environ["NLS_LANG"] = ".AL32UTF8"
        self.conf = Config()
        self.conf.init('biz.conf')
        host = self.conf.get('oracle.host')
        port = self.conf.get('oracle.port')
        sid = self.conf.get('oracle.sid')
        self.dsn_tns = cx_Oracle.makedsn(host, port, sid=sid)
        user = self.conf.get('oracle.user')
        passwd = self.conf.get('oracle.passwd')
        self.conn = cx_Oracle.connect(user, passwd, self.dsn_tns)
        self.cursor = self.conn.cursor()

    def disconnect(self):
        """Close cursor and connection; wrap any failure in Exception."""
        try:
            self.cursor.close()
            self.conn.close()
        except Exception:
            raise Exception(traceback.format_exc())
def generateCSVs(data):
    """Write one CSV per entry of `data` into <PLOT_DATA_FOLDER>/csv.

    :param data: mapping of measurement id -> numpy array
    :raises ValueError: when `data` is empty
    """
    if not data:
        # FIX: was a bare ValueError() with no message.
        raise ValueError('no data to export')
    csv_folder = Path(Config.get('PLOT_DATA_FOLDER')) / 'csv'
    # FIX: mkdir() failed when the parent folder was missing or when two
    # processes raced on the exists() check; parents/exist_ok handles both.
    csv_folder.mkdir(parents=True, exist_ok=True)
    # FIX: loop variable renamed — `id` shadowed the builtin.
    for array_id, array in data.items():
        NPYLoader.writeCSV(array.tolist(), csv_folder / f'{array_id}.csv')
class Oracle(object):
    """cx_Oracle wrapper using a pre-built DSN and an encrypted password."""

    def __init__(self):
        # Force UTF-8 so Korean text round-trips correctly.
        os.environ["NLS_LANG"] = ".AL32UTF8"
        self.conf = Config()
        self.conf.init('biz.conf')
        self.dsn_tns = self.conf.get('oracle.dsn').strip()
        # Password is stored encrypted in the config.
        passwd = decrypt_string(self.conf.get('oracle.passwd'))
        user = self.conf.get('oracle.user')
        self.conn = cx_Oracle.connect(user, passwd, self.dsn_tns)
        self.cursor = self.conn.cursor()

    def disconnect(self):
        """Close cursor and connection; wrap any failure in Exception."""
        try:
            self.cursor.close()
            self.conn.close()
        except Exception:
            raise Exception(traceback.format_exc())
class Email:
    """Send test reports by email via yagmail, configured from the 'email'
    section of the config."""

    def __init__(self):
        cfg = Config().get('email')
        self.email = cfg
        self.user = cfg.get('user')
        self.password = cfg.get('password')
        self.host = cfg.get('host')
        self.receivers = cfg.get('receivers')  # may be a list
        self.subject = cfg.get('subject')
        self.contens = cfg.get('contens')

    def send_email(self, attachments):
        """Send the configured message with `attachments` (the report files)."""
        yag = yagmail.SMTP(user=self.user,
                           password=self.password,
                           host=self.host)
        yag.send(to=self.receivers,
                 subject=self.subject,
                 contents=self.contens,
                 attachments=attachments)
def get_env(config: Config):
    """Build one wrapped gym environment from `config`.

    Wrapper order matters: noop-reset -> action-skip -> gray/resize ->
    scale -> stack; non-Box observations are one-hot encoded, and 1-D Box
    action spaces get the BoxActEnv wrapper.
    """
    gym_env_name = config['env_name']
    action_skip = bool(config.get('action_skip', False))
    skip = int(config.get('skip', 4))
    obs_stack = bool(config.get('obs_stack', False))
    stack = int(config.get('stack', 4))
    noop = bool(config.get('noop', False))
    noop_max = int(config.get('noop_max', 30))
    obs_grayscale = bool(config.get('obs_grayscale', False))
    obs_resize = bool(config.get('obs_resize', False))
    resize = config.get('resize', [84, 84])
    obs_scale = bool(config.get('obs_scale', False))
    env = gym.make(gym_env_name)
    env = BaseEnv(env)
    # No-op resets only apply to image observations (3-D Box space).
    if noop and isinstance(env.observation_space, Box) and len(
            env.observation_space.shape) == 3:
        env = NoopResetEnv(env, noop_max=noop_max)
    if action_skip:
        env = SkipEnv(env, skip=skip)
    if isinstance(env.observation_space, Box):
        if len(env.observation_space.shape) == 3:
            if obs_grayscale or obs_resize:
                env = GrayResizeEnv(env,
                                    resize=obs_resize,
                                    grayscale=obs_grayscale,
                                    width=resize[0],
                                    height=resize[-1])
            if obs_scale:
                env = ScaleEnv(env)
        if obs_stack:
            env = StackEnv(env, stack=stack)
    else:
        env = OneHotObsEnv(env)
    if isinstance(env.action_space, Box) and len(
            env.action_space.shape) == 1:
        env = BoxActEnv(env)
    return env
def save_gathering_data(self, subdirectory=None, name=None):
    """Download the XPS gathering file via FTP into the dated result folder.

    :param subdirectory: optional subfolder below the date folder
    :param name: base name of the target file (without extension)
    :raises RuntimeError: on any FTP or file-system error
    """
    try:
        directory = Config.get(
            "XPS_RESULT_PATH") + "\\" + datetime.now().strftime("%d%m%y")
        if not subdirectory is None:
            directory += "\\" + subdirectory
        filename = name + ".gather"
        file = directory + "\\" + filename
        if not os.path.exists(directory):
            os.makedirs(directory)
        # BUG FIX: the output handle was opened via open(...).write and never
        # closed; `with` guarantees flush+close even on FTP errors.
        with open(file, 'wb') as out:
            ftp = ftplib.FTP(Config.get("XPS_HOST"))
            ftp.login(Config.get("XPS_USER"), Config.get("XPS_PASSWORD"))
            ftp.cwd(Config.get("XPS_FTP_PATH"))
            ftp.retrbinary("RETR " + Config.get("XPS_FTP_FILE"), out.write)
            ftp.quit()
    except Exception as e:
        # BUG FIX: the message contained a dangling %s with no argument.
        logger.error('client: cannot save gathering file')
        raise RuntimeError(e)
    logger.debug("client: gathering data saved to %s", file)
    return
def main():
    """Parse CLI options and dispatch to show(); exits with code 2 on usage
    errors, 0 on success."""
    file = Config.get("PLOT_DATA_FOLDER") + Config.get("PLOT_DEFAULT_FILE")
    action = None
    plot_type = None  # renamed: `type` shadowed the builtin
    save = False
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h",
                                   ["help", "plot=", "file=", "type=", "save"])
    except getopt.GetoptError as err:
        # BUG FIX: print() was given two arguments and printed the literal
        # "%s"; use %-formatting instead.
        print("Error: %s" % err)
        sys.exit(2)
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
        # BUG FIX: `o in ("--save")` (no comma) was a substring test against
        # the *string* "--save", so e.g. "-s" would have matched too.
        elif o == "--save":
            save = True
        elif o == "--file":
            file = a
        elif o == "--type":
            plot_type = a
        elif o == "--plot":
            action = a
        else:
            print("Error: unknown option %s" % o)
            sys.exit(2)
    if action is None:
        print("Error: parameter 'plot' is mandatory")
        sys.exit(2)
    show(action, file, plot_type, save)
    sys.exit(0)
def init_event(self):
    """Configure the XPS gathering data types and arm the event trigger/action."""
    logger.debug('client: init xps events')
    # Fully-qualified data type names: <group>.<positioner>.<type>
    types = [
        Config.get("FP_GROUP") + "." + Config.get("FP_GROUP_NAME") + "." + t
        for t in Config.get("XPS_DATA_TYPES")
    ]
    self.xps.GatheringConfigurationSet(self.socket_id, types)
    logger.debug("client: set xps config %s", types)
    self.xps.EventExtendedConfigurationTriggerSet(
        self.socket_id, [Config.get("XPS_TRIGGER")],
        ['2'], ['0'], ['0'], ['0'])
    logger.debug("client: set xps event trigger")
    self.xps.EventExtendedConfigurationActionSet(
        self.socket_id, ["GatheringRun"],
        ['50000'], ['1'], ['0'], ['0'])
    logger.debug("client: set xps event action")
def json_repair(folder):
    """Scan `folder` for undersized .spot/.gather files and rebuild their runs.

    :param folder: absolute path, or a name relative to PLOT_DATA_FOLDER
    :return: run description dict ('id', 'desc', 'runs') with one entry per
        broken measurement, rebuilt from the originally scheduled runs
    """
    spot_size = 6000 * 1024  # KB
    gather_size = 1000 * 1024  # KB
    if not os.path.isabs(folder):
        folder = Config.get("PLOT_DATA_FOLDER") + folder
    if not os.path.exists(folder) or not os.path.isdir(folder):
        print("Error: folder " + folder + " does not exist!")
        sys.exit(0)
    errors = {}
    for root, dirs, files in os.walk(folder):
        for file in files:
            if not file.endswith('.spot') and not file.endswith('.gather'):
                continue
            # BUG FIX: sizes were checked against `folder + '/' + file`,
            # which is wrong for files found in subdirectories — join with
            # the walk root instead.
            path = os.path.join(root, file)
            if (file.endswith('.spot') and os.path.getsize(path) < spot_size) or \
                    (file.endswith('.gather') and os.path.getsize(path) < gather_size):
                Logger.get_logger().debug('File to small: ' + file)
                name, ext = os.path.splitext(file)
                # File names look like <id>_<task>_<run>.
                pattern = r"(\d*)_([\d\w-]*)_(\d*)"
                m = re.search(pattern, name)
                errors[name] = ((m.group(1), m.group(2), m.group(3)))
    runs_by_task = loadRuns()
    runs = {}
    runs['id'] = 'repair'
    runs['desc'] = 'Repeat previously broken runs'
    runs['runs'] = []
    for name in errors:
        (meas_id, task, run_id) = errors[name]
        old_run = runs_by_task[task][int(run_id)]
        run = {}
        run['velocity'] = old_run.vel
        run['position'] = old_run.pos
        run['frequency'] = old_run.freq
        run['config'] = old_run.cfg
        runs['runs'].append(run)
        print(run)
    return runs
def check_files(subdir, id):
    """Validate that both result files of a measurement exist and look sane.

    Checks <XPS_RESULT_PATH>\\<ddmmyy>[\\subdir]\\<id>.spot and .gather for
    existence and minimum size, and that the gather file parses as numeric
    text. On failure both files are deleted so the run can be repeated.

    :param subdir: optional subfolder below the dated result directory
    :param id: measurement identifier (file base name)
    :return: True when both files pass all checks, else False
    """
    directory = Config.get("XPS_RESULT_PATH") + "\\" + datetime.now().strftime(
        "%d%m%y")
    success = True
    if not subdir is None:
        directory += "\\" + subdir
    spot = directory + "\\" + id + ".spot"
    gather = directory + "\\" + id + ".gather"
    if os.path.exists(spot):
        fsize = os.stat(spot).st_size
        logger.debug("cli: spot file: %s KB", fsize / 1024)
        if fsize < 8000000:  # 8 MB minimum (old comment wrongly said 10 MB)
            success = False
            logger.error("cli: spot file too small")
    else:
        logger.error("cli: spot file '%s' doesnt exist", spot)
        success = False
    if os.path.exists(gather):
        fsize = os.stat(gather).st_size
        logger.debug("cli: gather file: %s KB", fsize / 1024)
        if fsize < 1000000:  # 1 MB
            success = False
            logger.error("cli: gather file too small")
        # Also verify the gather file is parseable numeric text.
        try:
            np.genfromtxt(gather, skip_header=2)
        except Exception as e:
            logger.error("cli: unable to load gather file")
            logger.debug(e)
            success = False
    else:
        logger.error("cli: gather file '%s' doesnt exist", gather)
        success = False
    if not success:
        # Remove partial results so the next attempt starts clean.
        if os.path.exists(spot):
            os.remove(spot)
        if os.path.exists(gather):
            os.remove(gather)
    return success
class PassportTestCase(unittest.TestCase):
    """Data-driven tests for the passport API (Python 2).

    Cases come from the 'passport' sheet of the Excel file named in the
    runner config; one test function per row is generated via getTestFunc.
    """
    # NOTE(review): these resolve at import time — a missing config or Excel
    # file breaks collection of the whole module.
    url = Config.get('runner', 'RUNNER_URL')
    filename = Config.get('runner', 'CASE_URL')
    sheet_name = 'passport'
    execl = Excel(filename, sheet_name)

    def action(self, row, i):
        # POST the row's data to base-url + row['url'], record the response
        # in the sheet (+2 row offset for the header), compare result codes.
        url = self.url + row['url']
        data = row['data']
        result = requests.post(url, data)
        res = result.json()
        self.execl.write(i + 2, res['code'], res['msg'])
        print res
        self.assertEqual(res['code'], row['code'])

    @staticmethod
    def getTestFunc(row, i):
        # Factory: bind one (row, i) pair into a standalone test function.
        def func(self):
            self.action(row, i)
        return func
def change_parameter(self, pos=[], vel=None):
    """Optionally change the FP velocity profile and move groups to absolute
    positions.

    NOTE(review): `pos=[]` is a mutable default; it is only read here so
    behavior is unaffected, but confirm before reusing the pattern.

    :param pos: list of (group, position-list) pairs to move to
    :param vel: new velocity for the FP positioner, or None to leave it
    :return: True on success
    :raises RuntimeError: on any XPS error
    """
    try:
        # velocity (with the configured acceleration and jerk times)
        if vel is not None:
            logger.debug("client: change velocity to %s", vel)
            self.xps.PositionerSGammaParametersSet(
                self.socket_id,
                Config.get("FP_GROUP") + '.' + Config.get("FP_GROUP_NAME"),
                vel, Config.get("FP_ACCELERATION"),
                Config.get("FP_JERKTIME")[0],
                Config.get("FP_JERKTIME")[1])
        # absolute move for every (group, position) pair
        if len(pos) > 0:
            logger.debug("client: change positions to %s", pos)
            for group, position in pos:
                self.xps.GroupMoveAbsolute(self.socket_id, group, position)
    except Exception as e:
        logger.error("client: XPS change position error")
        raise RuntimeError(e)
    return True