def fit(self):
    """Compile the experiment's model and run the Keras fitting loop.

    Reads loss/metrics/datasets from ``self.experiment``, builds train/val
    batch generators, derives step counts from dataset sizes when they were
    not explicitly configured, and stores the Keras ``History`` object on
    ``self._history``.
    """
    exp = self.experiment
    loss = exp.loss
    metrics = exp.metrics
    train_dataset = exp.train_dataset
    val_dataset = exp.val_dataset
    print('val_dataset: {}'.format(val_dataset))
    model = exp.model
    batch_size = self.batch_size
    # todo: a compile method, either in Model or in Experiment
    model.compile(self.optimizer, loss, metrics, loss_weights=loss.loss_weights)
    self.train_gen = self.get_batch_generator('train')
    self.val_gen = self.get_batch_generator('val')
    # Default to one full pass over each dataset per epoch when not set.
    self.steps_per_epoch = self.steps_per_epoch or (len(train_dataset) // batch_size)
    self.validation_steps = self.validation_steps or (len(val_dataset) // batch_size)
    if loss.is_computing_validation_metrics:
        # Keras-side validation is disabled in this mode — presumably the
        # loss object computes validation metrics itself. TODO confirm.
        validation_data = None
        validation_steps = None
    else:
        validation_data = self.val_gen
        validation_steps = self.validation_steps
    logger.info('Enter fitting loop')
    # NOTE(review): fit_generator is deprecated in modern Keras (model.fit
    # accepts generators directly) — kept as-is for compatibility.
    self._history = model.fit_generator(self.train_gen,
                                        steps_per_epoch=self.steps_per_epoch,
                                        callbacks=self.callbacks,
                                        validation_data=validation_data,
                                        validation_steps=validation_steps,
                                        **self.params)
    logger.info('Exit fitting loop')
def get_element(self, context):
    """Locate the element described by ``self.locator`` within ``context``.

    On lookup failure, dismisses any black-listed (popup) elements and
    retries recursively, up to 3 attempts (tracked via ``self.count``).
    On success the element is highlighted with a red border as a debug aid.

    :param context: driver or parent element exposing ``find_element`` /
        ``find_elements`` / ``execute_script``
    :return: the located element, or None after the retries are exhausted
    """
    try:
        elem = context.find_element(*self.locator)
    except Exception:
        # BUG FIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.
        if self.count > 2:
            # Exceeded the maximum number of retries.
            return None
        self.count += 1
        # Check whether any black-listed element has appeared and dismiss it.
        for black in self.black_list:
            black_elements = context.find_elements(*black)
            if len(black_elements) >= 1:
                black_elements[0].click()
                logger.info("黑名单{}元素出现,已点击!".format(black))
            else:
                logger.info("黑名单{}元素未出现!".format(black))
        # The original for/else had no `break`, so the retry always ran
        # after the black-list sweep — written here as a plain return.
        return self.get_element(context)
    else:
        try:
            # Highlight the found element (best effort; may fail e.g. in
            # native contexts that do not support execute_script).
            style_red = 'arguments[0].style.border="2px solid red"'
            context.execute_script(style_red, elem)
        except Exception:
            return elem
        return elem
def run(self, backup=True):
    """Run the experiment according to its current status.

    'done' experiments only reload the final snapshot; 'initialized' ones
    start fresh; 'finished*' ones resume from the last snapshot. Any other
    status is logged and the method returns without summarizing.

    :param backup: when True, dump the config up front and back up results
        at the end (skipped for already-done experiments).
    """
    logger.info('running %s', str(self))
    if backup:
        self.backup_handler.dump_config(self.config)
    status = self.status
    # BUG FIX: statuses were compared with `is`, which only works through
    # CPython string interning and is not guaranteed; use == instead.
    if status == 'done':
        logger.info('Experiment is done.')
        self.backup_handler.load_snapshot(self.model, -1)
        backup = False
    elif status == 'initialized':
        self.start()
    elif status.startswith('finished'):
        # Resume training from the last snapshot epoch.
        snap_i = self.backup_handler.last_snapshot_epoch()
        self.train_config.setdefault('initial_epoch', snap_i)
        _ = self.history  # touch history (presumably loads/caches logs) — TODO confirm
        self._trainer = None
        self.backup_handler.load_snapshot(self.model, snap_i)
        self.start(prefix='Resuming')
        self._history = self.backup_handler.train_logs
        _ = self.train_config.pop('initial_epoch')
        self._trainer = None
    else:
        # BUG FIX: logger.warning('unexpected status:', status) passed status
        # as a %-format argument with no placeholder, so it was never shown.
        logger.warning('unexpected status: %s', status)
        return
    self.results.summary()
    if backup:
        self.backup()
def kfold_cross_validation(self, n_splits):
    """
    Evaluates the precision of the classifier by running its prediction on
    the training data set, using k-fold cross validation.

    :param n_splits: number of splits to split the training data into
    :type n_splits: int
    :return: the percentage of correctly predicted categories
    :rtype: float
    """
    if n_splits == 1:
        # Degenerate case: evaluate directly on the training data.
        return np.mean(
            self.predict(self._clf_data.data) == self._clf_data.target)
    length = len(self._clf_data.data)
    sub_length = int(length / n_splits)
    logger.info(
        "Training data splitted in {} splits, each of length of {}, for a total length of {}"
        .format(n_splits, sub_length, length))
    splits = []
    for i in range(0, n_splits):
        # BUG FIX: slices previously started at index i instead of
        # i * sub_length, so every "fold" overlapped almost entirely with
        # the others instead of partitioning the data.
        start = i * sub_length
        split = ClassifierData(
            self._clf_data.data[start:start + sub_length],
            self._clf_data.target[start:start + sub_length],
            self._clf_data.target_names[start:start + sub_length])
        splits.append(split)
    total = 0
    precisions = []
    for split in splits:
        # NOTE(review): uses self.target_names here although the folds slice
        # self._clf_data.target_names above — confirm both refer to the same
        # label set.
        training_data = ClassifierData([], [], self.target_names)
        total += len(split.data)
        # Train on every fold except the held-out one.
        for other_split in splits:
            if other_split != split:
                training_data.data.extend(other_split.data)
                training_data.target.extend(other_split.target)
        # type(self) will invoke the right constructor for children
        test_classifier = type(self)(training_data.data,
                                     training_data.target,
                                     training_data.target_names)
        test_classifier.train()
        logger.debug("Trained on split {}".format(split))
        precision = np.mean(
            test_classifier.predict(split.data) == split.target)
        logger.debug("Precision on other split {} : {}".format(
            split, precision))
        precisions.append(precision)
    logger.debug("Total added splits length: {}".format(total))
    return np.mean(precisions)
def swipe_down(self, count=1, time=0.5):
    """Swipe from near the top of the screen to near the bottom, ``count`` times.

    :param count: number of swipes to perform
    :param time: pause in seconds after each swipe (shadows the stdlib
        module name; kept for backward compatibility with callers)
    """
    try:
        for _ in range(count):
            # Fetch the screen size once per swipe (was queried three times).
            size = self.get_size()
            x1 = size[0] / 2
            y1 = size[1] / 10
            y2 = size[1] * 0.9
            self.driver.swipe(x1, y1, x1, y2)
            sleep(time)
    except Exception as e:
        # Best-effort: swipe failures are logged, not raised (matches the
        # sibling swipe helpers).
        logger.info(e)
def swipe_left(self, count=1, time=0.5):
    """Swipe from near the right edge of the screen to the left, ``count`` times.

    :param count: number of swipes to perform
    :param time: pause in seconds after each swipe (shadows the stdlib
        module name; kept for backward compatibility with callers)
    """
    try:
        for _ in range(count):
            # Fetch the screen size once per swipe (was queried three times).
            size = self.get_size()
            x1 = size[0] * 0.9
            y1 = size[1] / 2
            x2 = size[0] / 10
            self.driver.swipe(x1, y1, x2, y1)
            sleep(time)
    except Exception as e:
        # Best-effort: swipe failures are logged, not raised (matches the
        # sibling swipe helpers).
        logger.info(e)
def common_driver(cmdopt):
    """Generator fixture: build an Appium driver from the --cmdopt string,
    yield it to the test, and quit it on teardown.

    :param cmdopt: string representation of a device-info dict passed on
        the command line
    """
    global driver
    logger.info(cmdopt)
    # SECURITY NOTE(review): eval() on a raw command-line option executes
    # arbitrary code. cmdopt is expected to be a dict literal — consider
    # ast.literal_eval instead; confirm no caller relies on eval semantics.
    base_driver = BaseDriver(eval(cmdopt))
    base_driver.start_appium()
    # Give the Appium server a moment to come up before connecting.
    time.sleep(3)
    driver = base_driver.get_driver()
    yield driver
    # driver.close_app()
    driver.quit()
def swipe_on(self, direction, count, time):
    """Dispatch a swipe in the given direction.

    Any direction other than 'up', 'down' or 'right' falls through to a
    left swipe, matching the original if/elif chain. Errors are logged,
    never raised.
    """
    try:
        handlers = {
            'up': self.swipe_up,
            'down': self.swipe_down,
            'right': self.swipe_right,
        }
        handler = handlers.get(direction, self.swipe_left)
        handler(count, time)
    except Exception as e:
        logger.info(e)
def get_driver(self):
    """Create a Remote WebDriver session against this device's Appium server.

    Loads the base desired capabilities from CAPS_DIR and patches in the
    per-device name and system port.

    :return: connected webdriver with a 5s implicit wait
    """
    desired_caps = read_yaml(CAPS_DIR)
    # desired_caps["platformVersion"] = self.device_info["platform_version"]
    desired_caps["deviceName"] = self.device_info["device_name"]
    desired_caps["systemPort"] = self.device_info["system_port"]
    # BUG FIX: the log line hard-coded 127.0.0.1 while the actual connection
    # used self.host — build the URL once and log the one really used.
    server_url = f'http://{self.host}:{self.device_info["server_port"]}/wd/hub'
    logger.info(server_url)
    driver = webdriver.Remote(server_url, desired_caps)
    driver.implicitly_wait(5)
    return driver
def single_run(self, csv_input, configuration, redirect_to=None):
    """Build and execute one command-line invocation of the executable.

    :param csv_input: path to the input CSV, passed as the first argument
    :param configuration: iterable of (key, value) pairs; a None value emits
        a bare `-key` flag. NOTE(review): if callers pass a dict, this needs
        `.items()` — confirm against call sites.
    :param redirect_to: optional file path; stdout is appended to it via `>>`
    """
    cmd = '%s %s ' % (self.exe_path, csv_input)
    for key, val in configuration:
        if val is None:
            cmd += '-%s ' % key
        else:
            cmd += '-%s %s ' % (key, str(val))
    if redirect_to:
        cmd += '>> %s' % redirect_to
    # BUG FIX: logger.info('Running', cmd) passed cmd as a %-format argument
    # for a message with no placeholder, so the command was never logged.
    logger.info('Running %s', cmd)
    # SECURITY NOTE(review): command built by string concatenation and run
    # through the shell; inputs are assumed trusted (local experiment driver).
    os.system(cmd)
def stocks_list(self):
    """Lazily build and cache the list of Stock objects.

    On first call, loads every ticker returned by gen_all_paths() and
    attaches the configured feature extractors; subsequent calls return
    the cached list.
    """
    if self._stocks_list is not None:
        return self._stocks_list
    logger.info('Loading Stocks:')
    feature_specs = self.feature_list_with_params + self._non_numerical_features
    self._stocks_list = [
        Stock(tckt,
              load_stock(path),
              feature_list=[get_feature(f_name, **params)
                            for f_name, params in feature_specs])
        for tckt, path in self.gen_all_paths()
    ]
    return self._stocks_list
def find(self, context):
    """Return all elements matching self.locator, or [] when unavailable.

    Captures a screenshot when the bulk lookup raises after the element
    was initially visible.
    """
    if self.get_element(context) is None:
        return []
    logger.info("{desc}, 对象已访问, {elm} ".format(desc=self.describe,
                                                elm=self.locator))
    try:
        return context.find_elements(*self.locator)
    except Exception:
        screenshot_name = '-'.join(self.locator) + "对象未找到截图.png"
        capture_screenshots(context, screenshot_name)
        # Record the failed lookup for the test report.
        logger.info("{desc}, 对象未找到, {elm} ".format(desc=self.describe,
                                                    elm=self.locator))
        return []
def start_appium(self):
    """Start an Appium server for this device, freeing its port first if busy.

    The bootstrap port is server_port + 1; server output is appended to
    LOG_DIR/appium/<server_port>.log.
    """
    port = self.device_info["server_port"]
    # check_port returns True when the port is free; otherwise kill the holder.
    if not check_port(self.host, port):
        release_port(port)
        time.sleep(2)
    # Reuse the already-fetched port instead of re-reading device_info.
    cmd = "appium -p {0} -bp {1}".format(port, port + 1)
    logger.info(f"start appium server: {cmd}")
    out_path = os.path.join(LOG_DIR, 'appium', str(port) + ".log")
    log_file = open(out_path, 'a')
    try:
        subprocess.Popen(cmd,
                         shell=True,
                         stdout=log_file,
                         stderr=subprocess.STDOUT)
    finally:
        # BUG FIX: the log file object was previously leaked. The child
        # process receives its own duplicate of the fd at spawn time, so
        # closing the parent's copy here is safe.
        log_file.close()
def find(self, context):
    """Poll for the element, sleeping 1s between attempts; screenshot on failure.

    NOTE(review): range(1, self.time_out) performs time_out - 1 attempts
    before the final one — confirm the off-by-one is intentional.

    :param context: driver or parent element passed through to get_element
    :return: the element, or None when it never appeared
    """
    for attempt in range(1, self.time_out):
        if self.log is False:
            # NOTE(review): logging happens only when self.log is False —
            # looks inverted, but kept as-is to preserve behavior.
            logger.info("{desc}, {n} times search, {elm} ".format(
                desc=self.describe, n=attempt, elm=self.locator))
        # BUG FIX: get_element (which has side effects: retry counter,
        # black-list clicks, highlighting) was called twice per successful
        # attempt; call it once and reuse the result.
        element = self.get_element(context)
        if element is not None:
            return element
        sleep(1)
    # Final attempt after the polling loop is exhausted.
    element = self.get_element(context)
    if element is None:
        file_name = '-'.join(self.locator) + "对象未找到截图.png"
        capture_screenshots(context, file_name)
        logger.info("{desc}, 对象未找到, {elm} ".format(desc=self.describe,
                                                    elm=self.locator))
    return element
def check_port(host, port):
    """Check whether host:port is free.

    :param host: host address
    :param port: port number (str or int; strings are converted)
    :return: True when the port is available (connection failed),
             False when something is already listening.
    """
    if isinstance(port, str):
        port = int(port)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # A successful connect means something is listening on the port.
        s.connect((host, port))
        s.shutdown(2)
    except OSError:
        # Connection failed -> port is not in use.
        logger.info("port {} is available".format(port))
        return True
    else:
        logger.info("port {} already be in use".format(port))
        return False
    finally:
        # BUG FIX: the socket was previously never closed on either path.
        s.close()
def run_pro(device_info):
    """Regression mode: run the pytest suite for one device and emit an
    Allure HTML report under a timestamped report directory.
    """
    logger.info("回归模式,开始执行✈✈!")
    run_id = time.strftime("%Y_%m_%d_%H_%M_%S") + "-" + str(
        device_info['server_port'])
    init_env(run_id)
    html_report = os.path.join(REPORT_DIR, run_id, "html")
    xml_report = os.path.join(REPORT_DIR, run_id, "xml")
    pytest_args = [
        f"--cmdopt={device_info}",
        "-s",
        "-v",
        "--alluredir=" + xml_report,
        cases_path,
        "--reruns",
        rerun,
    ]
    pytest.main(pytest_args)
    allure_cmd = "allure generate {xml} -o {html}".format(xml=xml_report,
                                                          html=html_report)
    logger.info(allure_cmd)
    os.popen(allure_cmd)
    logger.info("运行结束,生成测试报告♥❤!")
def release_port(port):
    """Kill the process listening on ``port`` (Linux/macOS via lsof, Windows
    via netstat/taskkill).

    :param port: port number to free
    """
    if platform == "linux" or platform == "linux2" or platform == "darwin":
        # NOTE: the original awk clause `$9 eq "<port>"` is not valid awk
        # (`eq` is Perl); it parsed as an always-truthy string concatenation,
        # so the effective filter was only $10=="(LISTEN)". That filter is
        # made explicit here; `lsof -i tcp:<port>` already restricts the
        # output to this port.
        cmd = "lsof -i tcp:%s | awk '$10==\"(LISTEN)\"{print $2}'" % port
        pid = os.popen(cmd).read().strip()
        logger.info(f"appium pid: {pid}")
        # Only issue a kill when a pid was actually found.
        cmd_kill = f"kill -9 {pid}" if pid else None
    else:
        cmd = f"netstat -ano | findstr {port}"
        logger.info(cmd)
        result = os.popen(cmd).read()
        cmd_kill = None
        # BUG FIX: the original tested `str(port) and "LISTENING" in result`
        # (str(port) is always truthy, so the port was never matched), sliced
        # the pid with a first-newline index that could precede the match,
        # and left cmd_kill unbound — a NameError — when no listener existed.
        for line in result.splitlines():
            if "LISTENING" in line and str(port) in line:
                pid = line.split()[-1]
                cmd_kill = f"taskkill -f -pid {pid}"
                break
    if cmd_kill:
        logger.info(f"kill appium pid: {cmd_kill}")
        os.popen(cmd_kill)
def start(self, prefix='Starting '):
    """Announce the experiment, assemble its components, then launch training."""
    logger.info('\n%s experiment: %s\n' % (prefix, str(self)))
    self._create()
    self.trainer.fit()