class Vrm(object):
    """Facade over per-hypervisor driver modules (``vrm.<name>_driver``).

    Holds a single driver instance for one virtual machine and mirrors the
    driver's sync state (whether the VM already exists on the backend).
    """

    def __init__(self):
        # Active driver object; populated by init_object().
        self.driver = None
        # True once the driver reports the VM as already provisioned.
        self.is_synced = False
        self.logger = Logger()
        self.settings = Settings()
        # Cache the configured driver whitelist once at construction.
        self.valid_driver = self.__list_valid_driver()

    def __list_valid_driver(self):
        """Return the list of driver names allowed by configuration."""
        return self.settings.get_item("vrm_default", "valid_driver").split(",")

    def init_object(self, vm_name, driver, memory=None, hd_size=None, password=None):
        """Instantiate the driver for *vm_name*.

        Returns True when *driver* is whitelisted and the driver object was
        created, False otherwise.
        """
        # Use the whitelist cached in __init__ instead of re-reading settings
        # on every call (the original re-invoked __list_valid_driver here).
        if driver in self.valid_driver:
            # An explicit fromlist makes __import__ return the submodule
            # itself; the original passed the string "*", which only worked
            # because any non-empty fromlist triggers the same behavior.
            self.driver = __import__(
                "vrm.%s_driver" % driver, fromlist=["Driver"]
            ).Driver(vm_name, memory, hd_size, password)
            self.logger.success(
                "[Vrm] Driver Object successfully initialized for VirtualMachine %s"
                % vm_name)
            if self.driver.is_synced:
                self.is_synced = True
            return True
        else:
            self.logger.critical("[Vrm] Invalid or not supported Driver, %s" % driver)
            return False

    def register_instance(self):
        """Create the server instance unless the driver says it already exists."""
        if self.is_synced:
            self.logger.warning("[Vrm] Instance %s already created" % self.driver.vm_name)
            return False
        else:
            self.driver.make_server()
            self.logger.success("[Vrm] Instance %s successfully created" % self.driver.vm_name)
            return True
def save(self):
    """Persist this downlink message to redis and publish alarm notifications.

    Writes the full message hash to db0 (expiring after EXPIRE_MSG) and a
    slim statistics hash to db2 (expiring after EXPIRE_STATIS), publishing
    the keys on the msg/dn alarm channels keyed by the owning app_eui.
    """
    try:
        # hget returns bytes for an existing field; hexlify raises
        # TypeError on the None returned for a missing key/field.
        app_eui = hexlify(
            db0.hget(self.category + self.eui, ConstDB0.app_eui)).decode()
    except TypeError as e:
        Logger.warning(action='MsgDn', type=IDType.app,
                       id=self.category + self.eui, msg=str(e))
        # Fallback: assumes eui is "<app_eui>:<dev_eui>" — TODO confirm
        # against the writers of this field.
        app_eui = self.eui.split(':')[0]
    # Full message record, keyed by category + eui + rounded timestamp.
    key = ConstDB0.msg_down + self.category + \
        self.eui + ':' + str(round(self.ts))
    pipe = db0.pipeline()
    pipe.hmset(key, self.__obj_to_dict())
    pipe.expire(key, ConstMsg.EXPIRE_MSG)
    pipe.publish(Channel0.msg_alarm + app_eui, key)
    pipe.execute()
    self.__count_statistics_down()
    # Downlink statistics record (fcnt plus gateways, when present).
    key = ConstDB2.dn_m + self.category + \
        self.eui + ':' + str(round(self.ts))
    pipe = db2.pipeline()
    info = {ConstMsg.fcnt: self.fcnt}
    if hasattr(self, ConstMsg.gateways):
        # gateways may be raw bytes (a single gateway id) or a list of ids;
        # both are normalized to hex strings, lists joined with ';'.
        if isinstance(self.gateways, bytes):
            info[ConstMsg.gateways] = hexlify(self.gateways).decode()
        elif isinstance(self.gateways, list):
            info[ConstMsg.gateways] = ';'.join(
                map(bytes_to_hexstr, self.gateways))
    pipe.hmset(key, info)
    pipe.expire(key, ConstMsg.EXPIRE_STATIS)
    pipe.publish(Channel0.dn_alarm + app_eui, key)
    pipe.execute()
def get(self, gateway_id):
    """Resolve the adcode for this object's location via the geocode API.

    Returns the adcode string on success, or None on HTTP failure,
    request errors, or an API-level error response. Failures are logged,
    never raised.
    """
    result = None
    payload = {
        'key': self.REQUEST_KEY,
        'location': self.location_param,
        'output': 'JSON'
    }
    try:
        response = requests.get(self.REQUEST_URL, params=payload,
                                timeout=self.TIMEOUT)
        if response.status_code == requests.codes.ok:
            response_data = response.json()
            if response_data['status'] == '1':
                self.normal_req(response_data, gateway_id)
                result = response_data['regeocode']['addressComponent'][
                    'adcode']
            else:
                self.abnormal_req(response_data, gateway_id)
        else:
            Logger.warning(Action.get, 'Code API', response.status_code,
                           response.content)
    except requests.exceptions.RequestException as e:
        # Covers Timeout plus connection-level failures, all of which the
        # original silently suppressed via `return` inside `finally`.
        Logger.error(Action.get, 'Code API', '', 'ERROR: %s' % str(e))
    except ValueError as e:
        # response.json() raised on a malformed body.
        Logger.error(Action.get, 'Code API', '', 'ERROR: %s' % str(e))
    # BUG FIX: the original returned from `finally`, which suppressed any
    # in-flight exception (flake8 B012); a plain return keeps behavior for
    # the handled cases without hiding unexpected errors.
    return result
class BaseTrainer(object):
    """Base trainer driving the standard train / validate / test loop.

    Arguments:
        model: the network (torch ``Module``)
        optimizer: adam, sgd, rmsprop, ...
        criterion: the loss (a ``Module``)
        scheduler: steplr, ...
        datasets: datasets for training, validation and (optionally) test,
            keyed "train" / "val" / "test"
        metric_manager: evaluates the performance of the network
        model_name: name used in checkpoint and log filenames
        log_filename: base name of the log file
    """

    # Phases run every epoch; "test" is handled separately by test().
    _phase_list = ["train", "val"]

    def __init__(self,
                 model: Module = None,
                 optimizer: Optimizer = None,
                 criterion: Module = None,
                 scheduler: LRScheduler = None,
                 datasets: Dict[Text, Dataset] = None,
                 metric_manager: MetricManager = None,
                 model_name: Text = "",
                 log_filename: Text = "training_log",
                 save_dir: Text = "./output",
                 num_workers: int = 2,
                 batch_size: int = 256,
                 num_epochs: int = 1000,
                 device: Text = "gpu") -> None:
        self.set_model(model)
        self._optimizer = optimizer
        self._criterion = criterion
        self._scheduler = scheduler
        self._metric_manager = metric_manager
        self._model_name = model_name
        self._save_dir = save_dir
        # Timestamped log path under <save_dir>/log/.
        log_filename = os.path.join(
            self._save_dir, "log/{}_{}_{}.log".format(
                model_name, log_filename,
                time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())))
        self._logger = Logger(log_filename, level="info")
        # Order matters: _num_workers and _batch_size must exist before
        # set_dataset() builds the DataLoaders below.
        self._num_workers = num_workers
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.device = device
        self._is_test = False
        if datasets:
            self.set_dataset(datasets)
        else:
            self._datasets = None

    def _set_device(self, device: Text) -> None:
        """Resolve *device* ("cpu"/"gpu") to a torch.device, CPU fallback."""
        # NOTE(review): the logger call in the assert message only runs (and
        # returns None) when the assertion fails.
        assert device in ["cpu", "gpu"], \
            self._logger.warning("warning: device {} is not available".format(device))
        if device == "gpu" and torch.cuda.is_available():
            self._device = torch.device("cuda:0")
        else:
            self._device = torch.device("cpu")

    def set_model(self, model: Module) -> None:
        self._model = model

    def set_criterion(self, criterion: Module) -> None:
        self._criterion = criterion

    def set_optimizer(self, optimizer: Optimizer) -> None:
        self._optimizer = optimizer

    def set_lr_scheduler(self, scheduler: LRScheduler) -> None:
        self._scheduler = scheduler

    def set_dataset(self, datasets: Dict[Text, Dataset]) -> None:
        """Store *datasets* and build the corresponding DataLoaders."""
        self._datasets = datasets
        # training and val: shuffled loaders, one per phase.
        self._data_loaders = {
            x: DataLoader(dataset=datasets[x],
                          batch_size=self._batch_size,
                          shuffle=True,
                          num_workers=self._num_workers)
            for x in self._phase_list
        }
        # test: optional, unshuffled; enables test().
        if datasets.get("test"):
            self._data_loaders["test"] = DataLoader(
                dataset=datasets["test"],
                batch_size=self._batch_size,
                shuffle=False,
                num_workers=self._num_workers)
            self._is_test = True

    def set_metric_manager(self, metric_manager: MetricManager) -> None:
        self._metric_manager = metric_manager

    @property
    def num_epochs(self) -> int:
        return self._num_epochs

    @num_epochs.setter
    def num_epochs(self, value: int) -> None:
        self._num_epochs = value

    @property
    def batch_size(self) -> int:
        return self._batch_size

    @batch_size.setter
    def batch_size(self, value: int) -> None:
        self._batch_size = value

    @property
    def device(self) -> Text:
        # NOTE(review): despite the Text annotation this returns the
        # torch.device resolved by _set_device, not the original string.
        return self._device

    @device.setter
    def device(self, device: Text) -> None:
        self._set_device(device)

    def _check(self) -> None:
        """Assert that everything needed by train() has been configured."""
        # NOTE(review): each logger call is the assert message, so it only
        # fires when that assertion fails (and the message itself is None).
        assert isinstance(self._model, Module), \
            self._logger.error("error: the net is not availabe.")
        assert isinstance(self._criterion, Module), \
            self._logger.error("error: the loss is not availabe.")
        assert isinstance(self._optimizer, Optimizer), \
            self._logger.error("error: the optimizer is not availabe.")
        assert isinstance(self._datasets["train"], Dataset), \
            self._logger.error("error: the train dataset is not availabe.")
        assert isinstance(self._datasets["val"], Dataset), \
            self._logger.error("error: the val dataset is not availabe.")

    def _fetch_input_label(self, samples):
        # Hook for subclasses: split a raw batch into (inputs, labels).
        # The default assumes the DataLoader already yields that pair.
        return samples

    def train(self) -> Module:
        """Train and validate for num_epochs; return the best model.

        Only the best model (per metric_manager) seen during validation
        phases is checkpointed and loaded back at the end.
        """
        self._check()
        self._model.to(self.device)
        since = time.time()
        best_model_wts = copy.deepcopy(self._model.state_dict())
        self._metric_manager.clear_best_metric()
        for epoch in range(self._num_epochs):
            self._logger.info("-" * 30)
            self._logger.info("epoch {}/{}".format(epoch, self._num_epochs - 1))
            # each epoch has a training and validation phase
            for phase in self._phase_list:
                epoch_stats = self._run_iter(phase)
                log_info = "[{}] ".format(phase)
                for key, value in epoch_stats.items():
                    log_info += "{}: {:.4f}, ".format(key, value)
                self._logger.info(log_info)
                # deep copy the model when validation improved the metric
                if phase == "val":
                    if self._metric_manager.is_best():
                        best_model_wts = copy.deepcopy(
                            self._model.state_dict())
                        log_info = ""
                        for key, value in epoch_stats.items():
                            log_info += "-val_{}_{:.4f}".format(key, value)
                        # Checkpoint filename embeds epoch and val metrics.
                        torch.save(
                            self._model.state_dict(),
                            os.path.join(
                                self._save_dir, "model",
                                "best_{}-epoch_{}{}.pth".format(
                                    self._model_name if self._model_name else "model",
                                    epoch, log_info)))
        time_elapsed = time.time() - since
        self._logger.info("-" * 30)
        self._logger.info("training complete in {:.0f}m {:.0f}s".format(
            time_elapsed // 60, time_elapsed % 60))
        self._logger.info("best val {}: {:4f}".format(
            self._metric_manager.metric_name,
            self._metric_manager.best_metric))
        # load best self._model weights
        self._model.load_state_dict(best_model_wts)
        return self._model

    def test(self) -> None:
        """Run one pass over the test set; call only once, after training."""
        # NOTE(review): the ValueError here is merely the assert message,
        # an AssertionError is what actually propagates on failure.
        assert self._is_test, ValueError(
            "error: test dataset is not available.")
        self._logger.info("-" * 30)
        self._logger.info("-- test phase --")
        phase = "test"
        since = time.time()
        epoch_stats = self._run_iter(phase)
        log_info = "[{}] ".format(phase)
        for key, value in epoch_stats.items():
            log_info += "{}: {:.4f}, ".format(key, value)
        self._logger.info(log_info)
        time_elapsed = time.time() - since
        self._logger.info("test complete in {:.0f}m {:.0f}s".format(
            time_elapsed // 60, time_elapsed % 60))
        return None

    def _run_iter(self, phase: Text) -> Dict[Text, float]:
        """Run one full pass over the loader for *phase*; return its stats."""
        dataset_sizes = {
            x: len(self._datasets[x])
            for x in self._datasets.keys()
        }
        if phase == "train":
            # set model to training mode
            self._model.train()
        else:
            # set model to evaluate mode
            self._model.eval()
        self._metric_manager.zero_stats()
        # iterate over data
        for samples in self._data_loaders[phase]:
            inputs, labels = self._fetch_input_label(samples)
            inputs = inputs.to(self._device)
            labels = labels.to(self._device)
            # zero the parameter gradients
            self._optimizer.zero_grad()
            # forward
            # track history if only in train
            with torch.set_grad_enabled(phase == "train"):
                outputs = self._model(inputs)
                _, preds = torch.max(outputs, 1)
                loss = self._criterion(outputs, labels)
                # backward + optimize only if in training phase
                if phase == "train":
                    loss.backward()
                    self._optimizer.step()
                    # NOTE(review): scheduler stepped per batch, not per
                    # epoch — confirm this matches the scheduler in use.
                    if self._scheduler:
                        self._scheduler.step()
            # statistics
            self._metric_manager.running_call(preds, labels, loss)
        epoch_stats = self._metric_manager.epoch_call(dataset_sizes[phase],
                                                      phase)
        return epoch_stats
class Shelf(unittest.TestCase):
    """UI regression tests for the shelf management page."""

    @classmethod
    def setUpClass(cls):
        cls.START_TIME = time.time()
        cls.logger = Logger(CASE_NAME).get_logger()
        cls.driver = get_driver()
        cls.dr = Basic(cls.driver, cls.logger)
        cls.p = ShelfPage(cls.dr)

    def test1_login(self):
        """登陆账号"""
        marketer_name = testing_environmental.get('MarketerName')
        marketer_password = testing_environmental.get('MarketerPassword')
        shelf_url = TEST_SHELF_URL
        self.logger.info('测试用例:%s,开始执行!' % CASE_NAME)
        try:
            self.dr.get_url(shelf_url)
            login(self.dr, marketer_name, marketer_password)
            self.logger.info('登陆成功,营销员账号是:%s' % marketer_name)
        except Exception:
            # BUG FIX: the original bare `except:` swallowed the failure,
            # letting later tests run against no session. Log, then re-raise
            # so the failure is reported.
            self.logger.error('登陆失败,营销员账号是:%s' % marketer_name)
            raise

    @unittest.skip('卡顿')
    def test2_operationShelf(self):
        """货架增删改查测试"""
        self.logger.info('执行货架增删改查测试:')
        new_shelf_name = random_str(4, 6)  # name of the shelf to create
        quoted_new_name = '"' + new_shelf_name + '"'  # CSS attr value, quoted
        try:
            # create shelf
            self.p.add_shelf()
            self.p.input_shelfName(new_shelf_name)
            self.p.primaryBtn()
            res_add = self.p.shelf_txt()
            self.p.sureBtn()
            self.logger.info(res_add)
            time.sleep(1)
            # copy shelf
            self.dr.click_perform('[data-name=%s]' % quoted_new_name,
                                  '[class="btnCopyShelf"]')
            self.p.sureBtn()
            res_copy = self.p.shelf_txt()
            self.logger.warning(res_copy)
            self.assertEqual('复制成功', res_copy, res_copy)
            self.p.sureBtn()
            time.sleep(1)
            # edit (rename) shelf
            renamed = random_str(4, 6)  # shelf name after editing
            quoted_renamed = '"' + renamed + '"'
            # BUG FIX: the original selector used the unquoted name here,
            # unlike every sibling selector in this test.
            self.dr.click_perform('[data-name=%s]' % quoted_new_name,
                                  '[class="btnModifyShelf"]')
            time.sleep(1)
            self.dr.clear('name', 'shelfname')
            self.dr.input('name', 'shelfname', renamed)
            self.dr.click('id', 'modifyShelfName')
            res_edit = self.p.shelf_txt()
            self.logger.warning(res_edit)
            self.assertEqual('编辑成功', res_edit, res_edit)
            self.p.sureBtn()
            # delete the renamed shelf
            time.sleep(1)
            self.dr.click_perform('[data-name=%s]' % quoted_renamed,
                                  '[class="btnDeleteShelf"]')
            self.p.sureBtn()
            res_del = self.p.shelf_txt()
            self.p.sureBtn()
            self.logger.warning(res_del)
            self.assertEqual('删除成功', res_del, res_del)
            # delete the copied shelf
            time.sleep(1)
            self.dr.click_perform('[data-name=%s]' % quoted_new_name,
                                  '[class="btnDeleteShelf"]')
            self.p.sureBtn()
            res_del2 = self.p.shelf_txt()
            self.p.sureBtn()
            # BUG FIX: the original logged the first delete's message here.
            self.logger.warning(res_del2)
            self.assertEqual('删除成功', res_del2, res_del2)
            self.logger.info('货架增删改查测试执行通过!')
        except Exception as e:
            # BUG FIX: the original bare `except:` also swallowed
            # AssertionError, so this test could never fail. Log with the
            # cause and re-raise.
            self.logger.error('Operation Shelf Error: %s' % e)
            raise

    def test2_shareShelf(self):
        """分享货架"""
        time.sleep(1)
        self.dr.click_perform('[class="table-list-title row hidden-xs"]',
                              '[class="glyphicon glyphicon-ok"]')
        self.dr.click('id', 'btnShareShelves')
        time.sleep(1)
        self.dr.click('css', '[spm="cloudmarketing.shelflist.shareshelfcopy"]')
        self.p.sureBtn()
        # NOTE(review): the copied share path is fetched but never asserted
        # on — presumably the copy action itself is what is being exercised.
        path1 = self.dr.js_script("return jQuery('[id=copyPath]').val();")
        self.dr.click('class', 'close')

    @classmethod
    def tearDownClass(cls):
        # cls.dr.quit()
        end_time = time.time()
        run_time = round((end_time - cls.START_TIME), 2)
        cls.logger.info('用例执行时长:%s秒' % run_time)
def abnormal_req(self, response_data, gateway_id):
    """Log an API-level error response (non-success status) as a warning."""
    context = ' | id=%s location=%s' % (gateway_id, self.location_param)
    message = response_data['info'] + context
    Logger.warning(Action.get, 'Code API', response_data['infocode'], message)