def test_data_loader(self):
    data_dir = os.path.join(cdir, '..', 'data')
    # Loading a non-existent file should raise
    with self.assertRaises(Exception):
        dst_df = dh.load_csv_to_df('file_name.csv')
    file_path = os.path.join(data_dir, 'AEP_hourly.csv')
    if not os.path.exists(file_path):
        logger.critical('%s does not exist. test skipped.', file_path)
    else:
        dst_df = dh.load_csv_to_df(file_path)
        self.assertEqual(dst_df.shape, (121296, 1))
    if not os.path.exists(data_dir):
        logger.critical('%s does not exist. test skipped.', data_dir)
    else:
        data = dh.data_loader(data_dir, 12)
        self.assertEqual(len(data), 11)
        self.assertEqual(data['AEP_hourly'].shape, (121296, 1))
        self.assertEqual(data['COMED_hourly'].shape, (66504, 1))
        self.assertEqual(data['DAYTON_hourly'].shape, (121296, 1))
        self.assertEqual(data['DEOK_hourly'].shape, (57744, 1))
        self.assertEqual(data['DOM_hourly'].shape, (116208, 1))
        self.assertEqual(data['DUQ_hourly'].shape, (119088, 1))
        self.assertEqual(data['EKPC_hourly'].shape, (45336, 1))
        self.assertEqual(data['FE_hourly'].shape, (62880, 1))
        self.assertEqual(data['NI_hourly'].shape, (58464, 1))
        self.assertEqual(data['PJME_hourly'].shape, (145392, 1))
        self.assertEqual(data['PJMW_hourly'].shape, (143232, 1))
def select_channel(self, channel: str):
    _channel = (self.MB.XPATH, f'{self._root_xpath}//*[@text="{channel}"]/../..')
    try:
        self.find(_channel).click()
    except NoSuchElementException:
        logger.critical(f'Channel does not exist, please check: {channel}')
def check_database_connection() -> bool:
    database_working = False
    try:
        # A cheap query is enough to prove the connection is alive
        User.query.limit(1).all()
        database_working = True
    except Exception as e:
        logger.critical(str(e))
    return database_working
def check_database_connection():
    database_working = False
    try:
        User.query.limit(1).all()
        database_working = True
        output = "ok"
    except Exception as e:
        logger.critical(str(e))
        output = "Database connection failed"
    return database_working, output
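# A minimal usage sketch for the health checks above, assuming a Flask app
# with Flask-SQLAlchemy already configured; the /health route name and the
# `app` object are hypothetical, not part of the original snippets.
from flask import Flask, jsonify

app = Flask(__name__)

@app.route('/health')
def health():
    # Return 200 when the database answers, 503 otherwise
    ok, message = check_database_connection()
    return jsonify(status=message), (200 if ok else 503)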
def run_tasks(self):
    while True:  # with the try/except below, this loop runs virtually forever
        currentMillis = datetime.now()
        # HEALTH_CHECK_PERIOD is expressed in milliseconds
        if (currentMillis - self.last_health_check).total_seconds() * 1000 >= self.HEALTH_CHECK_PERIOD:
            logger.debug("Health check time....")
            self.health_checks()
            self.last_health_check = datetime.now()
        try:
            if self.can_execute < 1:
                self.can_execute = self.consumer.run()
        except Exception as err:
            logger.critical(str(err))
def startup():
    '''
    Runs during startup to set up the databases.
    The rationale is to defer execution until .env is loaded.
    '''
    global mongo_client, db
    try:
        mongo_client = pymongo.MongoClient(os.environ.get("MONGOKEY"))
    except Exception:
        logger.critical("MongoDB could not be connected. Check that you have "
                        "the correct MongoDB key in your .env file and "
                        "that you have dnspython installed."
                        "\nTerminating program...")
        sys.exit(1)  # Exit with error
    db = mongo_client[mongo_client.list_database_names()[0]]
    logger.info("MongoDB Atlas connected to: "
                f"{mongo_client.list_database_names()[0]}")
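# Assumed shape of the .env entry consumed above (placeholder values, not from
# the original): an Atlas SRV connection string, which is also why the error
# message mentions dnspython (required for the mongodb+srv:// scheme).
# MONGOKEY=mongodb+srv://<user>:<password>@<cluster>.mongodb.net/?retryWrites=true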
async def connection_closed(self, ws):
    logger.critical("connection failed to peer")
    self.sockets.remove(ws)
def switch_channel(cls, channel: str):
    _channel = (cls.MB.XPATH, f'//*[@text="{channel}"]/..')
    try:
        cls.find(_channel).click()
    except NoSuchElementException:
        logger.critical(f'Channel does not exist, please check: {channel}')
"""  MODEL  """
model = model()
loss = nn.CrossEntropyLoss()
optim = Adam(model.parameters(), lr=1e-3)
if conf.level == 'DEBUG':
    logger.info(model)
logger.info("model, loss, optim is ready")

# Freeze the feature extractor and restore pretrained weights
for param in model.features.parameters():
    param.requires_grad = False
checkpoint = torch.load('./ckpts/32')
model.load_state_dict(checkpoint['model_state_dict'], strict=True)
logger.critical('loaded pretrained model')

"""  PREPARE  """
cudnn.benchmark = True
if conf.multi_gpu:
    model = nn.DataParallel(model)
model = model.cuda()
loss = loss.cuda()
logger.info('model is ready to train / infer')
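# Hedged sketch of how the './ckpts/32' checkpoint loaded above is assumed to
# have been written: a dict containing at least a 'model_state_dict' key (the
# optimizer entry is an illustrative assumption, not from the original).
torch.save({'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optim.state_dict()},
           './ckpts/32')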
def tap_share(self):
    self.find(self._share_button).click()
    if not self.find(self._share_page).is_displayed():
        logger.critical('An error occurred during the share operation')
        raise RuntimeError()
def _find_by(cls, by=MB.ID, value=None) -> WebElement:
    try:
        return BaseDriver.get_driver().find_element(by=by, value=value)
    except NoSuchElementException:
        logger.critical(f'Element not found!\nby -> {by}\nexpression -> {value}')
        raise  # re-raise so the annotated WebElement return type holds
def start_run() -> None:
    """
    Run the training and the testing of the network.

    Run steps:
    - preparation
    - load datasets
    - build network
    - send data to device (GPU/CPU)
    - train network
    - test network
    - clean up
    """
    # Prepare the environment
    preparation()

    # Catch and log every exception during the runtime
    # noinspection PyBroadException
    try:
        with SectionTimer('run'):
            with SectionTimer('datasets loading', 'debug'):
                # Load datasets from user function
                train_dataset, test_dataset = load_datasets()

            # Build network from user function
            network = build_network()

            # Automatically choose between CPU and GPU if not specified
            if settings.device is None or settings.device == 'auto':
                device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            else:
                device = torch.device(settings.device)
            logger.debug(f'pyTorch device selected: {device}')

            # Send the network and the datasets to the selected device (CPU or CUDA).
            # We assume the GPU has enough memory to store the whole network and
            # datasets; if not, they should be split.
            network.to(device)
            train_dataset.to(device)
            test_dataset.to(device)

            # Save network stats and show them if debug is enabled
            network_metrics(network, test_dataset[0][0].shape, device)

            # Start the training
            train(network, train_dataset, test_dataset, device)

            # Start the normal test
            test(network, test_dataset, device, final=True)

            # Arrived at the end successfully (no error)
            save_results(success_run=True)
    except KeyboardInterrupt:
        logger.error('Run interrupted by the user.')
        raise  # Let it propagate to stop the runs planner if needed
    except Exception:
        logger.critical('Run interrupted by an unexpected error.', exc_info=True)
    finally:
        # Clean up the environment, ready for a new run
        clean_up()
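# SectionTimer is used above but defined elsewhere in the project; this is a
# hedged, minimal sketch of a compatible context manager, under the assumption
# that it only times a named section and logs at the given level.
import time
from contextlib import contextmanager

@contextmanager
def section_timer(name: str, level: str = 'info'):
    start = time.perf_counter()
    try:
        yield
    finally:
        elapsed = time.perf_counter() - start
        getattr(logger, level)(f'section "{name}" took {elapsed:.3f}s')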
loss = loss.cuda()
logger.info('model is ready to train / infer')

"""  TRAIN  """
for epoch in range(1, 1 + 100):
    logger.critical('Epoch:{}'.format(epoch))
    for idx, (image, mask, labels, edges, sp_size, y_gt) in enumerate(train_dataset):
        (image, mask, labels, edges, sp_size) = (image.cuda(), mask.cuda(),
                                                 labels.cuda(), edges, sp_size)
        image = image.unsqueeze(0)
        while True:
            logits = model(image, labels, edges, sp_size)
            selected_sp = list(y_gt.keys())
            logits_ = logits[selected_sp]
            ans_ = torch.LongTensor([y_gt[i] for i in selected_sp]).cuda()
            _, ids = torch.topk(logits_, 1)
            print(selected_sp, logits.detach().cpu().numpy().tolist(),