def main():
    ########### Load the configuration file ##########
    ch = config.ConfigHandler("./config.ini")
    ch.load_config()
    ########### Read parameters ##########
    train_batch_size = int(ch.config["model"]["train_batch_size"])
    test_batch_size = int(ch.config["model"]["test_batch_size"])
    num_epochs = int(ch.config["model"]["num_epochs"])
    learning_rate = float(ch.config["model"]["learning_rate"])
    class_size = int(ch.config["model"]["class_size"])
    ########### Read log and data settings ##########
    log_interval = int(ch.config["log"]["log_interval"])
    version_name = ch.config["log"]["version_name"]
    train_file = ch.config["data"]["train_file"]
    test_file = ch.config["data"]["test_file"]
    ########### Build the training data loader ##########
    data_train = Dataset.ImageDataset(train_file, train=True)
    data_loader_train = torch.utils.data.DataLoader(
        dataset=data_train, batch_size=train_batch_size, shuffle=True)
    ########### Build the test data loader ##########
    data_test = Dataset.ImageDataset(test_file, train=False)
    data_loader_test = torch.utils.data.DataLoader(
        dataset=data_test, batch_size=test_batch_size, shuffle=False)
    ########### Train and evaluate ##########
    train.train_and_test(num_epochs, learning_rate, class_size,
                         data_loader_train, data_loader_test,
                         log_interval, version_name).train_epoch()
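# Hedged sketch (not from the original source): writes a config.ini with the sections
# and keys that main() above reads. The section/key names come from the code; every
# value and path below is an illustrative placeholder, and ConfigHandler is assumed
# to read standard INI files.
import configparser

def write_sample_config(path="./config.ini"):
    cp = configparser.ConfigParser()
    cp["model"] = {
        "train_batch_size": "64",      # illustrative value
        "test_batch_size": "64",       # illustrative value
        "num_epochs": "10",            # illustrative value
        "learning_rate": "0.001",      # illustrative value
        "class_size": "10",            # illustrative value
    }
    cp["log"] = {
        "log_interval": "100",         # illustrative value
        "version_name": "v1",          # illustrative value
    }
    cp["data"] = {
        "train_file": "data/train.txt",  # illustrative path
        "test_file": "data/test.txt",    # illustrative path
    }
    with open(path, "w") as f:
        cp.write(f)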
def load_config_file_and_check_zip_required(config_filename):
    global logger

    latest_timestamp = None
    handler_config = None
    zip_required = False

    logger.info("Start checking file status for '%s'" % config_filename)
    try:
        config_handler = config.ConfigHandler(
            zip_handler_config.ZipHandlerConfig())
        handler_config = config_handler.read_config_file(config_filename)
    except Exception as e:
        logger.error("Error '%s' while reading configuration file" % str(e))
        return None
    try:
        zip_handler = ZipHandler(handler_config)
        latest_timestamp = zip_handler.get_latest_timestamp()
        zip_timestamp = zip_handler.get_zip_timestamp()
        zip_required = latest_timestamp > zip_timestamp
    except Exception as e:
        logger.error("Error '%s' while checking file status" % str(e))
        return None
    logger.info("Returning status 'zip required': %s" % str(zip_required))
    return zip_required
def load_config_file_and_sync(config_filename):
    global logger

    config_handler = config.ConfigHandler(sync_config.SyncConfig())
    tool_sync_config = config_handler.read_config_file(config_filename)
    sync_tool = sync.SyncTool(tool_sync_config)
    try:
        sync_tool.scan()
    except Exception as e:
        logger.error("Exception %s during scan" % str(e))
        return
    try:
        sync_tool.sync()
    except Exception as e:
        logger.error("Exception %s during sync" % str(e))
def load_config_file_and_zip(config_filename):
    global logger

    pyzipista_config = None
    logger.info("Starting application for config %s", config_filename)
    try:
        config_handler = config.ConfigHandler(
            zip_handler_config.ZipHandlerConfig())
        pyzipista_config = config_handler.read_config_file(config_filename)
    except Exception as e:
        logger.exception("Error '%s' while reading configuration file %s"
                         % (str(e), config_filename))
    if not pyzipista_config:
        logger.warning('Could not read config %s', config_filename)
        return
    try:
        pyzipista_config.dump()
        zip_handler = ZipHandler(pyzipista_config)
        zip_handler.create_zip_file()
    except Exception as e:
        logger.exception("Error '%s' while writing zip file" % str(e))
    logger.info("Terminating application")
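# Hedged usage sketch (not from the original source): one way to combine the two
# helpers above so a zip file is only rebuilt when the timestamp check says it is
# needed. The function names come from the code above; the wrapper itself and its
# name are illustrative.
def check_and_zip(config_filename):
    # the checker returns True/False, or None when the config or status check failed
    zip_required = load_config_file_and_check_zip_required(config_filename)
    if zip_required:
        load_config_file_and_zip(config_filename)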
def main():
    ########### Load the configuration file ##########
    ch = config.ConfigHandler("./config.ini")
    ch.load_config()
    ########### Read parameters ##########
    train_batch_size = int(ch.config["model"]["train_batch_size"])
    val_batch_size = int(ch.config["model"]["val_batch_size"])
    num_epochs = int(ch.config["model"]["num_epochs"])
    learning_rate = float(ch.config["model"]["learning_rate"])
    class_size = int(ch.config["model"]["class_size"])
    log_interval = int(ch.config["log"]["log_interval"])
    ########### Build the data loaders ##########
    data_loader = dataset.MyDataset(train_batch_size, val_batch_size)
    data_loader_train = data_loader.load_train_data()
    data_loader_test = data_loader.load_test_data()
    ########### Train and evaluate ##########
    train.train_and_test(num_epochs, learning_rate, class_size,
                         data_loader_train, data_loader_test,
                         log_interval).train_epoch()
def test():
    config_handler = config.ConfigHandler(sync_config.SyncConfig())
    sample_config = config_handler.read_config_file(
        'etc/gitsynchista_config_sample')
    sample_config.dump()
def main():
    ########### Load the configuration file ##########
    ch = config.ConfigHandler("./config.ini")
    ch.load_config()
    ########### Read parameters ##########
    train_batch_size = int(ch.config["model"]["train_batch_size"])
    valid_batch_size = int(ch.config["model"]["valid_batch_size"])
    test_batch_size = int(ch.config["model"]["test_batch_size"])
    num_epochs = int(ch.config["model"]["num_epochs"])
    learning_rate = float(ch.config["model"]["learning_rate"])
    class_size = int(ch.config["model"]["class_size"])
    ########### Read log and data settings ##########
    log_interval = int(ch.config["log"]["log_interval"])
    version_name = ch.config["log"]["version_name"]
    train_file = ch.config["data"]["train_file"]
    valid_file = ch.config["data"]["valid_file"]
    test_file = ch.config["data"]["test_file"]
    ########### Prediction output file ##########
    pred_file = ch.config["save"]["pred_file"]
    ########### Build the training data loader ##########
    data_train = Dataset.ImageDataset(train_file, train=True)
    data_loader_train = torch.utils.data.DataLoader(
        dataset=data_train, batch_size=train_batch_size, shuffle=True)
    ########### Build the validation data loader ##########
    data_valid = Dataset.ImageDataset(valid_file, train=False)
    data_loader_valid = torch.utils.data.DataLoader(
        dataset=data_valid, batch_size=valid_batch_size, shuffle=True)
    ########### Build the test data loader ##########
    data_test = Dataset.ImageDataset(test_file, train=False)
    data_loader_test = torch.utils.data.DataLoader(
        dataset=data_test, batch_size=test_batch_size, shuffle=False)
    ########### Train and evaluate ##########
    trainer = train.train_and_test(num_epochs, learning_rate, class_size,
                                   data_loader_train, data_loader_valid,
                                   data_loader_test, log_interval,
                                   version_name, pred_file)
    ########## start train ###########
    print("start train")
    begin_time = time()
    trainer.train_epoch()
    end_time = time()
    run_time = end_time - begin_time
    print('cost time:', run_time)
    ########## start eval ###########
    print("start test")
    trainer.test()
def main():
    ########### Load the configuration file ##########
    ch = config.ConfigHandler("./config.ini")
    ch.load_config()
    ########### Read parameters ##########
    max_query_len_char = int(ch.config["data"]["max_query_len_char"])
    max_query_len_word = int(ch.config["data"]["max_query_len_word"])
    char_voc_size = int(ch.config["data"]["char_voc_size"])
    word_voc_size = int(ch.config["data"]["word_voc_size"])
    char_embedding_size = int(ch.config["model"]["char_embedding_size"])
    word_embedding_size = int(ch.config["model"]["word_embedding_size"])
    dropout_keep_prob = float(ch.config["model"]["dropout_keep_prob"])
    batch_size = int(ch.config["model"]["batch_size"])
    num_epochs = int(ch.config["model"]["num_epochs"])
    learning_rate = float(ch.config["model"]["learning_rate"])
    class_size = int(ch.config["model"]["class_size"])
    char_filter_set = []
    for char_filter in ch.config["cnn_model"]["char_filter_set"].split(';'):
        char_filter_set.append(list(map(int, char_filter.split(','))))
    ########### Inspect parameters ##########
    print("max_query_len_char:")
    print(max_query_len_char)
    print("max_query_len_word:")
    print(max_query_len_word)
    print("char_voc_size:")
    print(char_voc_size)
    print("word_voc_size:")
    print(word_voc_size)
    print("char_embedding_size:")
    print(char_embedding_size)
    print("word_embedding_size:")
    print(word_embedding_size)
    print("dropout_keep_prob:")
    print(dropout_keep_prob)
    print("batch_size:")
    print(batch_size)
    print("num_epochs:")
    print(num_epochs)
    print("learning_rate:")
    print(learning_rate)
    print("class_size:")
    print(class_size)
    print("char_filter_set:")
    print(char_filter_set)
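# Hedged sketch (not from the original source): illustrates the char_filter_set
# format implied by the parsing loop in main() above, i.e. semicolon-separated
# filter specs made of comma-separated integers. The sample value is made up; only
# the ';' / ',' structure is taken from the code.
def parse_char_filter_set(raw="2,64;3,64;4,128"):
    # e.g. returns [[2, 64], [3, 64], [4, 128]]
    return [list(map(int, part.split(','))) for part in raw.split(';')]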
def test():
    global logger

    logger.info("Start test")
    config_handler = config.ConfigHandler(sync_config.SyncConfig())
    sample_config = config_handler.read_config_file(
        'etc/gitsynchista_config_sample')
    sample_config.dump()
    logger.info("End test")
def __init__(self, port=1234):
    super(VpnListener, self).__init__()
    # the vpn server's port
    self.port = port
    # the current connection objects
    self.connections = []
    # make sure we won't access shared resources from
    # different threads...
    self.mutex = threading.Lock()
    # set up the virtual router
    self.router = Router(self)
    # set up the config handler
    self.config = config.ConfigHandler(self)
    # set up the connections manager
    self.conns_handler = connection.ConnectionsHandler(
        self, self.config.config)
def main():
    print(__copyright__)
    config_handler = cfg.ConfigHandler(DEFAULT_CHIP)
    wxapp = wx.App()
    locale = wx.Locale(wx.LANGUAGE_ENGLISH)
    main_gui = baseGui.MainWindow(None, config_handler)
    module_handler = ModuleHandler(main_gui, config_handler)
    main_gui.Show(True)
    wxapp.MainLoop()
    return 0
def main():
    ########### Load the configuration file ##########
    ch = config.ConfigHandler("./config.ini")
    ch.load_config()
    ########### Read parameters ##########
    train_size = int(ch.config["model"]["train_size"])
    train_batch_size = int(ch.config["model"]["train_batch_size"])
    test_batch_size = int(ch.config["model"]["test_batch_size"])
    num_epochs = int(ch.config["model"]["num_epochs"])
    train_file = ch.config["data"]["train_file"]
    test_file = ch.config["data"]["test_file"]
    ########### Build the training data loader ##########
    data_train = Dataset.FlowerDataset(train_file, train_size, train=True)
    data_loader_train = torch.utils.data.DataLoader(
        dataset=data_train, batch_size=train_batch_size, shuffle=True)
    ########### Inspect the training data ##########
    for epoch in range(0, num_epochs):
        for batch_idx, (data, target) in enumerate(data_loader_train):
            for i in range(0, len(data)):
                im = Image.fromarray(data[i, :, :, :].numpy())
                im.save(str(epoch) + "_train_" + str(batch_idx) + "_"
                        + str(i) + ".jpeg")
    ########### Build the test data loader ##########
    data_test = Dataset.FlowerDataset(test_file, test_batch_size, train=False)
    data_loader_test = torch.utils.data.DataLoader(
        dataset=data_test, batch_size=test_batch_size, shuffle=False)
    ########### Inspect the test data ##########
    print(type(data_loader_test))
    dataiter = iter(data_loader_test)
    # use the built-in next(); the .next() method is not available on newer
    # PyTorch DataLoader iterators
    img, label = next(dataiter)
    print(type(img), type(label))
    print(img.size(), label.size())
    for i in range(test_batch_size):
        im = Image.fromarray(img[i, :, :, :].numpy())
        im.save("test_" + str(i) + ".jpeg")
def find_sync_configs(base_path='..'):
    config_filenames = []
    configs = []
    for (dirpath, dirnames, filenames) in os.walk(base_path, topdown=True,
                                                  onerror=None,
                                                  followlinks=False):
        for filename in filenames:
            if filename == GITSYNCHISTA_CONFIG_FILE:
                config_filenames.append(os.path.join(dirpath, filename))
    for filename in config_filenames:
        config_handler = config.ConfigHandler(sync_config.SyncConfig())
        a_sync_config = config_handler.read_config_file(filename)
        configs.append(a_sync_config)
    return configs
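# Hedged usage sketch (not from the original source): walks the tree with
# find_sync_configs() above and runs the scan/sync steps used in
# load_config_file_and_sync() on each config it finds. The wrapper name and the
# absence of error handling are illustrative simplifications.
def sync_all(base_path='..'):
    for a_sync_config in find_sync_configs(base_path):
        sync_tool = sync.SyncTool(a_sync_config)
        sync_tool.scan()
        sync_tool.sync()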
def __init__(self, conf_path: str):
    self.config = config.ConfigHandler(conf_path)
    self.git: github.Github = None