def main(alpha=None, gamma=None):
    """Build config, data loaders and a Trainer, then train or evaluate.

    Relies on a module-level ``args`` namespace (argparse result parsed
    elsewhere in this file) and on project helpers ``Config``, ``Trainer``,
    ``data_loader``, ``init_logger`` and ``set_seed``.

    Args:
        alpha: optional loss hyperparameter forwarded to ``Trainer.train``.
        gamma: optional loss hyperparameter forwarded to ``Trainer.train``.

    Returns:
        Whatever ``Trainer.train`` / ``Trainer.evaluate`` returns for the
        configured mode, or ``None`` if ``config.mode`` is neither
        ``'train'`` nor ``'test'`` (previously this raised ``NameError``).
    """
    config = Config(args.config_path)

    # CLI flags override values loaded from the config file.
    if args.mode:
        config.mode = args.mode
    if args.train_id:
        config.train_id = args.train_id
    if args.num_epochs:
        config.num_epochs = args.num_epochs
    if args.base_dir:
        config.base_dir = args.base_dir
    config.use_bayes_opt = args.use_bayes_opt
    config.use_preprocess = args.use_preprocess
    config.use_swa = args.use_swa

    train_path = os.path.join(config.base_dir, config.train_dir, config.train_id)
    result_path = os.path.join(config.base_dir, config.result_dir, config.train_id)
    data_path = os.path.join(config.base_dir, config.data_dir)

    # makedirs creates missing intermediate directories as well, and
    # exist_ok=True removes the isdir-then-mkdir race of the original.
    os.makedirs(train_path, exist_ok=True)
    os.makedirs(result_path, exist_ok=True)

    init_logger(os.path.join(result_path, 'log.txt'))
    set_seed(config)

    # Build one data loader per phase from a shared keyword bundle.
    tokenizer = AutoTokenizer.from_pretrained(config.bert_model_name)
    param = {
        "root": data_path,
        "batch_size": config.batch_size,
        "tokenizer": tokenizer,
        "config": config,
    }
    train_dataloader = data_loader(**param, phase='train')
    validate_dataloader = data_loader(**param, phase='validate')
    test_dataloader = data_loader(**param, phase='test')

    # Create model (check config).
    model = Trainer(config, train_dataloader, validate_dataloader, test_dataloader)

    result = None  # stays None for any unrecognized mode
    if config.mode == 'train':
        result = model.train(alpha=alpha, gamma=gamma)
    elif config.mode == 'test':
        model.load_model(config.model_weight_file)
        result = model.evaluate('test')
    # Release the model (and its GPU memory, if any) before returning.
    del model
    return result
# Follow the newest CICFlowMeter daily CSV like `tail -f`, feeding each new
# flow record to a pre-trained classifier loaded from disk.
# NOTE(review): the while-loop body appears to continue beyond this chunk
# (no prediction call is visible here) — the loop never terminates on its own.
PATH = '../CICFlowMeter-4.0/bin/data/daily/'
# Lexicographically last *.csv is assumed to be today's file — presumably the
# filenames embed a sortable date; verify against CICFlowMeter's naming scheme.
target_file = sorted(glob.glob(PATH + '*.csv'))[-1]
separator = ','
reader = open(target_file, 'r')
# First line is the CSV header; only its column count is used below.
header = reader.readline().split(separator)
count = 0
print('Number of columns: %d' % len(header))
print('Reading %s\n' % target_file)
model = Trainer()
model.load_model('./SVM_classifier.sav')
while True:
    row = reader.readline()
    # At EOF readline() returns '' — sleep briefly and poll again so new
    # rows appended by CICFlowMeter are picked up (tail -f behavior).
    if not row:
        time.sleep(0.1)
        continue
    count += 1
    # sys.stdout.write('\r' + str(count))
    # sys.stdout.flush()
    # Preprocess: split into fields, dropping the trailing element left by
    # the final separator/newline.
    row = row.split(separator)[:-1]
    # Drop non-numeric metadata columns (indices 0,1,2,3,5,6 — presumably
    # flow ID, IPs, ports and timestamp; confirm against the CSV header).
    row = np.delete(np.array(row), [0, 1, 2, 3, 5, 6], 0)
    row = row.astype(np.float32)