def __init__(self, parentThread=None, text_edit=None, text_cursor=None, ui_mode=None):
    """Set up decensoring configuration from the parsed command-line args.

    Args:
        parentThread: optional parent forwarded to the base class.
        text_edit, text_cursor, ui_mode: accepted for interface
            compatibility with the UI caller; currently unused here.
    """
    super().__init__(parentThread)
    args = config.get_args()
    self.is_mosaic = args.is_mosaic
    self.variations = args.variations
    # Mask color arrives as 0-255 RGB channels; the model expects 0.0-1.0.
    self.mask_color = [
        args.mask_color_red / 255.0,
        args.mask_color_green / 255.0,
        args.mask_color_blue / 255.0,
    ]
    self.decensor_input_path = args.decensor_input_path
    self.decensor_input_original_path = args.decensor_input_original_path
    self.decensor_output_path = args.decensor_output_path
    self.signals = None  # Signals class will be given by progressWindow
    self.model = None
    self.warm_up = False
    # Fix: removed a stale commented-out ui_mode block (dead code), and
    # replaced the exists()/makedirs() pair with exist_ok=True to avoid a
    # race if another process creates the directory in between.
    os.makedirs(self.decensor_output_path, exist_ok=True)
def __init__(
    self,
    model,
    cross_entropy,
    data_loader,
    optimizer,
    lr_scheduler=None,
    return_pred_and_label=True,
):
    """Build the training graph: AMP / grad scaling, op fusion, optimizer.

    Args:
        model: the network module to train.
        cross_entropy: loss module applied to the model output.
        data_loader: source of training batches.
        optimizer: optimizer registered on this graph.
        lr_scheduler: optional LR schedule attached to the optimizer.
        return_pred_and_label: whether the graph returns (pred, label).
    """
    super().__init__()
    cfg = get_args()
    self.return_pred_and_label = return_pred_and_label

    # Mixed precision takes priority; static grad scaling is the fallback.
    if cfg.use_fp16:
        self.config.enable_amp(True)
        self.set_grad_scaler(make_grad_scaler())
    elif cfg.scale_grad:
        self.set_grad_scaler(make_static_grad_scaler())

    self.config.allow_fuse_add_to_output(True)
    self.config.allow_fuse_model_update_ops(True)
    # Disabling cudnn_conv_heuristic_search_algo turns on dry-run mode.
    # Dry-run helps on a single device but has no effect with multiple
    # devices, so re-enable heuristic search for multi-node runs.
    self.config.enable_cudnn_conv_heuristic_search_algo(False)
    self.world_size = flow.env.get_world_size()
    if self.world_size / cfg.num_devices_per_node > 1:
        self.config.enable_cudnn_conv_heuristic_search_algo(True)

    self.model = model
    self.cross_entropy = cross_entropy
    self.data_loader = data_loader
    self.add_optimizer(optimizer, lr_sch=lr_scheduler)
def get_rpc_creds(self, data, network='mainnet'):
    """Extract RPC credentials from the text of a sucr.conf file.

    Scans for rpcuser= / rpcpassword= / rpcport= lines. Port precedence:
    CLI --rpc-port argument > sucr.conf value > network default.

    Args:
        data: raw text of the configuration file.
        network: 'mainnet' selects port 9335; anything else 19335.

    Returns:
        dict with 'user', 'password' and 'port' keys ('port' is an int).
    """
    # python >= 2.7
    matches = re.findall(r'rpc(user|password|port)=(.*?)$', data, re.MULTILINE)
    creds = dict(matches)

    # Fetch port from args, if any (import here to avoid circular deps)
    from config import get_args
    cli_args = get_args()
    if cli_args.rpc_port:
        creds[u'port'] = cli_args.rpc_port
    elif 'port' not in creds:
        # standard Sucre defaults when sucr.conf does not specify a port
        creds[u'port'] = 9335 if network == 'mainnet' else 19335

    # values taken from sucr.conf are strings; normalize the port to int
    creds[u'port'] = int(creds[u'port'])
    return creds
def main():
    """Wire up dataset, model and evaluator, then run the trainer."""
    cfg = get_args()
    data = Dataset(cfg)
    net = get_model(cfg, data)
    scorer = Evaluator(cfg, data)
    Trainer(cfg, data, net, scorer).train()
def main():
    """Evaluate a saved sentiment model on the test split.

    Builds the vocabulary from the training corpus, loads the test set,
    restores model weights from the checkpoint and runs the test loop.
    """
    Config = config.get_args()
    set_seed(Config.seed)
    word2ix, ix2word, max_len, avg_len = build_word_dict(Config.train_path)

    test_data = CommentDataSet(Config.test_path, word2ix, ix2word)
    test_loader = DataLoader(
        test_data,
        batch_size=16,
        shuffle=False,
        num_workers=0,
        collate_fn=mycollate_fn,
    )

    # Embedding weights are zero-filled here; the real weights come from
    # the checkpoint loaded below (strict=True restores every key).
    weight = torch.zeros(len(word2ix), Config.embedding_dim)
    model = SentimentModel(
        embedding_dim=Config.embedding_dim,
        hidden_dim=Config.hidden_dim,
        LSTM_layers=Config.LSTM_layers,
        drop_prob=Config.drop_prob,
        pre_weight=weight,
    )
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    criterion = nn.CrossEntropyLoss()
    model.load_state_dict(torch.load(Config.model_save_path), strict=True)  # load checkpoint

    # Fix: the original constructed ConfuseMeter() and immediately rebound
    # the name to test()'s return value — the first object was dead code.
    confuse_meter = test(test_loader, device, model, criterion)
def main():
    """Entry point: build data/model/optimizer, optionally resume, then fit."""
    args = get_args()
    cwd = Path.cwd()

    # Pick the device: GPU unless --cpu was passed or CUDA is unavailable.
    use_gpu = not args.cpu and torch.cuda.is_available()
    args.device = torch.device('cuda') if use_gpu else torch.device('cpu')
    print(f'Oven: {args.device}')

    # Data, model, loss and optimizer.
    train_dl, val_dl = get_data(args.img_size, args.batch_size)
    model, optimizer = get_model(args)
    criterion = nn.BCELoss()

    # Checkpoints live under ./checkpoint; create the directory if missing.
    cp_dir = cwd / 'checkpoint'
    cp_dir.mkdir(exist_ok=True)

    if args.scratch:
        print('Fresh Bake! Training the network from scratch.')
    else:
        # Resume: restore model/optimizer state and training progress.
        path = cp_dir / args.cp_file
        args.epoch_start, args.best_acc, args.loss = load_checkpoint(
            model, optimizer, path, args)
        print(f'Warming Up! Loading the network from: {path}')
        print(f'Start Epoch: {args.epoch_start}, Accuracy: {args.best_acc}')

    fit(model, criterion, optimizer, train_dl, val_dl, args)
def __init__(self):
    """Build the full ResNet-50 training setup from command-line args.

    Copies every parsed argument onto ``self``, configures NCCL fusion,
    then constructs the model, data loaders, optimizer, LR scheduler
    and — in graph mode — the compiled train/eval graphs.
    """
    args = get_args()
    # Mirror every CLI argument as an attribute so the rest of the class
    # can use e.g. self.graph, self.ddp, self.use_fp16 directly.
    for k, v in args.__dict__.items():
        setattr(self, k, v)

    self.rank = flow.env.get_rank()
    self.world_size = flow.env.get_world_size()
    self.cur_epoch = 0
    self.cur_iter = 0
    self.cur_batch = 0
    # Global (consistent) mode: multi-process without DDP, or graph mode.
    self.is_global = (self.world_size > 1 and not self.ddp) or self.graph
    self.is_train = False
    # LR metering only applies in eager mode (graph mode skips it).
    self.meter_lr = self.graph is False

    self.init_logger()

    flow.boxing.nccl.set_fusion_threshold_mbytes(
        self.nccl_fusion_threshold_mb)
    flow.boxing.nccl.set_fusion_max_ops_num(self.nccl_fusion_max_ops)
    if self.use_fp16 and self.num_nodes * self.num_devices_per_node > 1:
        # NOTE(review): buffer-fused all-reduce is disabled only for
        # multi-device fp16 runs — presumably an fp16/fusion conflict;
        # confirm against the OneFlow boxing docs.
        flow.boxing.nccl.enable_use_buffer_to_fuse_all_reduce(False)

    self.model = resnet50(
        zero_init_residual=self.zero_init_residual,
        fuse_bn_relu=self.fuse_bn_relu,
        fuse_bn_add_relu=self.fuse_bn_add_relu,
        channel_last=self.channel_last,
    )
    self.init_model()
    self.cross_entropy = make_cross_entropy(args)

    self.train_data_loader = make_data_loader(args, "train", self.is_global,
                                              self.synthetic_data)
    self.val_data_loader = make_data_loader(args, "validation",
                                            self.is_global,
                                            self.synthetic_data)

    self.optimizer = make_optimizer(args, self.model)
    self.lr_scheduler = make_lr_scheduler(args, self.optimizer)
    self.acc = Accuracy()

    if self.graph:
        # Static-graph compilation of the train and eval loops.
        self.train_graph = make_train_graph(
            self.model,
            self.cross_entropy,
            self.train_data_loader,
            self.optimizer,
            self.lr_scheduler,
            return_pred_and_label=self.metric_train_acc,
        )
        self.eval_graph = make_eval_graph(self.model, self.val_data_loader)

    if self.gpu_stat_file is not None:
        # Per-rank GPU memory statistics written to rank{N}_<file>.
        self.gpu_stat = CudaUtilMemStat(f"rank{self.rank}_" + self.gpu_stat_file,
                                        only_ordinal=self.rank)
    else:
        self.gpu_stat = None
def main():
    """Run one experiment and log final hamming / macro / weighted metrics."""
    # get all arguments
    args = get_args()

    # Random seeding is intentionally disabled upstream.

    # Per-run logger keyed by date plus a short uuid.
    model_id = time.strftime("%Y%m%d-") + str(uuid.uuid4())[:8]
    formatter = logging.Formatter('%(asctime)s: %(message)s ',
                                  '%m/%d/%Y %I:%M:%S %p')
    logger = logging.getLogger(model_id)
    logger.setLevel(logging.INFO)

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)

    if args.save_log:
        file_handler = logging.FileHandler('./save/log/' + model_id + '.log')
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        logger.info('log file : ./save/log/' + model_id + '.log')

    logger.info(args)

    ep, loss, hm, macP, macR, macF1, wP, wR, wF1 = run_experiment(args, logger)

    logger.info("[Final score - ep:{}] Loss={:5.3f}, Hamming={:2.3f}".format(
        ep, loss, hm))
    logger.info("[ macro ] macP:{:2.3f}, macR:{:2.3f}, macF1:{:2.3f}".format(
        macP, macR, macF1))
    logger.info("[ weighted ] wP:{:2.3f}, wR:{:2.3f}, wF1:{:2.3f}".format(
        wP, wR, wF1))

    # Repeat the log-file location at the end for convenience.
    if args.save_log:
        logger.info('log file : ./save/log/' + model_id + '.log')
def __init__(self):
    """Load the fine-tuned ASTER encoder/decoder pair for text recognition."""
    from config import get_args
    args = get_args(sys.argv[1:])

    # Seed every RNG (numpy, torch CPU and all CUDA devices).
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.benchmark = True
    # NOTE(review): benchmark=True and deterministic=True pull in opposite
    # directions for conv algorithm selection; confirm which is intended.
    torch.backends.cudnn.deterministic = True

    args.cuda = True and torch.cuda.is_available()
    if args.cuda:
        print('using cuda.')
        # Makes every new tensor default to CUDA floats.
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    else:
        torch.set_default_tensor_type('torch.FloatTensor')

    # Create Character dict & max seq len
    args, char2id_dict, id2char_dict = Create_char_dict(args)
    print(id2char_dict)
    rec_num_classes = len(id2char_dict)  # Get rec num classes / max len
    print('max len : ' + str(args.max_len))

    # init model: ResNet backbone encoder + attention decoder head.
    encoder = ResNet_ASTER(with_lstm=True, n_group=args.n_group,
                           use_cuda=args.cuda)
    encoder_out_planes = encoder.out_planes
    decoder = AttentionRecognitionHead(num_classes=rec_num_classes,
                                       in_planes=encoder_out_planes,
                                       sDim=args.decoder_sdim,
                                       attDim=args.attDim,
                                       max_len_labels=args.max_len,
                                       use_cuda=args.cuda)

    # Restore fine-tuned weights saved under params/.
    encoder.load_state_dict(torch.load('params/encoder_final'))
    decoder.load_state_dict(torch.load('params/decoder_final'))
    print('fine-tuned model loaded')

    device = torch.device('cuda')
    encoder.to(device)
    decoder.to(device)

    self.encoder = encoder
    self.decoder = decoder
    self.device = device
    self.args = args
    self.char2id_dict = char2id_dict
    self.id2char_dict = id2char_dict
def main():
    """Train a stock-prediction model, then evaluate the best checkpoint.

    Logs ALL and TopK test reports and appends a tab-separated summary
    row to a per-model log file.
    """
    # os.environ['CUDA_VISIBLE_DEVICES'] = '3'
    config = get_args()
    logger = set_logger(config)
    dataset = StockDataset(config)
    # The dataset determines the relation/company dimensions of the model.
    config.num_relations = dataset.num_relations
    config.num_companies = dataset.num_companies

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    model_name = config.model_type
    # Experiment name encodes every hyperparameter identifying this run.
    exp_name = '%s_%s_%s_%s_%s_%s_%s_%s'%(config.data_type, model_name,
            str(config.test_phase), str(config.test_size),
            str(config.train_proportion), str(config.lr),
            str(config.dropout), str(config.lookback))
    if not (os.path.exists(os.path.join(config.save_dir, exp_name))):
        os.makedirs(os.path.join(config.save_dir, exp_name))

    sess = tf.Session(config=run_config)
    model = init_prediction_model(config)
    init = tf.group(tf.global_variables_initializer(),
                    tf.local_variables_initializer())
    sess.run(init)

    def model_summary(logger):
        # Print trainable-variable names/shapes via slim's analyzer.
        model_vars = tf.trainable_variables()
        slim.model_analyzer.analyze_vars(model_vars, print_info=True)
    model_summary(logger)

    #Training
    evaluator = Evaluator(config, logger)
    trainer = Trainer(sess, model, dataset, config, logger, evaluator)
    trainer.train()

    #Testing — restore the best checkpoint written during training.
    loader = tf.train.Saver(max_to_keep=None)
    loader.restore(sess, tf.train.latest_checkpoint(os.path.join(config.save_dir, exp_name)))
    print("saved at {}/{}".format(config.save_dir, exp_name))
    print("load best evaluation model")

    test_loss, report_all, report_topk = evaluator.evaluate(sess, model,
            dataset, 'test', trainer.best_f1['neighbors'])

    # Full test-set report.
    te_pred_rate, te_acc, te_cpt_acc, te_mac_f1, te_mic_f1, te_exp_rt, te_sharpe = report_all
    logstr = 'EPOCH {} TEST ALL \nloss : {:2.4f} accuracy : {:2.4f} hit ratio : {:2.4f} pred_rate : {} macro f1 : {:2.4f} micro f1 : {:2.4f} expected return : {:2.4f} sharpe : {:2.4f}'\
            .format(trainer.best_f1['epoch'],test_loss,te_acc,te_cpt_acc,te_pred_rate,te_mac_f1,te_mic_f1,te_exp_rt, te_sharpe)
    logger.info(logstr)

    # Top-K report (same metric tuple layout as report_all).
    te_pred_rate, te_acc, te_cpt_acc, te_mac_f1, te_mic_f1, te_exp_rt, te_sharpe = report_topk
    logstr = 'EPOCH {} TEST TopK \nloss : {:2.4f} accuracy : {:2.4f} hit ratio : {:2.4f} pred_rate : {} macro f1 : {:2.4f} micro f1 : {:2.4f} expected return : {:2.4f} sharpe : {:2.4f}'\
            .format(trainer.best_f1['epoch'],test_loss,te_acc,te_cpt_acc,te_pred_rate,te_mac_f1,te_mic_f1,te_exp_rt, te_sharpe)
    logger.info(logstr)

    #Print Log — append a tab-separated summary row for this phase.
    with open('%s_log.log'%model_name, 'a') as out_:
        out_.write("%d phase\n"%(config.test_phase))
        out_.write("%f\t%f\t%f\t%f\t%f\t%f\t%s\t%f\t%f\t%f\t%f\t%f\t%f\t%s\t%d\n"%(
            report_all[1], report_all[2], report_all[3], report_all[4],
            report_all[5], report_all[6], str(report_all[0]),
            report_topk[1], report_topk[2], report_topk[3], report_topk[4],
            report_topk[5], report_topk[6], str(report_topk[0]),
            trainer.best_f1['epoch']))
def main():
    """Generate the dataset files for every tier listed in the config."""
    cli_args = get_args()
    cfg = Config(cli_args)
    # Tiers are underscore-separated in the config string, e.g. "train_dev".
    for tier in cfg.tiers.split('_'):
        DataSet(tier, cfg.save_dir, cfg.save_h5,
                cfg.data_root, cfg.word_emb_config).generate()
def main():
    """Fine-tune a BERT multi-label classifier on the toxic-comment data.

    Trains with a 20% validation split, checkpoints the best model by AUC,
    then saves both weights and a SavedModel for serving.
    """
    args = get_args()
    # The six Jigsaw toxicity labels.
    # Fix: this list was defined twice in the original; define it once.
    label_cols = ['toxic', 'severe_toxic', 'obscene', 'threat',
                  'insult', 'identity_hate']

    df_train = pd.read_csv(args["train_file"])
    train_datas = df_train['comment_text'].tolist()
    train_labels = df_train[label_cols].values.tolist()
    print("train data size: ", len(train_datas))
    print("train label size: ", len(train_labels))

    # Hold out 20% of the data for validation.
    train_data, val_data, train_label, val_label = train_test_split(
        train_datas, train_labels, test_size=0.2, random_state=0)

    tokenizer = get_tokenizer(args["bert_model_name"], args["pretrain_model_path"])
    train_x, train_y = get_model_data(train_data, train_label, tokenizer, args["max_length"])
    val_x, val_y = get_model_data(val_data, val_label, tokenizer, args["max_length"])

    model = create_model(args["bert_model_name"], len(label_cols))

    # Checkpoint callback: keep only the best model (by AUC) as a SavedModel.
    # (A custom f1 Metrics callback existed here previously; AUC-based
    # checkpointing replaced it.)
    callbacks = [
        tf.keras.callbacks.ModelCheckpoint(
            filepath="./output/model/1",  # {epoch}
            save_best_only=True,  # only save when the monitored metric improves
            monitor='auc',  # 'accuracy',
            verbose=1,
        )
    ]
    model.fit(train_x, train_y,
              epochs=args["epoch"],
              verbose=1,
              batch_size=args["batch_size"],
              callbacks=callbacks,
              validation_data=(val_x, val_y),
              validation_batch_size=args["batch_size"])

    # Save raw weights plus a full SavedModel for serving.
    # Fix: exist_ok=True avoids the exists()/makedirs() race.
    os.makedirs(args["model_path"], exist_ok=True)
    model.save_weights(args["model_path"])
    os.makedirs(args["pbmodel_path"], exist_ok=True)
    tf.keras.models.save_model(model, args["pbmodel_path"], save_format="tf")
def __init__(self):
    """Read CLI options, ensure the output directory exists, load the model."""
    self.args = config.get_args()
    self.is_mosaic = self.args.is_mosaic
    # Normalize the 0-255 mask color channels into 0.0-1.0 floats.
    normalized = []
    for channel in self.args.mask_color:
        normalized.append(float(channel / 255))
    self.mask_color = normalized
    if not os.path.exists(self.args.decensor_output_path):
        os.makedirs(self.args.decensor_output_path)
    self.load_model()
def __init__(self):
    """Read CLI options, normalize the mask color, and load the model."""
    args = config.get_args()
    self.args = args
    self.is_mosaic = args.is_mosaic
    # RGB channels arrive as 0-255 values; scale each into [0.0, 1.0].
    channels = (args.mask_color_red, args.mask_color_green, args.mask_color_blue)
    self.mask_color = [c / 255.0 for c in channels]
    if not os.path.exists(args.decensor_output_path):
        os.makedirs(args.decensor_output_path)
    self.load_model()
def main():
    """Dispatch to evaluation or training based on the configured mode."""
    config = get_args()
    if config.mode == 'eval':
        evaluate(config)
        return
    # Any non-eval mode trains.
    datasets = get_data(config)
    model = get_model(config)
    Trainer(config, datasets).train(model)
def main():
    """Train the SinglePath one-shot supernet on CIFAR-10 or ImageNet."""
    # args & device
    args = config.get_args()
    if torch.cuda.is_available():
        print('Train on GPU!')
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # dataset: CIFAR-10 via torchvision download, or a local ImageNet tree.
    assert args.dataset in ['cifar10', 'imagenet']
    train_transform, valid_transform = data_transforms(args)
    if args.dataset == 'cifar10':
        trainset = torchvision.datasets.CIFAR10(root=os.path.join(args.data_dir, 'cifar'), train=True,
                                                download=True, transform=train_transform)
        train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True,
                                                   pin_memory=True, num_workers=8)
        valset = torchvision.datasets.CIFAR10(root=os.path.join(args.data_dir, 'cifar'), train=False,
                                              download=True, transform=valid_transform)
        val_loader = torch.utils.data.DataLoader(valset, batch_size=args.batch_size, shuffle=False,
                                                 pin_memory=True, num_workers=8)
    elif args.dataset == 'imagenet':
        train_data_set = datasets.ImageNet(os.path.join(args.data_dir, 'ILSVRC2012', 'train'), train_transform)
        val_data_set = datasets.ImageNet(os.path.join(args.data_dir, 'ILSVRC2012', 'valid'), valid_transform)
        train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=args.batch_size, shuffle=True,
                                                   num_workers=8, pin_memory=True, sampler=None)
        val_loader = torch.utils.data.DataLoader(val_data_set, batch_size=args.batch_size, shuffle=False,
                                                 num_workers=8, pin_memory=True)

    # SinglePath_OneShot — a fixed per-layer block choice for this run.
    choice = [3, 1, 2, 1, 0, 1, 3, 3, 1, 3, 0, 1, 0, 3, 3, 3, 3, 3, 0, 3]  # [2, 0, 2, 3, 2, 2, 3, 1, 2, 1, 0, 1, 0, 3, 1, 0, 0, 2, 3, 2]
    model = SinglePath_Network(args.dataset, args.resize, args.classes, args.layers, choice)
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, args.momentum, args.weight_decay)
    # Linear LR decay over the full schedule: factor 1 -> 0 across epochs.
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: 1 - (epoch / args.epochs))

    # flops & params & structure (input size depends on the dataset).
    flops, params = profile(model, inputs=(torch.randn(1, 3, 32, 32),) if args.dataset == 'cifar10'
                            else (torch.randn(1, 3, 224, 224),), verbose=False)
    # print(model)
    print('Random Path of the Supernet: Params: %.2fM, Flops:%.2fM' % ((params / 1e6), (flops / 1e6)))
    model = model.to(device)
    summary(model, (3, 32, 32) if args.dataset == 'cifar10' else (3, 224, 224))

    # train supernet; validate and checkpoint every val_interval epochs.
    start = time.time()
    for epoch in range(args.epochs):
        train(args, epoch, train_loader, device, model, criterion, optimizer, scheduler, supernet=False)
        scheduler.step()
        if (epoch + 1) % args.val_interval == 0:
            validate(args, epoch, val_loader, device, model, criterion, supernet=False)
            utils.save_checkpoint({'state_dict': model.state_dict(), }, epoch + 1, tag=args.exp_name)
    utils.time_record(start)
def __init__(self, model, data_loader):
    """Evaluation graph: optionally enable AMP, always fuse add-to-output.

    Args:
        model: the network to evaluate.
        data_loader: source of evaluation batches.
    """
    super().__init__()
    opts = get_args()
    if opts.use_fp16:
        self.config.enable_amp(True)
    self.config.allow_fuse_add_to_output(True)
    self.model = model
    self.data_loader = data_loader
def main():
    """Train and evaluate the document-sentiment model (TensorFlow 1.x)."""
    # import ipdb; ipdb.set_trace()
    config = get_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(config.gpu_id)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TF info/warning logs
    logger = get_logger(config)

    voc = load_vocab(config)
    config.n_voc = len(voc.voc)
    logger.info(config)
    config.embeddings = voc.embeddings
    # User/product attributes are hard-disabled here, overriding any flag.
    config.use_product_info = False
    config.use_user_info = False
    if config.use_user_info and config.use_product_info:
        # Unreachable while the two flags above are forced to False;
        # kept for when attribute modelling is re-enabled.
        usrdict = UserTable('../data/' + config.dataname + '/usrlist.txt')
        prddict = ProductTable('../data/' + config.dataname + '/prdlist.txt')
        config.n_users = usrdict.size + 1
        config.n_products = prddict.size + 1
    else:
        usrdict = None
        prddict = None

    logger.info("build model...")
    # Pin graph construction to the configured device.
    with tf.device("/device:{}:{}".format(config.device_type, config.gpu_id)):
        model = Model(config)

    logger.info("creating session...")
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=config.gpu_allocate_rate)
    gpu_options.allow_growth = True
    session_config = tf.ConfigProto(allow_soft_placement=True,
                                    gpu_options=gpu_options)
    sess = tf.Session(config=session_config)
    model.init_variable(sess)

    trainset, devset, testset = load_dataset(config, voc, usrdict, prddict)

    if config.load_model:
        logger.info("restoring model...")
        model.restore(sess)

    # Skip training entirely in test-only mode.
    if not config.test_only:
        logger.info("starting training...")
        model.train(sess, trainset, devset, testset)
        logger.info("training done.")

    logger.info("starting testing...")
    test_acc, test_mae, test_rmse = model.evaluate(sess, testset)
    logger.info(
        "final result of testset: acc = {:.4f}, mae = {:.4f}, rmse = {:.4f}".
        format(test_acc, test_mae, test_rmse))
    logger.info("testing done.")
def __init__(self):
    """Set up the Wide&Deep trainer: data loaders, model, optimizer, graphs.

    Reads all hyperparameters from the command line. In graph execution
    mode, embedding parameters get a separate sparse optimizer and the
    train/eval graphs are compiled up front.
    """
    args = get_args()
    self.args = args
    self.save_path = args.model_save_dir
    self.save_init = args.save_initial_model
    self.save_model_after_each_eval = args.save_model_after_each_eval
    self.eval_after_training = args.eval_after_training
    self.dataset_format = args.dataset_format
    self.execution_mode = args.execution_mode
    self.max_iter = args.max_iter
    self.loss_print_every_n_iter = args.loss_print_every_n_iter
    self.ddp = args.ddp
    if self.ddp == 1 and self.execution_mode == "graph":
        # DDP is incompatible with graph mode; fall back to eager.
        warnings.warn(
            """when ddp is True, the execution_mode can only be eager, but it is graph""",
            UserWarning,
        )
        self.execution_mode = "eager"
    # Global (consistent) tensors: multi-process without DDP, or graph mode.
    self.is_global = (
        flow.env.get_world_size() > 1 and not args.ddp
    ) or args.execution_mode == "graph"
    self.rank = flow.env.get_rank()
    self.world_size = flow.env.get_world_size()
    self.cur_iter = 0
    self.eval_interval = args.eval_interval
    self.eval_batchs = args.eval_batchs
    self.init_logger()
    self.train_dataloader = make_data_loader(
        args, "train", self.is_global, self.dataset_format
    )
    self.val_dataloader = make_data_loader(
        args, "val", self.is_global, self.dataset_format
    )
    self.wdl_module = make_wide_and_deep_module(args, self.is_global)
    self.init_model()
    self.opt = flow.optim.Adam(self.wdl_module.parameters(), lr=args.learning_rate)
    self.loss = flow.nn.BCELoss(reduction="none").to("cuda")
    if self.execution_mode == "graph":
        # Split parameters: embeddings go to a SparseOptimizer so only the
        # touched rows are updated; everything else uses the dense Adam.
        # Fix: the original abused a conditional expression purely for its
        # append() side effects; use a plain if/else instead.
        params, sparse_params = [], []
        for name, param in self.wdl_module.named_parameters():
            if "embedding" in name:
                sparse_params.append(param)
            else:
                params.append(param)
        self.opt = flow.optim.Adam(params, lr=args.learning_rate)
        sparse_opt = flow.optim.Adam(sparse_params, lr=args.learning_rate)
        sparse_opt = flow.optim.utils.SparseOptimizer(sparse_opt)
        self.eval_graph = WideAndDeepValGraph(self.wdl_module, self.val_dataloader)
        self.train_graph = WideAndDeepTrainGraph(
            self.wdl_module, self.train_dataloader, self.loss, self.opt, sparse_opt
        )
def main():
    """Restore a trained stock model from checkpoint and report test metrics.

    Training is commented out: this entry point only rebuilds the graph,
    loads the best saved checkpoint for the experiment name and evaluates.
    """
    # os.environ['CUDA_VISIBLE_DEVICES'] = '3'
    config = get_args()
    logger = set_logger(config)
    dataset = StockDataset(config)
    # The dataset determines the relation/company dimensions of the model.
    config.num_relations = dataset.num_relations
    config.num_companies = dataset.num_companies

    run_config = tf.ConfigProto(log_device_placement=False)
    run_config.gpu_options.allow_growth = True

    # Experiment name encodes the hyperparameters that identify the run.
    exp_name = '%s_%s_%s_%s_%s_%s_%s_%s' % (
        config.data_type, config.model_type, str(config.test_phase),
        str(config.test_size), str(config.train_proportion), str(
            config.lr), str(config.dropout), str(config.lookback))
    # save train file
    if not (os.path.exists(os.path.join(config.save_dir, exp_name))):
        os.makedirs(os.path.join(config.save_dir, exp_name))

    sess = tf.Session(config=run_config)
    model = init_prediction_model(config)
    init = tf.group(tf.global_variables_initializer(),
                    tf.local_variables_initializer())
    sess.run(init)

    def model_summary():
        model_vars = tf.trainable_variables()
        slim.model_analyzer.analyze_vars(
            model_vars, print_info=True)  # print the name and shapes of the variables
    model_summary()

    # Graph models use the graph-aware evaluator; trainers stay disabled.
    if 'graph' in config.model_type:
        evaluator = GEvaluator(config, logger)
        # trainer = GTrainer(sess, model, dataset, config, logger, evaluator)
    else:
        evaluator = Evaluator(config, logger)
        # trainer = Trainer(sess, model, dataset, config, logger, evaluator)
    # trainer.train()

    #Testing — restore the latest (best) checkpoint from the experiment dir.
    loader = tf.train.Saver(max_to_keep=None)
    loader.restore(
        sess,
        tf.train.latest_checkpoint(os.path.join(config.save_dir, exp_name)))
    print("saved at {}/{}".format(config.save_dir, exp_name))
    print("load best evaluation model")

    test_loss, report = evaluator.evaluate(sess, model, dataset, 'test', True)
    te_pred_rate, te_acc, te_cpt_acc, te_mac_f1, te_mic_f1, te_exp_rt, te_sharpe = report
    logstr = 'TEST ALL \nloss : {:2.4f} accuracy : {:2.4f} hit ratio : {:2.4f} pred_rate : {} macro f1 : {:2.4f} micro f1 : {:2.4f} expected return : {:2.4f} sharpe : {:2.4f}'\
        .format(test_loss,te_acc,te_cpt_acc,te_pred_rate,te_mac_f1,te_mic_f1,te_exp_rt, te_sharpe)
    logger.info(logstr)
def main():
    """Train (optionally) and evaluate a stock-prediction model.

    Bug fix: ``evaluator`` and ``trainer`` were only created inside the
    ``config.mode == 'train'`` branch, so any other mode crashed with a
    NameError at the evaluation step below. They are now always built;
    only ``trainer.train()`` remains gated on train mode.
    """
    config = get_args()
    logger = set_logger(config)
    dataset = StockDataset(config)
    config.num_relations = dataset.num_relations
    config.num_companies = dataset.num_companies

    run_config = tf.ConfigProto(log_device_placement=False)
    run_config.gpu_options.allow_growth = True

    # Experiment name encodes the hyperparameters that identify the run.
    exp_name = '%s_%s_%s_%s_%s_%s_%s_%s'%(config.data_type, config.model_type,
            str(config.test_phase), str(config.test_size),
            str(config.train_proportion), str(config.lr),
            str(config.dropout), str(config.lookback))
    # save train file
    if not (os.path.exists(os.path.join(config.save_dir, exp_name))):
        os.makedirs(os.path.join(config.save_dir, exp_name))

    sess = tf.Session(config=run_config)
    model = init_prediction_model(config)
    init = tf.group(tf.global_variables_initializer(),
                    tf.local_variables_initializer())
    sess.run(init)

    def model_summary():
        # print the name and shapes of the trainable variables
        model_vars = tf.trainable_variables()
        slim.model_analyzer.analyze_vars(model_vars, print_info=True)
    model_summary()

    # Graph-based models need the graph-aware evaluator/trainer pair.
    if 'graph' in config.model_type:
        evaluator = GEvaluator(config, logger)
        trainer = GTrainer(sess, model, dataset, config, logger, evaluator)
    else:
        evaluator = Evaluator(config, logger)
        trainer = Trainer(sess, model, dataset, config, logger, evaluator)

    if config.mode == 'train':
        trainer.train()

    #Testing — restore the best checkpoint saved under the experiment dir.
    loader = tf.train.Saver(max_to_keep=None)
    loader.restore(sess, tf.train.latest_checkpoint(os.path.join(config.save_dir, exp_name)))
    print("load best evaluation model")

    test_loss, report = evaluator.evaluate(sess, model, dataset, 'test')
    te_pred_rate, te_acc, te_cpt_acc, te_mac_f1, te_mic_f1, te_exp_rt = report
    logstr = 'EPOCH {} TEST ALL \nloss : {:2.4f} accuracy : {:2.4f} hit ratio : {:2.4f} pred_rate : {} macro f1 : {:2.4f} micro f1 : {:2.4f} expected return : {:2.4f}'\
        .format(trainer.best_f1['epoch'],test_loss,te_acc,te_cpt_acc,te_pred_rate,te_mac_f1,te_mic_f1,te_exp_rt)
    logger.info(logstr)

    # Append a tab-separated summary row for this test phase.
    log_name = '%s_log.log'%config.GNN_model+'_'+config.model_type+'_'+config.data_type+'_'+config.price_model+'_'+str(config.test_phase)
    with open(log_name, 'a') as out_:
        out_.write("%d phase\n"%(config.test_phase))
        out_.write("%f\t%f\t%f\t%f\t%f\t%s\t%d\n"%(
            report[1], report[2], report[3], report[4], report[5],
            str(report[0]), trainer.best_f1['epoch']))
def main():
    """Route to train / test / generate depending on the CLI flags."""
    config = get_args()
    config.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    dataset = Imagenet(config)
    model = models.resnet18(pretrained=False, progress=True).to(config.device)
    runner = Runner(config, dataset, model)
    if config.train:
        runner.train()
    elif config.test:
        runner.test()
    elif config.generate:
        runner.generate()
def __init__(self):
    """Load decensoring options from the command line and prepare I/O paths."""
    opts = config.get_args()
    self.is_mosaic = opts.is_mosaic
    self.variations = opts.variations
    # Scale the 0-255 RGB mask color into the 0.0-1.0 range the model uses.
    self.mask_color = [
        opts.mask_color_red / 255.0,
        opts.mask_color_green / 255.0,
        opts.mask_color_blue / 255.0,
    ]
    self.decensor_input_path = opts.decensor_input_path
    self.decensor_input_original_path = opts.decensor_input_original_path
    self.decensor_output_path = opts.decensor_output_path
    if not os.path.exists(self.decensor_output_path):
        os.makedirs(self.decensor_output_path)
def main():
    """Train and test a GCN on the configured dataset."""
    cfg = get_args()
    dataset = Data(cfg)
    # Feature width and class count come straight from the loaded data.
    n_feats = dataset.features.shape[1]
    n_classes = dataset.labels.max().item() + 1
    model = GCN(config=cfg, num_features=n_feats, num_classes=n_classes)
    solver = Solver(cfg, model, dataset)
    if torch.cuda.is_available():
        model = model.to('cuda')
    criterion, best_model = solver.train()
    solver.test(criterion, best_model)
def main():
    """Boot the service: build the app from config, then run it forever."""
    log.info("startup")
    args = get_args()
    loop = asyncio.get_event_loop()
    setup_coro = setup(
        config_endpoint=args.config_endpoint,
        port=args.port,
    )
    app = loop.run_until_complete(setup_coro)
    loop.run_until_complete(util.run_app(app))
    loop.run_forever()
def main(argv):
    """Extract feature vectors from image files with a pre-trained model."""
    # Parse, display and validate the command-line arguments.
    cfg = config.get_args(argv)
    config.show_args(cfg)
    model = config.validate_args(cfg)

    # The model determines its preprocessor and expected input size.
    pre = fn.get_preprocessor(cfg.model)
    pixels = get_shape(cfg.model)

    # Extract features from the images and persist them to the output file.
    features = fn.get_feature_recs(model, pre, cfg.path, pixels)
    fn.save_features(features, cfg.output)
    print('\nFound ' + str(len(features)) + ' records.')
def apk(actual, predicted, k=None):
    """Compute the average precision at k for aligned binary lists.

    Both lists are position-aligned binary sequences (1 = relevant /
    correct); precision is accumulated at every position where both the
    prediction and the ground truth are 1.

    Example: for hits at ranks 1, 4, 5, 6 of 7 predictions the precisions
    are 1/1, 2/4, 3/5, 4/6 and AP is their sum divided by min(len, k).

    Parameters
    ----------
    actual : list
        Binary ground-truth labels; assumed to be at least as long as the
        truncated ``predicted`` (positions are compared index by index).
    predicted : list
        Binary predictions, aligned with ``actual``.
    k : int, optional
        Maximum number of predictions considered. Defaults to the
        configured batch size. (Fix: the default used to be evaluated at
        import time via ``config.get_args().batch_size``, which parsed
        the CLI as a side effect of merely importing this module; it is
        now resolved lazily on the first call that omits ``k``.)

    Returns
    -------
    float
        The average precision at k over the input lists.
    """
    # Fix: guard empty ground truth BEFORE indexing into it; the original
    # only checked `if not actual` after the loop had already raised.
    if not actual:
        return 0.0
    if k is None:
        k = config.get_args().batch_size
    if len(predicted) > k:
        predicted = predicted[:k]
    score = 0.0
    num_hits = 0.0
    for i, p in enumerate(predicted):
        if predicted[i] == 1 and actual[i] == 1:
            num_hits += 1.0
            score += num_hits / (i + 1.0)
    return score / min(len(actual), k)
def main():
    """Export the actor-critic network's graph to TensorBoard."""
    args = get_args()
    device = torch.device('cuda' if args.cuda else 'cpu')

    # One environment instance, used only to read the obs/action spaces.
    env = create_train_env(1, args.difficulty, args.macro, 'env1.mp4')
    obs_dim = env.observation_space.shape[0]
    n_actions = env.action_space.n

    model = RNNActorCriticNetwork(obs_dim, n_actions, args.noise_linear).to(device)
    model.eval()

    # Trace the graph with a dummy (batch=1, seq=1) observation.
    dummy_input = torch.rand(1, 1, *env.observation_space.shape).to(device=device)
    writer = SummaryWriter(log_dir=args.log_dir)
    writer.add_graph(model, (dummy_input, ))
def main():
    """Load the pre-trained SWDNet and run the test pass on the LMDB set."""
    global opt, model
    opt = config.get_args()
    print(opt)
    log_file = None
    starting_time = time.time()
    print_cz(str=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), f=log_file)
    cudnn.benchmark = True

    print_cz("===> Building model", f=log_file)
    # Two-stage super-resolution: a spatial stage followed by a wavelet stage.
    spatial_sr = spatial_stage.Spatial_Stage()
    wavelet_sr = wavelet_stage.Wavelet_Stage()
    model = swdnet.SWDNet(spatial_sr, wavelet_sr)
    # Pick the checkpoint matching the degradation used to build the data.
    if opt.data_degradation in ['bicubic', 'Bicubic']:
        pth_file = './weights/swdnet-bicubic-dict.pth'
    elif opt.data_degradation in ['nearest', 'Nearest']:
        pth_file = './weights/swdnet-nearest-dict.pth'
    # NOTE(review): any other degradation value leaves pth_file undefined
    # and raises NameError on the next line — consider validating up front.
    model.load_state_dict(torch.load(pth_file))

    print_cz("===> Setting GPU", f=log_file)
    # Job type selects the GPU layout: S = single GPU, D = 2, Q = 4, E = 8.
    if opt.job_type == 'S' or opt.job_type == 's':
        model.cuda()
    else:
        if opt.job_type == 'Q' or opt.job_type == 'q':
            gpu_device_ids = [0, 1, 2, 3]
        elif opt.job_type == 'E' or opt.job_type == 'e':
            gpu_device_ids = [0, 1, 2, 3, 4, 5, 6, 7]
        elif opt.job_type == 'D' or opt.job_type == 'd':
            gpu_device_ids = [0, 1]
        # NOTE(review): an unrecognized job_type also leaves gpu_device_ids
        # undefined here (NameError below).
        model = nn.DataParallel(model.cuda(), device_ids=gpu_device_ids).cuda()

    # size_average=False sums rather than averages the L1 loss
    # (deprecated argument in newer torch; equivalent to reduction='sum').
    criterion = nn.L1Loss(size_average=False)

    print_cz("===> Loading datasets", f=log_file)
    testing_data_loader = data_loader_lmdb.get_loader(
        os.path.join(config.dataset_dir, opt.data_degradation, 'test_lmdb'),
        batch_size=opt.batch_size,
        stage='test',
        num_workers=opt.num_workers)

    print_cz("===> Testing", f=log_file)
    test(testing_data_loader, model, criterion, epoch=0, logfile=None)
    print_cz(str(time.time() - starting_time), f=log_file)
def main():
    """Run the trained multi-label toxicity model over the test file."""
    args = get_args()
    df_test = pd.read_csv(args["test_file"])
    comments = df_test['comment_text'].values.tolist()

    label_cols = [
        'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'
    ]
    tokenizer = get_tokenizer(args['bert_model_name'], args['pretrain_model_path'])
    testdata = get_model_data(comments, tokenizer, args["max_length"])

    model = create_model(args["bert_model_name"], len(label_cols))
    model.load_weights(args["model_path"])

    # Binarize the per-label scores at the 0.15 decision threshold
    # ((x > t).astype(int) is equivalent to np.where(x > t, 1, 0)).
    pred_logits = model.predict(testdata)
    pred = (pred_logits > 0.15).astype(int).tolist()
    print(pred)
sample_train = gen_examples([train_x1[k] for k in samples], [train_x2[k] for k in samples], train_l[samples], [train_y[k] for k in samples], args.batch_size) logging.info('Train accuracy: %.2f %%' % eval_acc(test_fn, sample_train)) logging.info('Dev accuracy: %.2f %%' % eval_acc(test_fn, all_dev)) if dev_acc > best_acc: best_acc = dev_acc logging.info('Best dev accuracy: epoch = %d, n_udpates = %d, acc = %.2f %%' % (epoch, n_updates, dev_acc)) utils.save_params(args.model_file, params, epoch=epoch, n_updates=n_updates) if __name__ == '__main__': args = config.get_args() np.random.seed(args.random_seed) lasagne.random.set_rng(np.random.RandomState(args.random_seed)) if args.train_file is None: raise ValueError('train_file is not specified.') if args.dev_file is None: raise ValueError('dev_file is not specified.') if args.rnn_type == 'lstm': args.rnn_layer = lasagne.layers.LSTMLayer elif args.rnn_type == 'gru': args.rnn_layer = lasagne.layers.GRULayer else: raise NotImplementedError('rnn_type = %s' % args.rnn_type)