import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

import cnn      # project-local: defines ConvNet
import trainer  # project-local: defines Trainer


def RunTrain(isSave=False):
    # MNIST dataset
    train_dataset = torchvision.datasets.MNIST(root='/data',
                                               train=True,
                                               transform=transforms.ToTensor(),
                                               download=True)
    # Data loader
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    # CNN model
    model = cnn.ConvNet(output_classes).to(device)
    # Loss and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    # Training
    mnist_trainer = trainer.Trainer(train_loader, model=model, cri=criterion,
                                    opt=optimizer, device=device)
    mnist_trainer.Execute(epochs)
    trained_model = mnist_trainer.GetModel()
    if isSave:
        # Save via the trainer instance; the original called SaveModel() on the
        # `trainer` module, which would fail.
        mnist_trainer.SaveModel()
    return trained_model
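# The snippet above relies on a project-local trainer.Trainer exposing
# Execute/GetModel/SaveModel, plus module-level globals (batch_size,
# learning_rate, epochs, output_classes, device) that are not shown here.
# A minimal sketch of what such a trainer might look like -- the class body
# below is an assumption for illustration, not the project's actual code:
class Trainer:
    def __init__(self, loader, model, cri, opt, device):
        self._loader = loader
        self._model = model
        self._cri = cri
        self._opt = opt
        self._device = device

    def Execute(self, epochs):
        # Standard supervised loop: forward, loss, backward, step.
        self._model.train()
        for _ in range(epochs):
            for images, labels in self._loader:
                images = images.to(self._device)
                labels = labels.to(self._device)
                self._opt.zero_grad()
                loss = self._cri(self._model(images), labels)
                loss.backward()
                self._opt.step()

    def GetModel(self):
        return self._model

    def SaveModel(self, path='model.ckpt'):
        torch.save(self._model.state_dict(), path)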
import os


def main():
    model = Model(27, 18, 9)
    trainer = Trainer(model)
    logger = get_io_stream_logger(__name__)

    model_path = "model.pt"
    # First run: persist the freshly initialized weights; later runs resume them.
    if not os.path.exists(model_path):
        model.save(model_path)
    else:
        model.load(model_path)

    for _ in range(10000):
        logger.info("start")
        board = Board()
        play_one_game(board, model, logger)
        logger.info("finish")
        # Drawn games carry no win/loss signal, so only decisive games are trained on.
        if board.result != DRAW:
            logger.info("start train")
            loss = trainer.train_from_board(board)
            logger.info("loss = {}".format(loss))
            model.save(model_path)
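# Model(27, 18, 9) is defined elsewhere in the project. The 27-18-9 shape is
# consistent with a tic-tac-toe board one-hot encoded as 9 squares x 3 states
# (empty / cross / nought), one hidden layer, and one output per square.
# A minimal PyTorch sketch under that assumption -- the layer layout and the
# save/load bodies are guesses, not the project's actual definition:
import torch
import torch.nn as nn


class Model(nn.Module):
    def __init__(self, n_in, n_hidden, n_out):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(n_in, n_hidden),   # 27 -> 18
            nn.ReLU(),
            nn.Linear(n_hidden, n_out),  # 18 -> 9, one score per square
        )

    def forward(self, x):
        return self.net(x)

    def save(self, path):
        torch.save(self.state_dict(), path)

    def load(self, path):
        self.load_state_dict(torch.load(path))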
# Training dataset (VOC format); training samples are listed in ImageSets/Main/train.txt
train_dataset = vocdataset(cfg, is_train=True,
                           transform=transfrom(cfg, is_train=True),
                           target_transform=targettransform(cfg))
# Test dataset (VOC format); test samples are listed in ImageSets/Main/eval.txt
test_dataset = vocdataset(cfg=cfg, is_train=False,
                          transform=transfrom(cfg=cfg, is_train=False),
                          target_transform=targettransform(cfg))

if __name__ == '__main__':
    """
    Before running, start visdom:
        pip install visdom          # install
        python -m visdom.server     # launch the server
    """
    # The first call downloads the pretrained ResNet weights.
    # Instantiate the model; all model parameters are configured in the Config file.
    net = RetainNet(cfg)
    # Move the model to the GPU; cfg.DEVICE.MAINDEVICE selects the main GPU.
    net.to(cfg.DEVICE.MAINDEVICE)
    # Initialize the trainer; trainer settings come from cfg. They can also be
    # passed in as arguments, but that is not recommended.
    trainer = Trainer(cfg)
    # Train the model on the dataset.
    trainer(net, train_dataset)
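# cfg itself is not shown in this excerpt. If the project uses a yacs-style
# config (an assumption -- the actual Config file may be structured
# differently), the accesses above would be satisfied by a node like this:
from yacs.config import CfgNode as CN

cfg = CN()
cfg.DEVICE = CN()
cfg.DEVICE.MAINDEVICE = 'cuda:0'  # main GPU targeted by net.to(...) above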
import os

import tensorflow as tf
from tqdm import tqdm

for t in tqdm(lst_T):
    for k in tqdm(lst_kernel0):
        ckpt_dir = os.path.expanduser("~") + '/tuning/%dD_%dK' % (t, k)
        os.makedirs(ckpt_dir, exist_ok=True)
        args.logdir = ckpt_dir
        # hps.lag_time = d
        hps.lst_kernels[0] = k
        hps.T = t
        M = ModelDesc(hps)
        ds_train, ds_test = get_data(hps)
        x = Trainer(input=QueueInput(ds_train), model=M).train_with_defaults(
            callbacks=[
                ModelSaver(checkpoint_dir=ckpt_dir),
                callbacks.MergeAllSummaries(),
                MinSaver('total_loss'),
                InferenceRunner(ds_test, [ScalarStats('predict_trend/accuracy_')])
            ],
            steps_per_epoch=hps.steps_per_epoch,
            max_epoch=hps.epochs,
            session_init=None)
        # Clear the TF1 default graph between configurations so variables from
        # the previous run do not collide with the next one.
        tf.reset_default_graph()
        del M
        del ds_train
        del ds_test
        del x
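# An alternative to tf.reset_default_graph() is to give each configuration its
# own graph context, so nothing leaks between runs. A sketch (TF1-style;
# run_one_config is a hypothetical helper wrapping the Trainer call above):
import tensorflow as tf

for t in lst_T:
    for k in lst_kernel0:
        # Each configuration builds into a fresh graph; no manual reset needed.
        with tf.Graph().as_default():
            hps.T = t
            hps.lst_kernels[0] = k
            run_one_config(hps)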
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    return args


if __name__ == '__main__':
    args = get_args()
    hps = get_default_hparams()
    M = ModelDesc(hps)
    logger.auto_set_dir(action='d')
    ds_train, ds_test = get_data(hps)
    Trainer(
        input=QueueInput(ds_train),
        model=M).train_with_defaults(
            callbacks=[
                ModelSaver(),
                callbacks.MergeAllSummaries(),
                MinSaver('total_loss'),
                InferenceRunner(ds_test, [
                    ScalarStats('predict_trend/accuracy_'),
                    BinaryClassificationStats(
                        pred_tensor_name='predict_trend/y_pred_one_hot',
                        label_tensor_name='y_one_hot')])
            ],
            steps_per_epoch=hps.steps_per_epoch,
            max_epoch=hps.epochs,
            session_init=SaverRestore(args.load) if args.load else None
        )
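# get_args() is only partially visible above. An argparse sketch consistent
# with the two flags the script actually reads (--gpu and --load); the help
# strings and defaults are assumptions:
import argparse
import os


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='GPU ids for CUDA_VISIBLE_DEVICES, e.g. "0,1"')
    parser.add_argument('--load', help='checkpoint path restored via SaverRestore')
    args = parser.parse_args()
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    return args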
def __init__(self, logger: Logger = None):
    self._logger = logger or get_io_stream_logger(__name__)
    self._board = Board()
    self._model = Model(27, 18, 9)
    self._color = CROSS
    # The attribute is _model; the original passed self.model, which would
    # raise AttributeError.
    self._trainer = Trainer(self._model)
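# get_io_stream_logger is a project-local helper used here and in main().
# A minimal stdlib sketch matching how it is called (an assumption -- the
# project's actual helper may configure handlers and formats differently):
import logging
import sys


def get_io_stream_logger(name):
    logger = logging.getLogger(name)
    if not logger.handlers:
        # Emit INFO-level records to stdout with a timestamped format.
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(
            logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s'))
        logger.addHandler(handler)
        logger.setLevel(logging.INFO)
    return logger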