Exemplo n.º 1
0
def RunTrain(isSave=False):
    """Train a ConvNet on MNIST and return the trained model.

    Args:
        isSave: when True, persist the trained model after training.

    Returns:
        The trained model produced by the trainer.
    """
    # MNIST dataset (downloaded to /data on first use)
    train_dataset = torchvision.datasets.MNIST(root='/data',
                                               train=True,
                                               transform=transforms.ToTensor(),
                                               download=True)
    # Data loader
    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              shuffle=True)

    # cnn model
    model = cnn.ConvNet(output_classes).to(device)
    # Loss and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # Training
    mnist_trainer = trainer.Trainer(train_loader,
                                    model=model,
                                    cri=criterion,
                                    opt=optimizer,
                                    device=device)
    mnist_trainer.Execute(epochs)
    trained_model = mnist_trainer.GetModel()

    if isSave:
        # Bug fix: SaveModel belongs to the Trainer instance that holds the
        # trained model — the original called trainer.SaveModel() on the
        # `trainer` module (compare GetModel() above, called on the instance).
        mnist_trainer.SaveModel()

    return trained_model
Exemplo n.º 2
0
def testRun():
    """Smoke-test MLP training on MNIST with 3-fold cross-validation."""
    # MNIST dataset
    train_dataset = torchvision.datasets.MNIST(root='/data',
                                               train=True,
                                               transform=transforms.ToTensor(),
                                               download=True)

    cv_splits = 3
    kfold = KFold(n_splits=cv_splits, shuffle=True, random_state=0)
    for fold, (train_idx, valid_idx) in enumerate(kfold.split(train_dataset)):
        print(">> CV fold step ", str(fold))
        # Build a fresh model / criterion / optimizer for every fold.
        model = mlp.SimpleMLP(output_classes).to(device)
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

        # Loader restricted to this fold's training indices.
        train_loader = DataLoader(Subset(train_dataset, train_idx),
                                  batch_size=batch_size,
                                  shuffle=True)

        # NOTE(review): valid_idx is never used — this routine trains only,
        # no per-fold validation happens here.
        mnist_trainer = trainer.MLPTrainer(train_loader,
                                           model=model,
                                           cri=criterion,
                                           opt=optimizer,
                                           device=device)
        train_result = mnist_trainer.Execute(epochs)
Exemplo n.º 3
0
class Game:
    """Self-play game loop: the model picks moves, and once a game is over
    the trainer learns from the finished board (draws are skipped)."""

    def __init__(self, logger: Logger = None):
        self._logger = logger or get_io_stream_logger(__name__)
        self._board = Board()
        self._model = Model(27, 18, 9)
        self._color = CROSS
        self._trainer = Trainer(self.model)

    def clear(self):
        """Reset the board and hand the first move back to CROSS."""
        self._board = Board()
        self._color = CROSS

    @property
    def board(self):
        """Current Board instance."""
        return self._board

    @property
    def board_as_mat(self):
        """Board contents reshaped into a 3x3 matrix."""
        return self.board.as_list.reshape([3, 3])

    @property
    def model(self):
        """Model used for move inference."""
        return self._model

    @property
    def color(self):
        """Color whose turn it currently is."""
        return self._color

    def turn_color(self):
        """Pass the turn to the other player."""
        self._color = turn_color(self.color)

    def _train(self):
        """Train the model from the finished board.

        Returns the training loss, or False when the result was a draw
        (draws carry no training signal here).
        """
        if self.board.result == DRAW:
            self._logger.info("Result is DRAW, do NOT training")
            return False
        return self._trainer.train_from_board(self.board)

    def _finish_game(self) -> np.ndarray:
        """Train on the finished game, reset state, and return the new board."""
        self._logger.info("Game Finished!")
        self._logger.info("Training Start")
        loss = self._train()
        self._logger.info("Training Finished")
        if loss:
            self._logger.info("Training Loss = {}".format(loss))
        self.clear()
        self._logger.info("Game Start")
        return self.board_as_mat

    def one_move(self) -> np.ndarray:
        """Advance the game by one move and return the board as a 3x3 matrix.

        When the game is already finished, trains instead and starts a
        fresh game.
        """
        if self.board.is_finished:
            return self._finish_game()

        policy = self.model.infer(self.board.data(self.color))
        pos = choice_move(policy, self.board, 5)
        if pos is False:
            self._logger.error("*** Inference Error ***")
            sys.exit()
        self.board.put(pos, self.color)
        self.turn_color()
        return self.board_as_mat
Exemplo n.º 4
0
def main():
    """Self-play training loop: play games, train on decisive results,
    and checkpoint the model to model.pt after each training step."""
    model = Model(27, 18, 9)
    trainer = Trainer(model)
    logger = get_io_stream_logger(__name__)

    model_path = "model.pt"
    # Seed the checkpoint with the fresh model on first run; resume otherwise.
    if not os.path.exists(model_path):
        model.save(model_path)
    else:
        model.load(model_path)

    for _ in range(10000):
        logger.info("start")
        board = Board()
        play_one_game(board, model, logger)
        logger.info("finish")

        # Idiom fix: was `if not board.result == DRAW:`.
        # Only decisive games are trained on; draws are skipped.
        if board.result != DRAW:
            logger.info("start train")
            loss = trainer.train_from_board(board)
            logger.info("loss = {}".format(loss))
            model.save(model_path)
Exemplo n.º 5
0
# Training dataset in VOC format; training samples are taken from
# ImageSets/Main/train.txt
# NOTE(review): `transfrom` looks like a typo for `transform`, but it is the
# project's actual helper name — confirm before renaming.
train_dataset = vocdataset(cfg,
                           is_train=True,
                           transform=transfrom(cfg, is_train=True),
                           target_transform=targettransform(cfg))

# Test dataset in VOC format; test samples are taken from
# ImageSets/Main/eval.txt
test_dataset = vocdataset(cfg=cfg,
                          is_train=False,
                          transform=transfrom(cfg=cfg, is_train=False),
                          target_transform=targettransform(cfg))

if __name__ == '__main__':
    # Translation of the note below: start visdom before running —
    # install with `pip install visdom`, launch with `python -m visdom.server`.
    """
    使用时,请先打开visdom
    
    命令行 输入  pip install visdom          进行安装 
    输入        python -m visdom.server'    启动
    """

    # The first call downloads the pretrained ResNet model.

    # Instantiate the model; its parameters are configured in the Config file.
    net = RetainNet(cfg)
    # Move the model to the GPU; cfg.DEVICE.MAINDEVICE selects the main GPU.
    net.to(cfg.DEVICE.MAINDEVICE)
    # Build the trainer, configured through cfg (explicit arguments are
    # possible but discouraged).
    trainer = Trainer(cfg)
    # Train the model on the training dataset.
    trainer(net, train_dataset)
Exemplo n.º 6
0
# Hyper-parameter sweep over (T, kernel-size) pairs; each combination is
# trained in its own checkpoint directory.
for t in tqdm(lst_T):
    for k in tqdm(lst_kernel0):

        # One checkpoint directory per (T, kernel) pair, e.g. ~/tuning/5D_3K.
        ckpt_dir = os.path.expanduser("~") + '/tuning/%dD_%dK' % (t, k)
        os.makedirs(ckpt_dir, exist_ok=True)
        args.logdir = ckpt_dir
        # hps.lag_time = d
        hps.lst_kernels[0] = k
        hps.T = t
        M = ModelDesc(hps)
        ds_train, ds_test = get_data(hps)
        # Train with checkpointing, best-total_loss saving, and test-set
        # accuracy evaluation after each epoch.
        x = Trainer(input=QueueInput(ds_train), model=M).train_with_defaults(
            callbacks=[
                ModelSaver(checkpoint_dir=ckpt_dir),
                # ModelSaver(),
                callbacks.MergeAllSummaries(),
                MinSaver('total_loss'),
                InferenceRunner(ds_test,
                                [ScalarStats('predict_trend/accuracy_')])
            ],
            steps_per_epoch=hps.steps_per_epoch,
            max_epoch=hps.epochs,
            session_init=None)
        # tf.get_variable_scope().reuse_variables()
        # Reset the TF graph and drop references so the next combination
        # starts from a clean slate.
        tf.reset_default_graph()
        del M
        del ds_train
        del ds_test
        del x
    # NOTE(review): the `return` below means this loop lives inside a
    # function whose `def` line is not visible in this excerpt; the GPU
    # env-var assignment also runs only once per outer iteration — verify
    # placement against the full source.
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    return args


if __name__ == '__main__':
    # Entry point: build the model from default hyper-parameters and train
    # it with checkpointing, best-loss saving, and test-set evaluation.
    args = get_args()

    hps = get_default_hparams()
    model_desc = ModelDesc(hps)

    logger.auto_set_dir(action='d')
    ds_train, ds_test = get_data(hps)

    # Resume from a checkpoint only when --load was given.
    restore = SaverRestore(args.load) if args.load else None

    monitors = [
        ModelSaver(),
        callbacks.MergeAllSummaries(),
        MinSaver('total_loss'),
        InferenceRunner(ds_test, [
            ScalarStats('predict_trend/accuracy_'),
            BinaryClassificationStats(
                pred_tensor_name='predict_trend/y_pred_one_hot',
                label_tensor_name='y_one_hot'),
        ]),
    ]

    Trainer(input=QueueInput(ds_train), model=model_desc).train_with_defaults(
        callbacks=monitors,
        steps_per_epoch=hps.steps_per_epoch,
        max_epoch=hps.epochs,
        session_init=restore)
Exemplo n.º 8
0
def RunTorchCV():
    """K-fold cross-validated MLP training on MNIST, logged to MLflow.

    Trains an mlp.SimpleMLP on each of 3 folds, validates on the held-out
    fold, logs hyper-parameters and per-fold/average training metrics to
    MLflow, and returns the per-fold validation results.

    Returns:
        dict mapping fold index -> validation result from MLPValidator.
    """
    with mlflow.start_run():

        # MNIST dataset
        train_dataset = torchvision.datasets.MNIST(
            root='/data',
            train=True,
            transform=transforms.ToTensor(),
            download=True)

        train_results = {}
        valid_results = {}

        cv_splits = 3
        kfold = KFold(n_splits=cv_splits, shuffle=True, random_state=0)
        for fold_idx, (train_idx,
                       valid_idx) in enumerate(kfold.split(train_dataset)):
            print(">> CV fold step ", str(fold_idx))
            # Fresh model / criterion / optimizer per fold
            model = mlp.SimpleMLP(output_classes).to(device)
            criterion = nn.CrossEntropyLoss()
            optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

            # Data loaders over this fold's train / validation indices
            train_loader = DataLoader(Subset(train_dataset, train_idx),
                                      batch_size=batch_size,
                                      shuffle=True)
            valid_loader = DataLoader(Subset(train_dataset, valid_idx),
                                      batch_size=batch_size,
                                      shuffle=False)

            # Training
            mnist_trainer = trainer.MLPTrainer(train_loader,
                                               model=model,
                                               cri=criterion,
                                               opt=optimizer,
                                               device=device)
            train_result = mnist_trainer.Execute(epochs)
            trained_model = mnist_trainer.GetModel()
            train_results[fold_idx] = train_result

            # Validation on the held-out fold
            mnist_validator = validator.MLPValidator(valid_loader,
                                                     model=trained_model,
                                                     criterion=criterion,
                                                     device=device)
            valid_result = mnist_validator.Validate()
            valid_results[fold_idx] = valid_result

        # Fix: read the class name directly rather than instantiating a
        # throwaway model just to ask for its type.
        mlflow.log_param("method_name", mlp.SimpleMLP.__name__)
        mlflow.log_param("output_class", output_classes)
        mlflow.log_param("batch_size", batch_size)
        mlflow.log_param("learning_rate", learning_rate)

        mlflow.log_param("fold_type", kfold.__class__.__name__)
        mlflow.log_param("n_splits", cv_splits)
        mlflow.log_param("random_state", 0)

        # Bug fix: `nn.CrossEntropyLoss.__class__.__name__` is the metaclass
        # name ("type"); `__name__` on the class itself gives
        # "CrossEntropyLoss" (matching the optimizer line below).
        mlflow.log_param("criterion", nn.CrossEntropyLoss.__name__)
        mlflow.log_param("optimizer", torch.optim.Adam.__name__)

        average_loss = 0
        average_acc = 0
        for fold_idx, cv_result in train_results.items():
            # NOTE(review): indexing by cv_splits - 1 looks like it should be
            # epochs - 1 (last entry of this fold's training history) — it
            # only works when epochs == cv_splits. Confirm against
            # MLPTrainer.Execute's return structure before changing.
            loss = cv_result[cv_splits - 1]["loss"]
            acc = cv_result[cv_splits - 1]["accuracy"]
            average_loss += loss
            average_acc += acc
            mlflow.log_metric("fold_" + str(fold_idx) + "_loss", loss)
            mlflow.log_metric("fold_" + str(fold_idx) + "_accuracy", acc)

        average_loss = average_loss / cv_splits
        average_acc = average_acc / cv_splits
        mlflow.log_metric("average_loss", average_loss)
        mlflow.log_metric("average_acc", average_acc)

    return valid_results
Exemplo n.º 9
0
 def __init__(self, logger: Logger = None):
     """Initialize board, model, turn color, and trainer.

     Args:
         logger: optional Logger; falls back to an io-stream logger
             named after this module.
     """
     self._logger = logger or get_io_stream_logger(__name__)
     self._board = Board()
     # Model dimensions 27/18/9 — presumably input/hidden/output sizes
     # for a 3x3 board; confirm against Model's constructor.
     self._model = Model(27, 18, 9)
     # CROSS moves first.
     self._color = CROSS
     self._trainer = Trainer(self.model)