learning_rate = 1e-2
    epsilon = 1e-8
    epoch_num = 200
    batch_size = 120
    n = math.floor(batch_size / 2)
    input_dim = 1024
    seg_num = 32

    test_path_dir = '/home/yangzehua/RoadAccidentsDetector/ucf_train_test_info/URAD_Test.txt'
    anno_dir = '/home/yangzehua/RoadAccidentsDetector/ucf_train_test_info/URAD_Annotations.txt'

    os.environ['CUDA_VISIBLE_DEVICES'] = '6'
    net = TemporalRegularityDetector(pretrained=False,
                                     input_dim=input_dim).train().cuda()
    reg_optimizer = Adam(net.parameters(),
                         lr=learning_rate,
                         eps=epsilon,
                         weight_decay=regularization_weight)
    loss_func = nn.MSELoss().cuda()
    # scheduler = ReduceLROnPlateau(optimizer=reg_optimizer, mode='min', factor=0.1, patience=20, min_lr=1e-5,
    #                               verbose=True)
    scheduler = CosineAnnealingWarmRestarts(reg_optimizer,
                                            T_0=10,
                                            T_mult=2,
                                            eta_min=1e-5)

    split = 'train'
    seg_dir = '/home/yangzehua/UCF_Crimes/FLOW_Segments'
    test_seg_dir = os.path.join(seg_dir, 'test')
    model_save_dir = 'AE_FLOW.pt'

    dataset = VideoDataset(data_dir=seg_dir, split=split)
Ejemplo n.º 2
0
def main(args):
    """Train bot-classifier variants over the requested GDELT/temporal-extractor
    combinations, report fit results, and optionally plot them.

    For every combination of ``use_gdelt`` x ``use_TCN`` selected by the CLI
    flags, a fresh ``BotClassifier`` is built, trained with ``TorchTrainer``,
    and its fit result is printed (and plotted onto one shared figure when
    ``args.plot_results`` is set).  External behaviour (console output,
    checkpoints, saved figure) is unchanged from the original flat version;
    only the repeated banner printing is factored into a helper.
    """

    def _print_banner(text=""):
        # All banners are exactly 80 characters wide; non-empty text is
        # centred and padded with '=' on both sides, reproducing the
        # original hard-coded strings byte-for-byte.
        print(text.center(80, "=") if text else "=" * 80)

    if args.plot_results:
        # Headless backend so figures can be saved without a display.
        plt.switch_backend('Agg')

    num_epochs = args.num_epochs
    loss_fn = nn.CrossEntropyLoss()

    # GDELT features are loaded if they are used directly or being compared.
    ds = UsersDataset(it_flag=args.use_gdelt or args.compare_gdelt)
    train_dl, test_dl = get_dataloaders(ds,
                                        train_ratio=args.train_ratio,
                                        batch_size=args.batch_size,
                                        load_rand_state=args.load_rand_state)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    w2v_model = Word2Vec.load("checkpoints/word2vec.model")

    # When comparing, sweep both settings; otherwise use the single flag value.
    temporal_options = [False, True] if args.compare_temporal else [args.use_TCN]
    gdelt_options = [False, True] if args.compare_gdelt else [args.use_gdelt]
    fig = None  # shared across sub-runs so all curves land on one figure

    for use_gdelt in gdelt_options:
        for use_TCN in temporal_options:
            subrun_name = get_subrun_name(args.run_name,
                                          use_gdelt=use_gdelt,
                                          use_TCN=use_TCN)

            clf = BotClassifier(w2v_model,
                                args.embedding_dim,
                                args.rec_hidden_dim,
                                args.tweet_features_dim,
                                args.hidden_dim,
                                use_gdelt=use_gdelt,
                                use_TCN=use_TCN,
                                effective_history=args.effective_history,
                                num_rec_layers=args.num_rec_layers,
                                rec_dropout=args.rec_dropout).to(device)

            if args.use_SGD:
                optim = SGD(params=clf.parameters(),
                            lr=args.learning_rate,
                            weight_decay=args.weight_decay)
            else:
                optim = Adam(params=clf.parameters(),
                             lr=args.learning_rate,
                             weight_decay=args.weight_decay)

            trainer = TorchTrainer(clf, loss_fn, optim, device=device)

            checkpoint_file = None
            if args.use_checkpoint:
                checkpoint_file = f"{subrun_name}.model"

            _print_banner()
            _print_banner("|STARTED TRAINING|")
            _print_banner()
            print(
                f'Training with extractor {"TCN" if use_TCN else "LSTM"} and use_gdelt={use_gdelt}:'
            )
            fit_res = trainer.fit(train_dl,
                                  test_dl,
                                  num_epochs,
                                  checkpoints=checkpoint_file,
                                  early_stopping=args.early_stopping)
            _print_banner()
            _print_banner("|FINISHED|")
            _print_banner()
            print('')
            display_fit_result(fit_res)

            if args.plot_results:
                fig, _ = plot_fit(fit_res,
                                  fig=fig,
                                  legend=subrun_name.replace('_', ' '))

    if args.plot_results:
        fig.suptitle(args.run_name.replace('_', ' '))
        plt.savefig(f"graphs/{args.run_name}.png")
Ejemplo n.º 3
0
def main():
    """Parse CLI arguments, train an RNN or bidirectional language model on the
    given corpus, and report validation/test perplexity.

    The model with the lowest validation loss is checkpointed to ``args.save``
    and reloaded at the end for the final test evaluation.
    """

    parser = argparse.ArgumentParser(
        parents=[
            lm_argparser.lm_parser, lm_argparser.model_parser,
            lm_argparser.train_parser
        ],
        #formatter_class=argparse.ArgumentDefaultsHelpFormatter
        description="Training LMs")

    args = parser.parse_args()
    torch.manual_seed(args.seed)

    print("***** Arguments *****")
    print(args)

    if torch.cuda.is_available():
        if not args.cuda:
            print(
                "WARNING: You have a CUDA device, so you should probably run with --cuda"
            )
        else:
            # Also seed the GPU RNG for reproducible CUDA runs.
            torch.cuda.manual_seed(args.seed)

    # Global so module-level helpers (train/evaluate/batchify callers) can
    # reach the corpus without threading it through every call.
    global corpus
    corpus = data.Corpus(args.data)

    eval_batch_size = 10
    train_data = batchify(corpus.train, args.batch_size, args.cuda)
    val_data = batchify(corpus.valid, eval_batch_size, args.cuda)
    test_data = batchify(corpus.test, eval_batch_size, args.cuda)

    ntokens = len(corpus.dictionary)
    print("Vocab size", ntokens)

    # Default hidden configuration: nlayers layers, each of width nhid.
    if not args.hidden_sizes:
        args.hidden_sizes = args.nlayers * [
            args.nhid,
        ]

    if args.mode == "bidir":  # bidirectional with fixed window, forward and backward are trained simultaneously
        model = BidirectionalLanguageModel(corpus.dictionary.word2idx,
                                           args.emsize, args.hidden_sizes,
                                           args.dropout, args.rnncell,
                                           args.pretrained_embs,
                                           args.fixed_embs, args.tied)
    else:  # simple forward or backward LMs
        model = RNNLanguageModel(corpus.dictionary.word2idx, args.emsize,
                                 args.hidden_sizes, args.dropout, args.rnncell,
                                 args.pretrained_embs, args.fixed_embs,
                                 args.tied)

    if args.cuda:
        model.cuda()

    print("********* Model **********")
    print(model)

    print("Number of parameters",
          sum(p.numel() for p in model.parameters() if p.requires_grad))

    print("******** Training ********")
    lr = args.lr
    # Only optimize parameters that require gradients (frozen embeddings stay put).
    params = filter(lambda p: p.requires_grad, model.parameters())
    if args.optimizer == "adam":
        optimizer = Adam(params, lr=lr, amsgrad=True)
    else:
        optimizer = SimpleSGD(params, lr=lr)

    best_val_loss = None

    try:
        for epoch in range(1, args.epochs + 1):
            epoch_start_time = time.time()
            seq_len = args.bptt

            train(model, train_data, len(corpus.dictionary), seq_len, args,
                  epoch, optimizer)
            with torch.no_grad():
                val_loss = evaluate(model, val_data, len(corpus.dictionary),
                                    seq_len, eval_batch_size, args.mode)
            print('-' * 89)
            print(
                '| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
                'valid ppl {:8.2f}'.format(epoch,
                                           (time.time() - epoch_start_time),
                                           val_loss, math.exp(val_loss)))
            print('-' * 89)
            # Save the model if the validation loss is the best we've seen so far.
            if not best_val_loss or val_loss < best_val_loss:
                with open(args.save, 'wb') as f:
                    torch.save(model, f)
                best_val_loss = val_loss
            else:
                # Anneal the learning rate if no improvement has been seen in the validation dataset.
                if args.optimizer == "sgd":
                    optimizer.update()

    except KeyboardInterrupt:
        # Ctrl-C stops training early but still runs the test evaluation below.
        print('-' * 89)
        print('Exiting from training early')

    # Load the best saved model.
    with open(args.save, 'rb') as f:
        model = torch.load(f)

    # Run on test data.
    with torch.no_grad():
        test_loss = evaluate(model, test_data, len(corpus.dictionary),
                             args.bptt, eval_batch_size, args.mode)
    print('=' * 89)
    print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
        test_loss, math.exp(test_loss)))
    print('=' * 89)
Ejemplo n.º 4
0
def main(args):
    """End-to-end MEG regression experiment for one subject.

    Builds the dataset from the subject's raw ``.fif`` runs, trains an
    ``RPS_MNet`` (or loads a saved model when ``skip_training`` is True),
    evaluates on the held-out test split, saves diagnostic plots, and logs
    parameters/metrics/artifacts to mlflow.

    Fixes over the original: ``iter(loader).next()`` (a Python-2 idiom,
    removed in Python 3) is replaced by ``next(iter(loader))``, and the
    mlflow section no longer references ``train_time`` / ``loss_plot.pdf``
    when training was skipped (previously a NameError / missing artifact).
    """

    data_dir = args.data_dir
    figure_path = args.figure_dir
    model_path = args.model_dir

    # Generate the data input path list. Each subject has 3 runs stored in 3 different files.
    # Subject 3 is missing run 1, hence the shifted start index.
    subj_id = "/sub" + str(args.sub) + "/ball0"
    raw_fnames = [
        "".join([data_dir, subj_id, str(i), "_sss_trans.fif"])
        for i in range(1 if args.sub != 3 else 2, 4)
    ]

    # local
    # subj_id = "/sub"+str(args.sub)+"/ball"
    # raw_fnames = ["".join([data_dir, subj_id, str(i), "_sss.fif"]) for i in range(1, 2)]

    # Set skip_training to False if the model has to be trained, to True if the model has to be loaded.
    skip_training = False

    # Set the torch device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Device = {}".format(device))

    # Initialize parameters
    parameters = Params_tunable(
        subject_n=args.sub,
        hand=args.hand,
        batch_size=args.batch_size,
        valid_batch_size=args.batch_size_valid,
        test_batch_size=args.batch_size_test,
        epochs=args.epochs,
        lr=args.learning_rate,
        duration=args.duration,
        overlap=args.overlap,
        patience=args.patience,
        device=device,
        y_measure=args.y_measure,
        s_n_layer=args.s_n_layer,
        # s_kernel_size=args.s_kernel_size,  # Local
        s_kernel_size=json.loads(" ".join(args.s_kernel_size)),
        t_n_layer=args.t_n_layer,
        # t_kernel_size=args.t_kernel_size,  # Local
        t_kernel_size=json.loads(" ".join(args.t_kernel_size)),
        max_pooling=args.max_pooling,
        ff_n_layer=args.ff_n_layer,
        ff_hidden_channels=args.ff_hidden_channels,
        dropout=args.dropout,
        activation=args.activation,
    )

    # Set if generate with RPS values or not (check network architecture used later)
    rps = True

    # Generate the custom dataset
    if rps:
        dataset = MEG_Dataset(
            raw_fnames,
            parameters.duration,
            parameters.overlap,
            parameters.y_measure,
            normalize_input=True,
        )
    else:
        dataset = MEG_Dataset_no_bp(
            raw_fnames,
            parameters.duration,
            parameters.overlap,
            parameters.y_measure,
            normalize_input=True,
        )

    # split the dataset in train, test and valid sets.
    train_len, valid_len, test_len = len_split(len(dataset))
    print(
        "{} + {} + {} = {}?".format(
            train_len, valid_len, test_len, len(dataset)
        )
    )

    # train_dataset, valid_test, test_dataset = random_split(dataset, [train_len, valid_len, test_len],
    #                                                        generator=torch.Generator().manual_seed(42))
    train_dataset, valid_test, test_dataset = random_split(
        dataset, [train_len, valid_len, test_len]
    )

    # Initialize the dataloaders
    trainloader = DataLoader(
        train_dataset,
        batch_size=parameters.batch_size,
        shuffle=True,
        num_workers=1,
    )
    validloader = DataLoader(
        valid_test,
        batch_size=parameters.valid_batch_size,
        shuffle=True,
        num_workers=1,
    )
    testloader = DataLoader(
        test_dataset,
        batch_size=parameters.test_batch_size,
        shuffle=False,
        num_workers=1,
    )

    # Get the n_times dimension from one sample batch
    with torch.no_grad():
        # Changes if RPS integration or not
        if rps:
            x, _, _ = next(iter(trainloader))
        else:
            x, _ = next(iter(trainloader))

    n_times = x.shape[-1]

    # Initialize network.  Alternative architectures previously tried here:
    # LeNet5, ResNet, SCNN, MNet, RPS_SCNN, RPS_MLP (see the models module).
    net = RPS_MNet(n_times)
    # mlp is True only when the network consumes the RPS band-power input alone
    mlp = False

    print(net)

    train_time = None  # stays None when training is skipped
    # Training loop or model loading
    if not skip_training:
        print("Begin training....")

        # Check the optimizer before running (different from model to model)
        optimizer = Adam(net.parameters(), lr=parameters.lr, weight_decay=5e-4)
        # optimizer = SGD(net.parameters(), lr=parameters.lr, weight_decay=5e-4)

        scheduler = ReduceLROnPlateau(optimizer, mode="min", factor=0.5,
                                      patience=15)

        print("scheduler : ", scheduler)

        loss_function = torch.nn.MSELoss()
        start_time = timer.time()
        if rps:
            if mlp:
                net, train_loss, valid_loss = train_bp_MLP(
                    net,
                    trainloader,
                    validloader,
                    optimizer,
                    scheduler,
                    loss_function,
                    parameters.device,
                    parameters.epochs,
                    parameters.patience,
                    parameters.hand,
                    model_path,
                )
            else:
                net, train_loss, valid_loss = train_bp(
                    net,
                    trainloader,
                    validloader,
                    optimizer,
                    scheduler,
                    loss_function,
                    parameters.device,
                    parameters.epochs,
                    parameters.patience,
                    parameters.hand,
                    model_path,
                )
        else:
            net, train_loss, valid_loss = train(
                net,
                trainloader,
                validloader,
                optimizer,
                scheduler,
                loss_function,
                parameters.device,
                parameters.epochs,
                parameters.patience,
                parameters.hand,
                model_path,
            )

        train_time = timer.time() - start_time
        print("Training done in {:.4f}".format(train_time))

        # visualize the loss as the network trained
        fig = plt.figure(figsize=(10, 4))
        plt.plot(
            range(1, len(train_loss) + 1), train_loss, label="Training Loss"
        )
        plt.plot(
            range(1, len(valid_loss) + 1), valid_loss, label="Validation Loss"
        )

        # find position of lowest validation loss
        minposs = valid_loss.index(min(valid_loss)) + 1
        plt.axvline(
            minposs,
            linestyle="--",
            color="r",
            label="Early Stopping Checkpoint",
        )

        plt.xlabel("epochs")
        plt.ylabel("loss")
        plt.grid(True)
        plt.legend()
        plt.tight_layout()
        plt.show()
        plt.savefig(os.path.join(figure_path, "loss_plot.pdf"))

        # Save the trained model
        save_pytorch_model(net, model_path, "Baselinemodel_SCNN_swap.pth")
    else:
        # Load the model (properly select the model architecture)
        net = RPS_MNet()
        net = load_pytorch_model(
            net, os.path.join(model_path, "model.pth"), parameters.device
        )

    # Evaluation
    print("Evaluation...")
    net.eval()
    y_pred = []
    y = []

    # if RPS integration
    with torch.no_grad():
        if rps:
            if mlp:
                for _, labels, bp in testloader:
                    labels, bp = labels.to(parameters.device), bp.to(device)
                    y.extend(list(labels[:, parameters.hand]))
                    y_pred.extend((list(net(bp))))
            else:
                for data, labels, bp in testloader:
                    data, labels, bp = (
                        data.to(parameters.device),
                        labels.to(parameters.device),
                        bp.to(device),
                    )
                    y.extend(list(labels[:, parameters.hand]))
                    y_pred.extend((list(net(data, bp))))
        else:
            for data, labels in testloader:
                data, labels = (
                    data.to(parameters.device),
                    labels.to(parameters.device),
                )
                y.extend(list(labels[:, parameters.hand]))
                y_pred.extend((list(net(data))))

    print("SCNN_swap...")
    # Calculate Evaluation measures
    mse = mean_squared_error(y, y_pred)
    rmse = mean_squared_error(y, y_pred, squared=False)
    mae = mean_absolute_error(y, y_pred)
    r2 = r2_score(y, y_pred)
    print("mean squared error {}".format(mse))
    print("root mean squared error {}".format(rmse))
    print("mean absolute error {}".format(mae))
    print("r2 score {}".format(r2))

    # plot y_new against the true value focus on 100 timepoints
    fig, ax = plt.subplots(1, 1, figsize=[10, 4])
    times = np.arange(100)
    ax.plot(times, y_pred[0:100], color="b", label="Predicted")
    ax.plot(times, y[0:100], color="r", label="True")
    ax.set_xlabel("Times")
    ax.set_ylabel("{}".format(parameters.y_measure))
    ax.set_title(
        "Sub {}, hand {}, {} prediction".format(
            str(parameters.subject_n),
            "sx" if parameters.hand == 0 else "dx",
            parameters.y_measure,
        )
    )
    plt.legend()
    plt.savefig(os.path.join(figure_path, "Times_prediction_focus.pdf"))
    plt.show()

    # plot y_new against the true value
    fig, ax = plt.subplots(1, 1, figsize=[10, 4])
    times = np.arange(len(y_pred))
    ax.plot(times, y_pred, color="b", label="Predicted")
    ax.plot(times, y, color="r", label="True")
    ax.set_xlabel("Times")
    ax.set_ylabel("{}".format(parameters.y_measure))
    ax.set_title(
        "Sub {}, hand {}, {} prediction".format(
            str(parameters.subject_n),
            "sx" if parameters.hand == 0 else "dx",
            parameters.y_measure,
        )
    )
    plt.legend()
    plt.savefig(os.path.join(figure_path, "Times_prediction.pdf"))
    plt.show()

    # scatterplot y predicted against the true value
    fig, ax = plt.subplots(1, 1, figsize=[10, 4])
    ax.scatter(np.array(y), np.array(y_pred), color="b", label="Predicted")
    ax.set_xlabel("True")
    ax.set_ylabel("Predicted")
    plt.savefig(os.path.join(figure_path, "Scatter.pdf"))
    plt.show()

    # log the model and parameters using mlflow tracker
    with mlflow.start_run(experiment_id=args.experiment) as run:
        for key, value in vars(parameters).items():
            mlflow.log_param(key, value)

        if not skip_training:
            # Training time and the loss curve only exist when we trained.
            mlflow.log_param("Time", train_time)
            mlflow.log_artifact(os.path.join(figure_path, "loss_plot.pdf"))

        mlflow.log_metric("MSE", mse)
        mlflow.log_metric("RMSE", rmse)
        mlflow.log_metric("MAE", mae)
        mlflow.log_metric("R2", r2)

        mlflow.log_artifact(os.path.join(figure_path, "Times_prediction.pdf"))
        mlflow.log_artifact(
            os.path.join(figure_path, "Times_prediction_focus.pdf")
        )
        mlflow.log_artifact(os.path.join(figure_path, "Scatter.pdf"))
        mlflow.pytorch.log_model(net, "models")
Ejemplo n.º 5
0
 def _make_optimizer(self, lr):
     """Build the Adam optimizer over the detector's parameters.

     Stores the optimizer on ``self.op``; weight decay is fixed at 5e-4.
     """
     trainable_params = self.det.get_param()
     self.op = Adam(trainable_params, lr=lr, weight_decay=5e-4)
Ejemplo n.º 6
0
def test_RPS_MNet_2_training():
    """Smoke-test RPS_MNet_2 training on synthetic tensors.

    Builds constant train/valid/test sets, runs one epoch of ``train_2`` on
    CPU, then runs the trained model over the test loader — the test passes
    if nothing raises.

    Fix over the original: ``iter(trainloader).next()`` is the removed
    Python-2 iterator idiom; the builtin ``next(iter(trainloader))`` is the
    correct Python 3 form.
    """
    train_set = TensorDataset(
        torch.ones([50, 1, 204, 501]),
        torch.zeros([50, 2, 2]),
        torch.ones([50, 204, 6]),
    )

    valid_set = TensorDataset(
        torch.ones([10, 1, 204, 501]),
        torch.zeros([10, 2, 2]),
        torch.ones([10, 204, 6]),
    )

    test_set = TensorDataset(
        torch.ones([10, 1, 204, 501]),
        torch.zeros([10, 2, 2]),
        torch.ones([10, 204, 6]),
    )

    print(len(train_set))

    device = "cpu"

    trainloader = DataLoader(
        train_set, batch_size=10, shuffle=False, num_workers=1
    )

    validloader = DataLoader(
        valid_set, batch_size=2, shuffle=False, num_workers=1
    )

    testloader = DataLoader(
        test_set, batch_size=2, shuffle=False, num_workers=1
    )

    epochs = 1

    # Peek one batch to derive the time dimension expected by the network.
    with torch.no_grad():
        x, y, _ = next(iter(trainloader))
        n_times = x.shape[-1]

    # change between different network
    net = models.RPS_MNet_2(n_times)
    optimizer = Adam(net.parameters(), lr=0.00001)
    loss_function = torch.nn.MSELoss()

    print("begin training...")
    model, _, _ = train_2(
        net,
        trainloader,
        validloader,
        optimizer,
        loss_function,
        device,
        epochs,
        10,
        0,
        "",
    )

    hand = 0
    model.eval()
    y_pred = []
    y = []
    with torch.no_grad():
        for data, labels, bp in testloader:
            data, labels, bp = (
                data.to(device),
                labels.to(device),
                bp.to(device),
            )
            y.extend(list(labels[:, hand, :]))
            y_pred.extend((list(net(data, bp))))

    y = torch.stack(y)
    y_pred = torch.stack(y_pred)
    print(y[:, 0].shape)

    print("Training do not rise error.")
Ejemplo n.º 7
0
class SACAE(Agent):
    # https://arxiv.org/pdf/1910.01741.pdf
    def __init__(self,
                 algo_params,
                 env,
                 transition_tuple=None,
                 path=None,
                 seed=-1):
        """Build the pixel-based SAC+AE agent (arXiv:1910.01741).

        Wraps ``env`` into a pixel observation with frame stacking, then sets
        up the conv actor, twin critics with target networks sharing a pixel
        encoder, their optimizers, a learned entropy temperature, and the
        random-shift image augmentation used during critic updates.

        Args:
            algo_params: dict of hyper-parameters (image sizes, frame_stack,
                alpha, learning intervals, step budgets, ...).
            env: the base PyBullet gym environment to wrap.
            transition_tuple: optional transition namedtuple for the buffer.
            path: optional directory for saving/loading (passed to Agent).
            seed: RNG seed for the environment (-1 by convention here).
        """
        # environment
        self.env = PixelPybulletGym(
            env,
            image_size=algo_params['image_resize_size'],
            crop_size=algo_params['image_crop_size'])
        self.frame_stack = algo_params['frame_stack']
        self.env = FrameStack(self.env, k=self.frame_stack)
        self.env.seed(seed)
        obs = self.env.reset()
        algo_params.update({
            'state_shape':
            obs.shape,  # make sure the shape is like (C, H, W), not (H, W, C)
            'action_dim': self.env.action_space.shape[0],
            'action_max': self.env.action_space.high,
            'action_scaling': self.env.action_space.high[0],
        })
        # training args
        self.max_env_step = algo_params['max_env_step']
        self.testing_gap = algo_params['testing_gap']
        self.testing_episodes = algo_params['testing_episodes']
        self.saving_gap = algo_params['saving_gap']

        super(SACAE, self).__init__(algo_params,
                                    transition_tuple=transition_tuple,
                                    image_obs=True,
                                    training_mode='step_based',
                                    path=path,
                                    seed=seed)
        # torch
        # Online networks share `encoder`; target critics share `encoder_target`.
        self.encoder = PixelEncoder(self.state_shape)
        self.encoder_target = PixelEncoder(self.state_shape)
        self.network_dict.update({
            'actor':
            StochasticConvActor(self.action_dim,
                                encoder=self.encoder,
                                detach_obs_encoder=True).to(self.device),
            'critic_1':
            ConvCritic(self.action_dim,
                       encoder=self.encoder,
                       detach_obs_encoder=False).to(self.device),
            'critic_1_target':
            ConvCritic(self.action_dim,
                       encoder=self.encoder_target,
                       detach_obs_encoder=True).to(self.device),
            'critic_2':
            ConvCritic(self.action_dim,
                       encoder=self.encoder,
                       detach_obs_encoder=False).to(self.device),
            'critic_2_target':
            ConvCritic(self.action_dim,
                       encoder=self.encoder_target,
                       detach_obs_encoder=True).to(self.device),
            'alpha':
            algo_params['alpha'],
            'log_alpha':
            # Temperature is optimized in log space for positivity.
            T.tensor(np.log(algo_params['alpha']),
                     requires_grad=True,
                     device=self.device),
        })
        self.network_keys_to_save = ['actor']
        self.actor_optimizer = Adam(self.network_dict['actor'].parameters(),
                                    lr=self.actor_learning_rate)
        self.critic_1_optimizer = Adam(
            self.network_dict['critic_1'].parameters(),
            lr=self.critic_learning_rate)
        self.critic_2_optimizer = Adam(
            self.network_dict['critic_2'].parameters(),
            lr=self.critic_learning_rate)
        # tau=1 performs a hard copy of online critic weights into the targets.
        self._soft_update(self.network_dict['critic_1'],
                          self.network_dict['critic_1_target'],
                          tau=1)
        self._soft_update(self.network_dict['critic_2'],
                          self.network_dict['critic_2_target'],
                          tau=1)
        # SAC heuristic: target entropy = -|A|.
        self.target_entropy = -self.action_dim
        self.alpha_optimizer = Adam([self.network_dict['log_alpha']],
                                    lr=self.actor_learning_rate)
        # augmentation args
        # Pad-and-random-crop shift, applied to observations during learning.
        self.image_random_shift = T.nn.Sequential(
            T.nn.ReplicationPad2d(4), aug.RandomCrop(self.state_shape[-2:]))
        self.q_regularisation_k = algo_params['q_regularisation_k']
        # training args
        self.warmup_step = algo_params['warmup_step']
        self.actor_update_interval = algo_params['actor_update_interval']
        self.critic_target_update_interval = algo_params[
            'critic_target_update_interval']
        # statistic dict
        self.statistic_dict.update({
            'episode_return': [],
            'env_step_return': [],
            'env_step_test_return': [],
            'alpha': [],
            'policy_entropy': [],
        })

    def run(self, test=False, render=False, load_network_ep=None, sleep=0):
        """Run the agent: evaluation episodes when ``test`` is True, otherwise
        step-based training until ``self.max_env_step``.

        Args:
            test: evaluate only (optionally loading saved weights) when True.
            render: forward rendering flag to the environment interaction.
            load_network_ep: checkpoint epoch to load before testing.
            sleep: per-step sleep passed through to ``_interact``.
        """
        if test:
            num_episode = self.testing_episodes
            if load_network_ep is not None:
                print("Loading network parameters...")
                self._load_network(ep=load_network_ep)
            print("Start testing...")
            for ep in range(num_episode):
                ep_return = self._interact(render, test, sleep=sleep)
                self.statistic_dict['episode_return'].append(ep_return)
                print("Episode %i" % ep, "return %0.1f" % ep_return)
            print("Finished testing")
        else:
            print("Start training...")
            step_returns = 0
            while self.env_step_count < self.max_env_step:
                ep_return = self._interact(render, test, sleep=sleep)
                step_returns += ep_return
                if self.env_step_count % 1000 == 0:
                    # cumulative rewards every 1000 env steps
                    self.statistic_dict['env_step_return'].append(step_returns)
                    print(
                        "Env step %i" % self.env_step_count,
                        "avg return %0.1f" %
                        self.statistic_dict['env_step_return'][-1])

                # Periodic evaluation: average return over testing_episodes.
                if (self.env_step_count % self.testing_gap
                        == 0) and (self.env_step_count != 0) and (not test):
                    ep_test_return = []
                    for test_ep in range(self.testing_episodes):
                        ep_test_return.append(self._interact(render,
                                                             test=True))
                    self.statistic_dict['env_step_test_return'].append(
                        sum(ep_test_return) / self.testing_episodes)
                    print(
                        "Env step %i" % self.env_step_count,
                        "test return %0.1f" %
                        (sum(ep_test_return) / self.testing_episodes))

                if (self.env_step_count % self.saving_gap
                        == 0) and (self.env_step_count != 0) and (not test):
                    self._save_network(step=self.env_step_count)

            print("Finished training")
            print("Saving statistics...")
            # Convert tensor-valued statistics to plain lists before saving;
            # entries whose first element is not a tensor are left unchanged.
            for key in self.statistic_dict.keys():
                if not T.is_tensor(self.statistic_dict[key][0]):
                    continue
                self.statistic_dict[key] = T.tensor(
                    self.statistic_dict[key],
                    device=self.device).cpu().numpy().tolist()
            self._save_statistics()
            self._plot_statistics(
                x_labels={
                    'env_step_return': 'Environment step (x1e3)',
                    'env_step_test_return': 'Environment step (x1e4)'
                })

    def _interact(self, render=False, test=False, sleep=0):
        """Roll out one episode and return its cumulative reward.

        During training (``test`` is False) transitions are stored in the
        buffer, ``env_step_count`` is advanced, and learning updates fire
        every ``update_interval`` steps after the warmup phase.  Random
        actions are taken until ``warmup_step`` is reached.
        """
        done = False
        obs = self.env.reset()
        # build frame buffer for frame stack observations
        ep_return = 0
        # start a new episode
        while not done:
            if render:
                self.env.render()
            if self.env_step_count < self.warmup_step:
                action = self.env.action_space.sample()
            else:
                action = self._select_action(obs, test=test)
            new_obs, reward, done, info = self.env.step(action)
            time.sleep(sleep)
            ep_return += reward
            if not test:
                self._remember(obs, action, new_obs, reward, 1 - int(done))
                if (self.env_step_count % self.update_interval
                        == 0) and (self.env_step_count > self.warmup_step):
                    self._learn()
                self.env_step_count += 1
            obs = new_obs
            # NOTE(review): the episode is force-truncated whenever the global
            # step counter hits a multiple of 1000; in test mode the counter
            # is not advanced, so this can cut a test episode after one step
            # when the counter happens to sit on a multiple — confirm intended.
            if self.env_step_count % 1000 == 0:
                break
        return ep_return

    def _select_action(self, obs, test=False):
        """Map a single observation to an action via the actor network.

        Args:
            obs: one environment observation (array-like); a batch
                dimension is added before the forward pass.
            test: when True, query the deterministic mean policy
                (``mean_pi``) instead of sampling.

        Returns:
            The selected action as a numpy array with the batch
            dimension removed.
        """
        # as_tensor(...).unsqueeze(0) replaces T.tensor([obs], ...):
        # building a tensor from a Python list wrapping an ndarray is slow
        # and triggers a UserWarning in recent PyTorch releases.
        obs = T.as_tensor(obs, dtype=T.float32,
                          device=self.device).unsqueeze(0)
        return self.network_dict['actor'].get_action(
            obs, mean_pi=test).detach().cpu().numpy()[0]

    def _learn(self, steps=None):
        """Run one or more SAC optimisation steps.

        Each loss term is averaged over ``q_regularisation_k`` randomly
        shifted (augmented) views of the image observations before the
        backward pass (RAD-style Q regularisation).

        Args:
            steps: number of optimisation steps to run; defaults to
                ``self.optimizer_steps``.
        """
        if len(self.buffer) < self.batch_size:
            return
        if steps is None:
            steps = self.optimizer_steps

        for _ in range(steps):
            if self.prioritised:
                batch, weights, inds = self.buffer.sample(self.batch_size)
                weights = T.tensor(weights, device=self.device).view(
                    self.batch_size, 1)
            else:
                batch = self.buffer.sample(self.batch_size)
                weights = T.ones(size=(self.batch_size, 1), device=self.device)
                inds = None

            vanilla_actor_inputs = T.tensor(batch.state,
                                            dtype=T.float32,
                                            device=self.device)
            actions = T.tensor(batch.action,
                               dtype=T.float32,
                               device=self.device)
            vanilla_actor_inputs_ = T.tensor(batch.next_state,
                                             dtype=T.float32,
                                             device=self.device)
            rewards = T.tensor(batch.reward,
                               dtype=T.float32,
                               device=self.device).unsqueeze(1)
            # `done` is stored as a not-terminal mask (1 = not done) — see
            # the `1 - int(done)` argument passed to _remember in _interact.
            done = T.tensor(batch.done, dtype=T.float32,
                            device=self.device).unsqueeze(1)

            if self.discard_time_limit:
                # Treat every transition as non-terminal (always bootstrap):
                # time-limit terminations are not true environment terminals.
                done = done * 0 + 1

            # Bellman target averaged over k augmented next-state views.
            average_value_target = 0
            for _ in range(self.q_regularisation_k):
                actor_inputs_ = self.image_random_shift(vanilla_actor_inputs_)
                with T.no_grad():
                    actions_, log_probs_ = self.network_dict[
                        'actor'].get_action(actor_inputs_, probs=True)
                    value_1_ = self.network_dict['critic_1_target'](
                        actor_inputs_, actions_)
                    value_2_ = self.network_dict['critic_2_target'](
                        actor_inputs_, actions_)
                    # Clipped double-Q minus the entropy temperature term.
                    value_ = T.min(value_1_, value_2_) - (
                        self.network_dict['alpha'] * log_probs_)
                    average_value_target = average_value_target + (
                        rewards + done * self.gamma * value_)
            value_target = average_value_target / self.q_regularisation_k

            self.critic_1_optimizer.zero_grad()
            self.critic_2_optimizer.zero_grad()
            aggregated_critic_loss_1 = 0
            aggregated_critic_loss_2 = 0
            for _ in range(self.q_regularisation_k):
                actor_inputs = self.image_random_shift(vanilla_actor_inputs)

                value_estimate_1 = self.network_dict['critic_1'](actor_inputs,
                                                                 actions)
                critic_loss_1 = F.mse_loss(value_estimate_1,
                                           value_target.detach(),
                                           reduction='none')
                aggregated_critic_loss_1 = aggregated_critic_loss_1 + critic_loss_1

                value_estimate_2 = self.network_dict['critic_2'](actor_inputs,
                                                                 actions)
                critic_loss_2 = F.mse_loss(value_estimate_2,
                                           value_target.detach(),
                                           reduction='none')
                aggregated_critic_loss_2 = aggregated_critic_loss_2 + critic_loss_2

            # backward the both losses before calling .step(), or it will throw CudaRuntime error
            avg_critic_loss_1 = aggregated_critic_loss_1 / self.q_regularisation_k
            (avg_critic_loss_1 * weights).mean().backward()
            avg_critic_loss_2 = aggregated_critic_loss_2 / self.q_regularisation_k
            (avg_critic_loss_2 * weights).mean().backward()
            self.critic_1_optimizer.step()
            self.critic_2_optimizer.step()

            if self.prioritised:
                assert inds is not None
                # Bug fix: compute the new priorities from a detached copy
                # instead of rebinding `avg_critic_loss_1` to a numpy array.
                # The original rebind made the `.mean().detach()` call below
                # raise AttributeError whenever `prioritised` was True.
                new_priorities = np.abs(
                    avg_critic_loss_1.detach().cpu().numpy())
                self.buffer.update_priority(inds, new_priorities)

            self.statistic_dict['critic_loss'].append(
                avg_critic_loss_1.mean().detach())

            if self.optim_step_count % self.critic_target_update_interval == 0:
                self._soft_update(self.network_dict['critic_1'],
                                  self.network_dict['critic_1_target'])
                self._soft_update(self.network_dict['critic_2'],
                                  self.network_dict['critic_2_target'])

            if self.optim_step_count % self.actor_update_interval == 0:
                self.actor_optimizer.zero_grad()
                self.alpha_optimizer.zero_grad()
                aggregated_actor_loss = 0
                aggregated_alpha_loss = 0
                aggregated_log_probs = 0
                for _ in range(self.q_regularisation_k):
                    actor_inputs = self.image_random_shift(
                        vanilla_actor_inputs)
                    new_actions, new_log_probs = self.network_dict[
                        'actor'].get_action(actor_inputs, probs=True)
                    aggregated_log_probs = aggregated_log_probs + new_log_probs
                    new_values = T.min(
                        self.network_dict['critic_1'](actor_inputs,
                                                      new_actions),
                        self.network_dict['critic_2'](actor_inputs,
                                                      new_actions))
                    # Policy loss: alpha * log pi - Q  (gradient ascent on
                    # the entropy-regularised value).
                    aggregated_actor_loss = aggregated_actor_loss + (
                        self.network_dict['alpha'] * new_log_probs -
                        new_values).mean()
                    # Temperature loss drives entropy toward target_entropy.
                    aggregated_alpha_loss = aggregated_alpha_loss + (
                        self.network_dict['log_alpha'] *
                        (-new_log_probs -
                         self.target_entropy).detach()).mean()

                avg_actor_loss = aggregated_actor_loss / self.q_regularisation_k
                avg_actor_loss.backward()
                avg_alpha_loss = aggregated_alpha_loss / self.q_regularisation_k
                avg_alpha_loss.backward()
                self.actor_optimizer.step()
                self.alpha_optimizer.step()
                self.network_dict['alpha'] = self.network_dict[
                    'log_alpha'].exp()

                self.statistic_dict['actor_loss'].append(
                    avg_actor_loss.mean().detach())
                self.statistic_dict['alpha'].append(
                    self.network_dict['alpha'].detach())
                self.statistic_dict['policy_entropy'].append(
                    (-aggregated_log_probs /
                     self.q_regularisation_k).mean().detach())

            self.optim_step_count += 1
Ejemplo n.º 8
0
class ConvolutionalAutoEncoder(ConvolutionalNeuralNetworks):
    '''
    Convolutional Auto-Encoder.

    A stack of Convolutional Auto-Encoder (Masci, J., et al., 2011)
    forms a convolutional neural network (CNN), which are among the most successful models
    for supervised image classification.  Each Convolutional Auto-Encoder is trained
    using conventional on-line gradient descent without additional regularization terms.

    In this library, Convolutional Auto-Encoder is also based on Encoder/Decoder scheme.
    The encoder is to the decoder what the Convolution is to the Deconvolution.
    The Deconvolution also called transposed convolutions
    "work by swapping the forward and backward passes of a convolution." (Dumoulin, V., & Visin, F. 2016, p20.)

    References:
        - Dumoulin, V., & Visin, F. (2016). A guide to convolution arithmetic for deep learning. arXiv preprint arXiv:1603.07285.
        - Masci, J., Meier, U., Cireşan, D., & Schmidhuber, J. (2011, June). Stacked convolutional auto-encoders for hierarchical feature extraction. In International Conference on Artificial Neural Networks (pp. 52-59). Springer, Berlin, Heidelberg.
    '''

    # `bool` that means initialization in this class will be deferred or not.
    # Accessed through the `init_deferred_flag` property below (name-mangled
    # to _ConvolutionalAutoEncoder__init_deferred_flag).
    __init_deferred_flag = False

    def __init__(
        self,
        encoder,
        decoder,
        computable_loss,
        initializer_f=None,
        optimizer_f=None,
        encoder_optimizer_f=None,
        decoder_optimizer_f=None,
        learning_rate=1e-05,
        # NOTE(review): the list defaults below are shared mutable objects
        # (classic Python gotcha). They are not mutated anywhere in this
        # class, but switching to None-defaults would be safer — confirm
        # the parent class does not mutate them before changing.
        hidden_units_list=[],
        output_nn=None,
        hidden_dropout_rate_list=[],
        hidden_activation_list=[],
        hidden_batch_norm_list=[],
        ctx="cpu",
        regularizatable_data_list=[],
        scale=1.0,
        tied_weights_flag=True,
        init_deferred_flag=None,
        not_init_flag=False,
        wd=None,
    ):
        '''
        Init.

        Args:
            encoder:                        is-a `CNNHybrid`.
            decoder:                        is-a `CNNHybrid`.
            computable_loss:                is-a `ComputableLoss` or `mxnet.gluon.loss`.
            initializer_f:                  is-a `mxnet.initializer.Initializer` for parameters of model. If `None`, it is drawing from the Xavier distribution.
            learning_rate:                  `float` of learning rate.

            hidden_units_list:              `list` of `mxnet.gluon.nn._conv` in hidden layers.
            output_nn:                      is-a `NNHybrid` as output layers.
                                            If `None`, last layer in `hidden_units_list` will be considered as an output layer.

            hidden_dropout_rate_list:       `list` of `float` of dropout rate in hidden layers.

            optimizer_name:                 `str` of name of optimizer.

            hidden_activation_list:         `list` of act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in input gate.
            hidden_batch_norm_list:         `list` of `mxnet.gluon.nn.BatchNorm` in hidden layers.

            ctx:                            `mx.cpu()` or `mx.gpu()`.
            regularizatable_data_list:      `list` of `RegularizatableData`.
            scale:                          `float` of scaling factor for initial parameters.
            tied_weights_flag:              `bool` of flag to tied weights or not.
            wd:                             `float` of parameter of weight decay.
            init_deferred_flag:             `bool` that means initialization in this class will be deferred or not.
        '''
        # --- argument validation -------------------------------------------
        if isinstance(encoder, ConvolutionalNeuralNetworks) is False:
            raise TypeError(
                "The type of `encoder` must be `ConvolutionalNeuralNetworks`.")
        if isinstance(decoder, ConvolutionalNeuralNetworks) is False:
            raise TypeError(
                "The type of `decoder` must be `ConvolutionalNeuralNetworks`.")

        if len(hidden_units_list) != len(hidden_activation_list):
            raise ValueError(
                "The length of `hidden_units_list` and `hidden_activation_list` must be equivalent."
            )

        if len(hidden_dropout_rate_list) != len(hidden_units_list):
            raise ValueError(
                "The length of `hidden_dropout_rate_list` and `hidden_units_list` must be equivalent."
            )

        if isinstance(computable_loss, ComputableLoss) is False and isinstance(
                computable_loss, nn.modules.loss._Loss) is False:
            raise TypeError(
                "The type of `computable_loss` must be `ComputableLoss` or `nn.modules.loss._Loss`."
            )

        logger = getLogger("accelbrainbase")
        self.__logger = logger

        if init_deferred_flag is None:
            init_deferred_flag = self.init_deferred_flag
        elif isinstance(init_deferred_flag, bool) is False:
            raise TypeError("The type of `init_deferred_flag` must be `bool`.")

        self.__not_init_flag = not_init_flag
        # Temporarily force deferred init so the parent __init__ skips its
        # own optimizer setup; the caller's flag is restored right after.
        self.init_deferred_flag = True

        super().__init__(
            computable_loss=computable_loss,
            initializer_f=initializer_f,
            optimizer_f=optimizer_f,
            learning_rate=learning_rate,
            hidden_units_list=hidden_units_list,
            output_nn=output_nn,
            hidden_dropout_rate_list=hidden_dropout_rate_list,
            hidden_activation_list=hidden_activation_list,
            hidden_batch_norm_list=hidden_batch_norm_list,
            ctx=ctx,
            regularizatable_data_list=regularizatable_data_list,
            scale=scale,
        )
        self.init_deferred_flag = init_deferred_flag
        self.encoder = encoder
        self.decoder = decoder
        self.__tied_weights_flag = tied_weights_flag
        self.output_nn = output_nn
        self.optimizer_f = optimizer_f
        self.encoder_optimizer_f = encoder_optimizer_f
        self.decoder_optimizer_f = decoder_optimizer_f
        self.__computable_loss = computable_loss
        self.__learning_rate = learning_rate
        self.encoder_optimizer = None
        self.decoder_optimizer = None
        self.output_optimizer = None
        self.__ctx = ctx

        # Optimizer setup runs only when init is not deferred/suppressed.
        # Preference order: per-network factory, shared factory, Adam default.
        if self.init_deferred_flag is False:
            if self.__not_init_flag is False:
                if self.encoder_optimizer_f is not None:
                    self.encoder_optimizer = self.encoder_optimizer_f(
                        self.encoder.parameters(), )
                elif self.optimizer_f is not None:
                    self.encoder_optimizer = self.optimizer_f(
                        self.encoder.parameters(), )
                else:
                    self.encoder_optimizer = Adam(
                        self.encoder.parameters(),
                        lr=self.__learning_rate,
                    )

                if self.decoder_optimizer_f is not None:
                    self.decoder_optimizer = self.decoder_optimizer_f(
                        self.decoder.parameters(), )
                elif self.optimizer_f is not None:
                    self.decoder_optimizer = self.optimizer_f(
                        self.decoder.parameters(), )
                else:
                    self.decoder_optimizer = Adam(
                        self.decoder.parameters(),
                        lr=self.__learning_rate,
                    )

    def parameters(self):
        '''
        Build optimizer-style parameter groups.

        Returns:
            `list` of `dict` with a "params" entry per sub-network:
            encoder, decoder, and `output_nn` when present.
        '''
        params_dict_list = [{
            "params": self.encoder.parameters(),
        }, {
            "params": self.decoder.parameters(),
        }]
        if self.output_nn is not None:
            params_dict_list.append({"params": self.output_nn.parameters()})
        return params_dict_list

    def learn(self, iteratable_data):
        '''
        Learn the observed data points
        for vector representation of the input images.

        Args:
            iteratable_data:     is-a `IteratableData`.

        '''
        if isinstance(iteratable_data, IteratableData) is False:
            raise TypeError(
                "The type of `iteratable_data` must be `IteratableData`.")

        self.__loss_list = []
        # NOTE(review): this local is unused; kept for byte-compatibility.
        learning_rate = self.__learning_rate
        try:
            epoch = 0
            iter_n = 0
            for batch_observed_arr, batch_target_arr, test_batch_observed_arr, test_batch_target_arr in iteratable_data.generate_learned_samples(
            ):
                self.epoch = epoch
                self.batch_size = batch_observed_arr.shape[0]
                self.encoder_optimizer.zero_grad()
                self.decoder_optimizer.zero_grad()

                # rank-3
                pred_arr = self.inference(batch_observed_arr)
                loss = self.compute_loss(pred_arr, batch_target_arr)
                loss.backward()
                self.encoder_optimizer.step()
                self.decoder_optimizer.step()
                self.regularize()

                # Evaluate/log once per "epoch-sized" slice of iterations.
                # NOTE(review): int(iter_n / epochs) is 0 when iter_n < epochs,
                # which raises ZeroDivisionError here — confirm the iterator
                # guarantees iter_n >= epochs.
                if (iter_n + 1) % int(
                        iteratable_data.iter_n / iteratable_data.epochs) == 0:
                    with torch.inference_mode():
                        # rank-3
                        test_pred_arr = self.inference(test_batch_observed_arr)
                        test_loss = self.compute_loss(test_pred_arr,
                                                      test_batch_target_arr)
                    _loss = loss.to('cpu').detach().numpy().copy()
                    _test_loss = test_loss.to('cpu').detach().numpy().copy()

                    self.__loss_list.append((_loss, _test_loss))

                    self.__logger.debug("Epochs: " + str(epoch + 1) +
                                        " Train loss: " + str(_loss) +
                                        " Test loss: " + str(_test_loss))
                    epoch += 1
                iter_n += 1

        except KeyboardInterrupt:
            self.__logger.debug("Interrupt.")

        self.__logger.debug("end. ")

    def inference(self, observed_arr):
        '''
        Inference the feature points to reconstruct the observed data points.

        Args:
            observed_arr:           rank-4 array like or sparse matrix as the observed data points.
                                    The shape is: (batch size, channel, height, width)

        Returns:
            `tensor` of inferenced feature points.
        '''
        return self(observed_arr)

    def compute_loss(self, pred_arr, labeled_arr):
        '''
        Compute loss.

        Args:
            pred_arr:       `tensor`.
            labeled_arr:    `tensor`.

        Returns:
            loss.
        '''
        return self.__computable_loss(pred_arr, labeled_arr)

    def extract_feature_points(self):
        '''
        Extract the activities in hidden layer and reset it.

        Returns:
            The `tensor` of array like or sparse matrix of feature points or virtual visible observed data points.

        Note:
            `feature_points_arr` is set by `forward`, so this is only valid
            after at least one forward pass.
        '''
        return self.feature_points_arr

    def extract_learned_dict(self):
        '''
        Extract (pre-) learned parameters.

        Returns:
            `dict` of the parameters.
        '''
        params_arr_dict = {}

        # NOTE(review): `params_dict[k].data()` is mxnet-Gluon style; with
        # torch parameters this would need `.data` (attribute) instead —
        # confirm what the encoder/decoder `extract_learned_dict` returns.
        params_dict = self.encoder.extract_learned_dict()
        for k in params_dict:
            params_arr_dict.setdefault(k, params_dict[k].data())

        params_dict = self.decoder.extract_learned_dict()
        for k in params_dict:
            params_arr_dict.setdefault(k, params_dict[k].data())

        return params_arr_dict

    def forward(self, x):
        '''
        Forward pass: encode, optionally transform with `output_nn`,
        then decode.  Also caches the encoder output in
        `self.feature_points_arr` for `extract_feature_points`.

        Args:
            x:      `tensor` of observed data points.

        Returns:
            `tensor` of inferenced feature points.
        '''
        encoded_arr = self.encoder(x)
        self.feature_points_arr = encoded_arr
        if self.output_nn is None:
            decoded_arr = self.decoder(encoded_arr)
        else:
            inner_decoded_arr = self.output_nn(encoded_arr)
            decoded_arr = self.decoder(inner_decoded_arr)
        self.__pred_arr = decoded_arr
        return decoded_arr

    def regularize(self):
        '''
        Regularization.

        Delegates to the encoder/decoder regularizers, then re-ties the
        encoder/decoder weights when `tied_weights_flag` is set.
        '''
        self.encoder.regularize()
        self.decoder.regularize()
        self.__tie_weights()

    def __tie_weights(self):
        # Copy each encoder conv weight into the mirrored decoder layer
        # (encoder layer i <-> decoder layer n-1-i), then load the updated
        # weights back into the decoder.  No-op unless tying is enabled.
        if self.__tied_weights_flag is True:
            encoder_params_dict = self.encoder.extract_learned_dict()
            decoder_params_dict = self.decoder.extract_learned_dict()
            encoder_weight_keys_list = [
                key for key in encoder_params_dict.keys()
                if "hidden_units_list" in key and "weight" in key
            ]
            decoder_weight_keys_list = [
                key for key in decoder_params_dict.keys()
                if "hidden_units_list" in key and "weight" in key
            ]

            if len(encoder_weight_keys_list) != len(decoder_weight_keys_list):
                raise ValueError("The number of layers is invalid.")

            for i in range(len(self.encoder.hidden_units_list)):
                encoder_layer = i
                decoder_layer = len(self.encoder.hidden_units_list) - i - 1
                encoder_weight_keys, decoder_weight_keys = None, None
                for _encoder_weight_keys in encoder_weight_keys_list:
                    if "hidden_units_list." + str(
                            encoder_layer) + ".weight" in _encoder_weight_keys:
                        encoder_weight_keys = _encoder_weight_keys
                        break

                for _decoder_weight_keys in decoder_weight_keys_list:
                    if "hidden_units_list." + str(
                            decoder_layer) + ".weight" in _decoder_weight_keys:
                        decoder_weight_keys = _decoder_weight_keys
                        break

                if encoder_weight_keys is not None and decoder_weight_keys is not None:
                    # NOTE(review): a plain dict assignment cannot raise
                    # AssertionError, so this shape-mismatch guard likely
                    # never fires — confirm whether a shape check was meant.
                    try:
                        decoder_params_dict[
                            decoder_weight_keys] = encoder_params_dict[
                                encoder_weight_keys]
                    except AssertionError:
                        raise ValueError(
                            "The shapes of weight matrixs must be equivalents in encoder layer "
                            + str(encoder_layer) + " and decoder layer " +
                            str(decoder_layer))

            for k, params in decoder_params_dict.items():
                if k in decoder_weight_keys_list:
                    self.decoder.load_state_dict({k: params}, strict=False)

    def set_readonly(self, value):
        ''' setter '''
        raise TypeError("This property must be read-only.")

    def get_loss_arr(self):
        ''' getter for losses as an array of (train_loss, test_loss) pairs. '''
        return np.array(self.__loss_list)

    loss_arr = property(get_loss_arr, set_readonly)

    def get_init_deferred_flag(self):
        ''' getter for `bool` that means initialization in this class will be deferred or not.'''
        return self.__init_deferred_flag

    def set_init_deferred_flag(self, value):
        ''' setter for `bool` that means initialization in this class will be deferred or not. '''
        self.__init_deferred_flag = value

    init_deferred_flag = property(get_init_deferred_flag,
                                  set_init_deferred_flag)

    def get_batch_size(self):
        ''' getter for batch size.'''
        return self.__batch_size

    def set_batch_size(self, value):
        ''' setter for batch size.'''
        self.__batch_size = value

    batch_size = property(get_batch_size, set_batch_size)

    def get_computable_loss(self):
        ''' getter for `ComputableLoss`.'''
        return self.__computable_loss

    def set_computable_loss(self, value):
        ''' setter for `ComputableLoss`.'''
        self.__computable_loss = value

    computable_loss = property(get_computable_loss, set_computable_loss)
Ejemplo n.º 9
0
    # NOTE(review): this __init__ is a verbatim duplicate of
    # ConvolutionalAutoEncoder.__init__ above, detached from its enclosing
    # class in this chunk (the class header is missing — likely a subclass
    # fragment). Confirm which class it belongs to before deduplicating.
    def __init__(
        self,
        encoder,
        decoder,
        computable_loss,
        initializer_f=None,
        optimizer_f=None,
        encoder_optimizer_f=None,
        decoder_optimizer_f=None,
        learning_rate=1e-05,
        hidden_units_list=[],
        output_nn=None,
        hidden_dropout_rate_list=[],
        hidden_activation_list=[],
        hidden_batch_norm_list=[],
        ctx="cpu",
        regularizatable_data_list=[],
        scale=1.0,
        tied_weights_flag=True,
        init_deferred_flag=None,
        not_init_flag=False,
        wd=None,
    ):
        '''
        Init.

        Args:
            encoder:                        is-a `CNNHybrid`.
            decoder:                        is-a `CNNHybrid`.
            computable_loss:                is-a `ComputableLoss` or `mxnet.gluon.loss`.
            initializer_f:                  is-a `mxnet.initializer.Initializer` for parameters of model. If `None`, it is drawing from the Xavier distribution.
            learning_rate:                  `float` of learning rate.

            hidden_units_list:              `list` of `mxnet.gluon.nn._conv` in hidden layers.
            output_nn:                      is-a `NNHybrid` as output layers.
                                            If `None`, last layer in `hidden_units_list` will be considered as an output layer.

            hidden_dropout_rate_list:       `list` of `float` of dropout rate in hidden layers.

            optimizer_name:                 `str` of name of optimizer.

            hidden_activation_list:         `list` of act_type` in `mxnet.ndarray.Activation` or `mxnet.symbol.Activation` in input gate.
            hidden_batch_norm_list:         `list` of `mxnet.gluon.nn.BatchNorm` in hidden layers.

            ctx:                            `mx.cpu()` or `mx.gpu()`.
            regularizatable_data_list:      `list` of `RegularizatableData`.
            scale:                          `float` of scaling factor for initial parameters.
            tied_weights_flag:              `bool` of flag to tied weights or not.
            wd:                             `float` of parameter of weight decay.
            init_deferred_flag:             `bool` that means initialization in this class will be deferred or not.
        '''
        if isinstance(encoder, ConvolutionalNeuralNetworks) is False:
            raise TypeError(
                "The type of `encoder` must be `ConvolutionalNeuralNetworks`.")
        if isinstance(decoder, ConvolutionalNeuralNetworks) is False:
            raise TypeError(
                "The type of `decoder` must be `ConvolutionalNeuralNetworks`.")

        if len(hidden_units_list) != len(hidden_activation_list):
            raise ValueError(
                "The length of `hidden_units_list` and `hidden_activation_list` must be equivalent."
            )

        if len(hidden_dropout_rate_list) != len(hidden_units_list):
            raise ValueError(
                "The length of `hidden_dropout_rate_list` and `hidden_units_list` must be equivalent."
            )

        if isinstance(computable_loss, ComputableLoss) is False and isinstance(
                computable_loss, nn.modules.loss._Loss) is False:
            raise TypeError(
                "The type of `computable_loss` must be `ComputableLoss` or `nn.modules.loss._Loss`."
            )

        logger = getLogger("accelbrainbase")
        self.__logger = logger

        if init_deferred_flag is None:
            init_deferred_flag = self.init_deferred_flag
        elif isinstance(init_deferred_flag, bool) is False:
            raise TypeError("The type of `init_deferred_flag` must be `bool`.")

        self.__not_init_flag = not_init_flag
        # Temporarily force deferred init so the parent __init__ skips its
        # own optimizer setup; the caller's flag is restored right after.
        self.init_deferred_flag = True

        super().__init__(
            computable_loss=computable_loss,
            initializer_f=initializer_f,
            optimizer_f=optimizer_f,
            learning_rate=learning_rate,
            hidden_units_list=hidden_units_list,
            output_nn=output_nn,
            hidden_dropout_rate_list=hidden_dropout_rate_list,
            hidden_activation_list=hidden_activation_list,
            hidden_batch_norm_list=hidden_batch_norm_list,
            ctx=ctx,
            regularizatable_data_list=regularizatable_data_list,
            scale=scale,
        )
        self.init_deferred_flag = init_deferred_flag
        self.encoder = encoder
        self.decoder = decoder
        self.__tied_weights_flag = tied_weights_flag
        self.output_nn = output_nn
        self.optimizer_f = optimizer_f
        self.encoder_optimizer_f = encoder_optimizer_f
        self.decoder_optimizer_f = decoder_optimizer_f
        self.__computable_loss = computable_loss
        self.__learning_rate = learning_rate
        self.encoder_optimizer = None
        self.decoder_optimizer = None
        self.output_optimizer = None
        self.__ctx = ctx

        # Optimizer setup runs only when init is not deferred/suppressed.
        # Preference order: per-network factory, shared factory, Adam default.
        if self.init_deferred_flag is False:
            if self.__not_init_flag is False:
                if self.encoder_optimizer_f is not None:
                    self.encoder_optimizer = self.encoder_optimizer_f(
                        self.encoder.parameters(), )
                elif self.optimizer_f is not None:
                    self.encoder_optimizer = self.optimizer_f(
                        self.encoder.parameters(), )
                else:
                    self.encoder_optimizer = Adam(
                        self.encoder.parameters(),
                        lr=self.__learning_rate,
                    )

                if self.decoder_optimizer_f is not None:
                    self.decoder_optimizer = self.decoder_optimizer_f(
                        self.decoder.parameters(), )
                elif self.optimizer_f is not None:
                    self.decoder_optimizer = self.optimizer_f(
                        self.decoder.parameters(), )
                else:
                    self.decoder_optimizer = Adam(
                        self.decoder.parameters(),
                        lr=self.__learning_rate,
                    )
Ejemplo n.º 10
0
def main(args):
    """Train a DCGAN on MEG data and plot the relative power spectrum (RPS)
    of a few generated samples.

    Args:
        args: parsed CLI namespace providing the data/figure/model
            directories and the hyper-parameters consumed by ``Params_cross``.
    """
    data_dir = args.data_dir
    figure_path = args.figure_dir
    model_path = args.model_dir

    file_name = "ball_left_mean.npz"

    # Set skip_training to False if the model has to be trained, to True if
    # the model has to be loaded.
    skip_training = False

    # Set the torch device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Device = {}".format(device))

    parameters = Params_cross(subject_n=args.sub,
                              hand=args.hand,
                              batch_size=args.batch_size,
                              valid_batch_size=args.batch_size_valid,
                              test_batch_size=args.batch_size_test,
                              epochs=args.epochs,
                              lr=args.learning_rate,
                              wd=args.weight_decay,
                              patience=args.patience,
                              device=device,
                              y_measure=args.y_measure,
                              desc=args.desc
                              )

    # Create train dataset
    train_dataset = MEG_Within_Dataset_ivan(data_dir, parameters.subject_n,
                                            parameters.hand, mode="train")

    trainloader = DataLoader(train_dataset, batch_size=parameters.batch_size,
                             shuffle=True, num_workers=1)

    # Local debug stand-in: random tensors shaped like one MEG batch.
    # NOTE: this intentionally overrides the real dataset above.
    train_dataset = torch.utils.data.TensorDataset(
        torch.randn((20, 1, 204, 250)),
        torch.ones(20),
        torch.zeros(20))

    trainloader = DataLoader(train_dataset, batch_size=10,
                             shuffle=True, num_workers=1)

    print("train set {}".format(len(train_dataset)))

    nz = 1000  # size of the latent (noise) vector fed to the generator
    netG = Generator(nz=nz, ngf=12, nc=1)
    netD = Discriminator(nc=1, ndf=12)

    netD = netD.to(device)
    netG = netG.to(device)

    print(netG)
    print(netD)

    # Count and report the trainable parameters of both networks.
    g_params = 0
    d_param = 0
    print("Generator")
    for name, parameter in netG.named_parameters():
        param = parameter.numel()
        print("param {} : {}".format(name, param if parameter.requires_grad
                                     else 0))
        g_params += param
    print(f"Generator Trainable Params: {g_params}")

    print("Discriminator")
    for name, parameter in netD.named_parameters():
        param = parameter.numel()
        print("param {} : {}".format(name, param if parameter.requires_grad
                                     else 0))
        d_param += param
    print(f"Discriminator Trainable Params: {d_param}")

    print(f"Total Trainable Params: {g_params + d_param}")

    if not skip_training:
        print("Begin training....")

        optimizerG = Adam(netG.parameters(), lr=parameters.lr,
                          betas=(0.5, 0.999))
        optimizerD = Adam(netD.parameters(), lr=parameters.lr,
                          betas=(0.5, 0.999))

        start_time = timer.time()

        netG, netD, lossG, lossDF, lossDR, D_fake, D_real = train_gan(
            netG, netD, trainloader, optimizerG, optimizerD, device,
            parameters.epochs, parameters.patience, model_path, nz)

        train_time = timer.time() - start_time
        print("Training done in {:.4f}".format(train_time))

        # Fixed: the generator filename contained a stray leading quote
        # ("'dcgan_g.pth"), which produced a badly named checkpoint file.
        save_pytorch_model(netG, model_path, "dcgan_g.pth")
        save_pytorch_model(netD, model_path, "dcgan_d.pth")

        # Evaluation, right now print a rps from data randomly generated.
        # Fixed: these loss/score variables only exist when training ran,
        # so the report is guarded by the training branch (previously a
        # NameError when skip_training was True). 'D_reali' typo fixed.
        print("Evaluation...")
        print("last loss of generator : ", lossG)
        print("last loss of discriminator on _fake : ", lossDF)
        print("last loss of discriminator on _real : ", lossDR)
        print('D_fake', D_fake)
        print('D_real', D_real)

    netG.eval()
    z_val = torch.randn((4, nz, 1, 1)).to(device)

    # Fixed: forward() was called without the noise input (TypeError at
    # runtime); generate samples from the fixed noise batch z_val instead.
    _fake_val = netG(z_val)

    bands = [(1, 4), (4, 8), (8, 10), (10, 13), (13, 30), (30, 70)]
    bp_train = bandpower_multi(_fake_val, fs=250, bands=bands,
                               nperseg=250 / 2, relative=True)

    # One subplot per generated sample, showing relative band power per
    # channel across the six frequency bands.
    epoch = range(_fake_val.shape[0])
    fig, axs = plt.subplots(2, 2, figsize=[12, 6])
    fig.suptitle("RPS_train")
    for e, ax in zip(epoch, axs.ravel()):
        im = ax.pcolormesh(bp_train[e, ...])
        fig.colorbar(im, ax=ax)
        ax.set_ylabel("Channels")
        # ax.set_xlabel("Bands")
        ax.locator_params(axis="y", nbins=5)
        ax.set_xticks([0.5, 1.5, 2.5, 3.5, 4.5, 5.5])
        ax.set_xticklabels(["\u03B4", "\u03B8", "low-\u03B1",
                            "high-\u03B1", "\u03B2", "low-\u03B3"], )
        # ax.set_title("target: {}".format(sample_y_train[e]))
    # plt.savefig(os.path.join(figure_dir, "RPS_epoch_{}_hand_{}.pdf"
    #                          .format(epoch, "right" if hand == 1 else "left")))
    plt.tight_layout()
    plt.show()
    def __init__(self, **args):
        """Configure data, model and optimizer for one-step RNN training.

        Args:
            **args: experiment configuration; the expected keys are read
                explicitly below (data, hidden_size, optimizer, learning
                rates, clipping thresholds, logging switches, ...).
        """
        # Fixed seed so every run of an experiment is reproducible.
        torch.manual_seed(1)

        if args["num_threads"] > 0:
            torch.set_num_threads(args["num_threads"])

        print(args)

        self.hidden_size = args["hidden_size"]
        self.data = Data().get_data(args["data"])

        # Input/output sizes are inferred from the first (X, y) sample.
        self.input_dim = self.data[0][0].shape[1]
        self.output_size = self.data[0][1].shape[1]

        self.optimizer_name = args["optimizer"]

        self.time_decay_power = args["time_decay_power"]

        self.alpha = args["alpha"]
        self.w = args["window_size"]  # window size
        self.clip_hh = args["clip_hh"]
        self.clip_ih = args["clip_ih"]
        # Per-weight-matrix learning rates (hidden-hidden, input-hidden,
        # output-hidden).
        self.lr_hh = args["lr_hh"]
        self.lr_ih = args["lr_ih"]
        self.lr_oh = args["lr_oh"]

        self.append_input = args["append_input"]

        if self.optimizer_name == "windowed_ogd":
            # Scale the learning rates by the layer fan-in for windowed OGD.
            self.lr_hh /= (4 * self.hidden_size)
            self.lr_ih /= (4 * self.input_dim)

        self.time_decay = args["time_decay"]
        self.recurrent_cell = args["recurrent_cell"]
        self.n = args[
            "truncation"]  #  backpropagation truncation including window. include gradients from last cell to 30th from end cell.
        self.mlflow = args["mlflow"]

        self.model = OneStepRNN(hidden_size=self.hidden_size,
                                input_size=self.input_dim,
                                recurrent_cell=self.recurrent_cell,
                                output_size=self.output_size,
                                append_input=self.append_input)

        # Each optimizer gets three named parameter groups so the three
        # weight matrices can use their own learning rates (and, for
        # windowed OGD, their own clipping thresholds).
        if self.optimizer_name == "sgd":
            self.optimizer = SGD([{
                "name": "weight_hh",
                "params": self.model.weight_hh,
                "lr": self.lr_hh
            }, {
                "name": "weight_ih",
                "params": self.model.weight_ih,
                "lr": self.lr_ih
            }, {
                "name": "weight_oh",
                "params": self.model.weight_oh,
                "lr": self.lr_oh
            }])
        elif self.optimizer_name == "adam":
            self.optimizer = Adam([{
                "name": "weight_hh",
                "params": self.model.weight_hh,
                "lr": self.lr_hh
            }, {
                "name": "weight_ih",
                "params": self.model.weight_ih,
                "lr": self.lr_ih
            }, {
                "name": "weight_oh",
                "params": self.model.weight_oh,
                "lr": self.lr_oh
            }])
        elif self.optimizer_name == "rmsprop":
            self.optimizer = RMSprop([{
                "name": "weight_hh",
                "params": self.model.weight_hh,
                "lr": self.lr_hh
            }, {
                "name": "weight_ih",
                "params": self.model.weight_ih,
                "lr": self.lr_ih
            }, {
                "name": "weight_oh",
                "params": self.model.weight_oh,
                "lr": self.lr_oh
            }])
        elif self.optimizer_name == "windowed_ogd":
            self.optimizer = WindowedOGD([{
                "name": "weight_hh",
                "params": self.model.weight_hh,
                "lr": self.lr_hh,
                "clip": self.clip_hh
            }, {
                "name": "weight_ih",
                "params": self.model.weight_ih,
                "lr": self.lr_ih,
                "clip": self.clip_ih
            }, {
                "name": "weight_oh",
                "params": self.model.weight_oh,
                "lr": self.lr_oh
            }])

        # Fixed: torch.autograd.Variable is deprecated since PyTorch 0.4;
        # a plain tensor with requires_grad=False is equivalent.
        self.init_hidden = torch.zeros(1, self.hidden_size,
                                       requires_grad=False)

        self.final_losses = []
        # Normalizer for the window loss: sum of i**alpha over the window.
        # Fixed: np.float was removed in NumPy 1.24; use the builtin float.
        self.loss_normalization_factor = np.sum(
            np.fromiter((i**self.alpha for i in range(1, self.w + 1)),
                        float))

        if self.optimizer_name == "windowed_ogd" and self.n < self.w:
            raise RuntimeError(
                "truncation length must be >= window size for windowed_ogd")

        self.experiment_name = args["experiment_name"]

        self.args = args

        self.mlflow_uuid = None

        self.output_decay = args["output_decay"]
        self.log_hessian = args["log_hessian"]
        self.log_difference = args["log_differences"]
        self.log_hessian_every = args["log_hessian_every"]

        if self.log_difference:
            # Snapshots of the weights plus zeroed delta buffers, used to
            # track how much each matrix moves during training.
            self.log_differences_dict = {
                "W_ih": self.model.weight_ih.clone().data.numpy(),
                "dW_ih": np.zeros(self.model.weight_ih.shape),
                "W_hh": self.model.weight_hh.clone().data.numpy(),
                "dW_hh": np.zeros(self.model.weight_hh.shape),
            }
Ejemplo n.º 12
0
    #                   parameters.ff_hidden_channels,
    #                   parameters.dropout,
    #                   parameters.max_pooling,
    #                   parameters.activation)

    # net = RPS_MNet_ECoG(n_times)
    net = RPS_MLP(in_channel=62, n_bands=6)
    mlp = True

    print(net)

    # Training loop or model loading
    if not skip_training:
        print("Begin training...")

        optimizer = Adam(net.parameters(), lr=parameters.lr)
        # optimizer = SGD(net.parameters(), lr=parameters.lr, weight_decay=5e-4)

        loss_function = torch.nn.MSELoss()

        start_time = timer.time()
        if rps:
            if mlp:
                net, train_loss, valid_loss = train_bp_MLP(
                    net,
                    trainloader,
                    validloader,
                    optimizer,
                    loss_function,
                    parameters.device,
                    parameters.epochs,
Ejemplo n.º 13
0
def train(model, SRC, TRG, MODEL_PATH, FORCE_MAX_LEN=50):
    """Train a seq2seq transformer with teacher forcing.

    Iterates ``hp.EPOCHS`` epochs over the module-level ``train_iter``;
    every 50 global steps prints a decoded validation sample from
    ``val_iter``, reports train/validation loss and logs both to the
    module-level TensorBoard ``writer``. Saves the whole model to
    ``MODEL_PATH`` after each epoch.

    Args:
        model: the transformer; called as ``model(src, trg_input)``.
        SRC: source field (unused here; kept for interface compatibility).
        TRG: target field; its vocab maps tokens <-> indices.
        MODEL_PATH: file path the model is saved to each epoch.
        FORCE_MAX_LEN: currently unused decode-length cap.
    """
    model.train()
    optimizer = Adam(model.parameters(), lr=hp.LR, betas=(0.9, 0.98), eps=1e-9)
    criterion = CrossEntropyLoss(ignore_index=TRG.vocab.stoi["<pad>"])

    for epoch in tqdm(range(hp.EPOCHS)):

        for step, batch in enumerate(train_iter):
            global_step = epoch * len(train_iter) + step

            model.train()
            optimizer.zero_grad()
            # Warmup/decay schedule applied in place each step.
            optimizer = custom_lr_optimizer(optimizer, global_step)

            # Batches arrive (seq, batch); transpose to (batch, seq).
            src = batch.src.T
            trg = batch.trg.T

            # Teacher forcing: feed target minus last token, predict minus
            # first token.
            trg_input = trg[:, :-1]

            preds, _, _, _ = model(src, trg_input)
            ys = trg[:, 1:]

            # Should this be permuted?
            loss = criterion(preds.permute(0, 2, 1), ys)
            loss.mean().backward()
            optimizer.step()

            if global_step % 50 == 0:
                print("#" * 90)

                rand_index = random.randrange(hp.BATCH_SIZE)

                model.eval()

                v = next(iter(val_iter))
                v_src, v_trg = v.src.T, v.trg.T

                v_trg_inp = v_trg[:, :-1].detach()
                v_trg_real = v_trg[:, 1:].detach()

                v_predictions, _, _, _ = model(v_src, v_trg_inp)
                max_args = v_predictions[rand_index].argmax(-1)
                print("For random element in VALIDATION batch (real/pred)...")
                print([
                    TRG.vocab.itos[word_idx]
                    for word_idx in v_trg_real[rand_index, :]
                ])
                print([TRG.vocab.itos[word_idx] for word_idx in max_args])

                print("Length til first <PAD> (real -> pred)...")
                # Fixed: a bare `except:` hid real failures (including
                # KeyboardInterrupt); list.index only raises ValueError
                # when the prediction contains no <PAD> (id 3).
                try:
                    pred_PAD_idx = max_args.tolist().index(3)
                except ValueError:
                    pred_PAD_idx = None

                print(v_trg_real[rand_index, :].tolist().index(3), "  --->  ",
                      pred_PAD_idx)

                val_loss = criterion(v_predictions.permute(0, 2, 1),
                                     v_trg_real)
                print("TRAINING LOSS:", loss.mean().item())
                print("VALIDATION LOSS:", val_loss.mean().item())

                print("#" * 90)

                writer.add_scalar("Training Loss",
                                  loss.mean().detach().item(), global_step)
                writer.add_scalar("Validation Loss",
                                  val_loss.mean().detach().item(), global_step)
        torch.save(model, MODEL_PATH)
Ejemplo n.º 14
0
def train(shared_model,
          task,
          batch_size,
          train_steps,
          gpu_id,
          start,
          restore,
          counter,
          barrier=None,
          save_interval=None,
          eval_interval=None,
          log=True):
    """Multi-task training worker for OmniNet.

    Builds the dataloaders and a ScheduledOptim-wrapped Adam optimizer for
    the requested ``task`` ('caption', 'vqa', 'hmdb' or 'penn'), then runs
    steps ``start``..``train_steps``: each step syncs the local model from
    ``shared_model`` (for gpu_id > 0), computes the task loss, backpropagates,
    pushes gradients back to the shared model and steps the optimizer.
    Periodically evaluates, logs to TensorBoard and saves the shared model.

    NOTE(review): an unrecognized ``task`` leaves ``DL`` and ``optimizer``
    undefined and fails later with a NameError — confirm callers only ever
    pass the four supported tasks.
    """
    log_dir = 'logs/%s' % task
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    if (log == True):
        summary_writer = SummaryWriter(log_dir)
    # Create local model
    # Re-seed the RNG so each worker uses a different random stream.
    torch.manual_seed(int(random.random() * 1000))
    if gpu_id > 0:
        model = omninet.OmniNet(gpu_id=gpu_id)
        model = model.cuda(gpu_id)
    else:
        #For GPU 0, use the shared model always
        model = shared_model

    # Task-specific dataloaders and warmup-scheduled Adam optimizer.
    if task == 'caption':
        DL, val_dl = dl.coco_cap_batchgen(caption_dir=caption_dir,
                                          image_dir=coco_images,
                                          num_workers=8,
                                          batch_size=batch_size)

        optimizer = ScheduledOptim(Adam(filter(lambda x: x.requires_grad,
                                               shared_model.parameters()),
                                        betas=(0.9, 0.98),
                                        eps=1e-09),
                                   512,
                                   16000,
                                   restore,
                                   init_lr=0.02)
    elif task == 'vqa':
        DL, val_dl = dl.vqa_batchgen(vqa_dir,
                                     coco_images,
                                     num_workers=8,
                                     batch_size=batch_size)
        optimizer = ScheduledOptim(Adam(filter(lambda x: x.requires_grad,
                                               shared_model.parameters()),
                                        betas=(0.9, 0.98),
                                        eps=1e-09),
                                   512,
                                   16000,
                                   restore,
                                   max_lr=0.0001,
                                   init_lr=0.02)
    elif task == 'hmdb':
        DL, val_dl = dl.hmdb_batchgen(hmdb_data_dir,
                                      hmdb_process_dir,
                                      num_workers=8,
                                      batch_size=batch_size,
                                      test_batch_size=int(batch_size / 4),
                                      clip_len=16)
        optimizer = ScheduledOptim(Adam(filter(lambda x: x.requires_grad,
                                               shared_model.parameters()),
                                        betas=(0.9, 0.98),
                                        eps=1e-09),
                                   512,
                                   16000,
                                   restore,
                                   max_lr=0.0001,
                                   init_lr=0.02)
    elif task == 'penn':
        DL, val_dl, test_dl = dl.penn_dataloader(
            penn_data_dir,
            batch_size=batch_size,
            test_batch_size=int(batch_size / 2),
            num_workers=4,
            vocab_file='conf/penn_vocab.json')
        optimizer = ScheduledOptim(Adam(filter(lambda x: x.requires_grad,
                                               shared_model.parameters()),
                                        betas=(0.9, 0.98),
                                        eps=1e-09),
                                   512,
                                   16000,
                                   restore,
                                   init_lr=0.02)

    model = model.train()

    for i in range(start, train_steps):
        # model.zero_grad() and optimizer.zero_grad() both reset parameter
        # gradients to zero: https://zhuanlan.zhihu.com/p/62387047
        model.zero_grad()
        # Synchronize workers at the start of each step, if requested.
        if barrier is not None:
            barrier.wait()

        # Saving and loading models: https://zhuanlan.zhihu.com/p/38056115
        if gpu_id > 0:
            with torch.cuda.device(gpu_id):
                model.load_state_dict(shared_model.state_dict())

        # Calculate loss
        step = counter.increment()
        if task == 'caption':
            # Periodic validation pass (note: unlike 'vqa'/'hmdb' there is
            # no `continue` here, so a training step follows the eval).
            if (log and eval_interval is not None and i % eval_interval == 0):
                model = model.eval()
                val_loss = 0
                val_acc = 0
                print('-' * 100)
                print('Evaluation step')
                for b in tqdm(val_dl):
                    imgs = b['img']
                    if gpu_id >= 0:
                        imgs = imgs.cuda(device=gpu_id)
                    captions = b['cap']
                    # In val mode we do not pass the targets for prediction. We use it only for loss calculation
                    _, loss, acc = r.image_caption(model,
                                                   imgs,
                                                   targets=captions,
                                                   mode='val',
                                                   return_str_preds=True)
                    val_loss += float(loss.detach().cpu().numpy())
                    val_acc += acc
                val_loss /= len(val_dl)
                val_acc = (val_acc / len(val_dl))
                summary_writer.add_scalar('Val_loss', val_loss, step)
                print('Step %d, COCO validation loss: %f, Accuracy %f %%' %
                      (step, val_loss, val_acc))
                print('-' * 100)
                model = model.train()
            batch = next(DL)
            if gpu_id >= 0:
                imgs = batch['img'].cuda(device=gpu_id)
            else:
                imgs = batch['img']
            captions = batch['cap']
            _, loss, acc = r.image_caption(model, imgs, targets=captions)
            loss.backward()
            loss = loss.detach()
            if log:
                summary_writer.add_scalar('Loss', loss, step)
            print('Step %d, Caption Loss: %f, Accuracy:  %f %%' %
                  (step, loss, acc))

        elif task == 'vqa':
            if (log and eval_interval is not None and i % eval_interval == 0):
                model = model.eval()
                val_loss = 0
                val_acc = 0
                print('-' * 100)
                print('Evaluation step')
                for b in tqdm(val_dl):
                    imgs = b['img']
                    answers = b['ans']
                    if gpu_id >= 0:
                        imgs = imgs.cuda(device=gpu_id)
                        answers = answers.cuda(device=gpu_id)
                    questions = b['ques']
                    # In val mode we do not pass the targets for prediction. We use it only for loss calculation
                    pred, loss, acc = r.vqa(model,
                                            imgs,
                                            questions,
                                            targets=answers,
                                            mode='val',
                                            return_str_preds=True)
                    val_loss += float(loss.detach().cpu().numpy())
                    val_acc += acc
                val_loss /= len(val_dl)
                val_acc = (val_acc / len(val_dl))
                summary_writer.add_scalar('Val_loss', val_loss, step)
                print('Step %d, VQA validation loss: %f, Accuracy %f %%' %
                      (step, val_loss, val_acc))
                print('-' * 100)
                model = model.train()
                continue
            batch = next(DL)
            if gpu_id >= 0:
                imgs = batch['img'].cuda(device=gpu_id)
                answers = batch['ans'].cuda(device=gpu_id)
            else:
                imgs = batch['img']
                answers = batch['ans']
            questions = batch['ques']
            _, loss, acc = r.vqa(model, imgs, questions, targets=answers)
            loss.backward()
            loss = loss.detach()
            if log:
                summary_writer.add_scalar('Loss', loss, step)
            print('Step %d, VQA Loss: %f, Accuracy:  %f %%' %
                  (step, loss, acc))
        elif task == 'hmdb':
            if (log and eval_interval is not None and i % eval_interval == 0):
                model = model.eval()
                val_loss = 0
                val_acc = 0
                print('-' * 100)
                print('Evaluation step')
                for b in tqdm(val_dl):
                    vid, labels = b
                    if gpu_id >= 0:
                        vid = vid.cuda(device=gpu_id)
                        labels = labels.cuda(device=gpu_id)
                    _, loss, acc = r.hmdb(model,
                                          vid,
                                          targets=labels,
                                          mode='val')
                    val_loss += float(loss.detach().cpu().numpy())
                    val_acc += acc
                val_loss /= len(val_dl)
                val_acc = (val_acc / len(val_dl))
                summary_writer.add_scalar('Val_loss', val_loss, step)
                print('Step %d, HMDB validation loss: %f, Accuracy %f %%' %
                      (step, val_loss, val_acc))
                print('-' * 100)
                model = model.train()
                continue
            vid, labels = next(DL)
            if gpu_id >= 0:
                vid = vid.cuda(device=gpu_id)
                labels = labels.cuda(device=gpu_id)
            _, loss, acc = r.hmdb(model,
                                  vid,
                                  targets=labels,
                                  return_str_preds=True)
            loss.backward()
            loss = loss.detach()
            if log:
                summary_writer.add_scalar('Loss', loss, step)
            print('Step %d, HMDB Loss: %f, Accuracy:  %f %%' %
                  (step, loss, acc))

        elif task == 'penn':
            if (log and eval_interval is not None and i % eval_interval == 0):
                model = model.eval()
                val_loss = 0
                val_acc = 0
                print('-' * 100)
                print('Evaluation step')
                for b in tqdm(test_dl):
                    en = b['text']
                    targets = b['tokens']
                    pad_id = b['pad_id']
                    pad_mask = b['pad_mask']
                    if gpu_id >= 0:
                        targets = targets.to(gpu_id)
                        pad_mask = pad_mask.to(gpu_id)
                    _, loss, acc = r.penn(model,
                                          en,
                                          target_pad_mask=pad_mask,
                                          pad_id=pad_id,
                                          targets=targets,
                                          mode='val',
                                          return_str_preds=True)
                    loss = loss.detach()
                    val_loss += float(loss.cpu().numpy())
                    val_acc += acc
                # NOTE(review): the loop above iterates test_dl but the
                # averages divide by len(val_dl) — confirm which split and
                # denominator are intended.
                val_loss /= len(val_dl)
                val_acc = (val_acc / len(val_dl))
                summary_writer.add_scalar('Val_loss', val_loss, step)
                print('Step %d, PENN validation loss: %f, Accuracy %f %%' %
                      (step, val_loss, val_acc))
                print('-' * 100)
                model = model.train()
            batch = next(DL)
            en = batch['text']
            targets = batch['tokens']
            pad_id = batch['pad_id']
            pad_mask = batch['pad_mask']
            if gpu_id >= 0:
                targets = targets.to(gpu_id)
                pad_mask = pad_mask.to(gpu_id)
            _, loss, acc = r.penn(model,
                                  en,
                                  pad_id=pad_id,
                                  targets=targets,
                                  target_pad_mask=pad_mask)
            loss.backward()
            loss = loss.detach()
            if log:
                summary_writer.add_scalar('Loss', loss, step)
            print('Step %d, PENN Loss: %f, Accuracy:  %f %%' %
                  (step, loss, acc))

        # End Calculate loss
        # Copy local gradients onto the shared model before stepping the
        # (shared-parameter) optimizer.
        if gpu_id > 0:
            ensure_shared_grads(model, shared_model, gpu_id)
        optimizer.step()
        # Save model
        if (save_interval != None and (i + 1) % save_interval == 0):
            shared_model.save(model_save_path, step)
        sys.stdout.flush()
Ejemplo n.º 15
0
class D4PGLearner(Learner):
    """Distributed D4PG learner.

    Consumes replay batches from a shared queue and trains a categorical
    distributional critic plus a deterministic actor, periodically uploading
    the target networks for the actor workers. Relies on attributes set by
    the ``Learner`` base class (device, batch_size, gamma, normalizer,
    prioritised, learner_steps, statistic_dict, ...).
    """

    def __init__(self, algo_params, env, queues, path=None, seed=0):
        """Probe the env for dimensions, build actor/critic networks with
        their targets and optimizers, and upload the target networks."""
        # environment
        self.env = env
        self.env.seed(seed)
        obs = self.env.reset()
        # Fill in env-derived dimensions before the base class consumes them.
        algo_params.update({'state_dim': obs.shape[0],
                            'action_dim': self.env.action_space.shape[0],
                            'action_max': self.env.action_space.high,
                            'action_scaling': self.env.action_space.high[0],
                            'init_input_means': None,
                            'init_input_vars': None
                            })
        self.env.close()

        super(D4PGLearner, self).__init__(algo_params, queues, path=path, seed=seed)
        # categorical distribution atoms
        self.num_atoms = algo_params['num_atoms']
        self.value_max = algo_params['value_max']
        self.value_min = algo_params['value_min']
        # Spacing between adjacent atoms of the value distribution support.
        self.delta_z = (self.value_max - self.value_min) / (self.num_atoms - 1)
        self.support = T.linspace(self.value_min, self.value_max, steps=self.num_atoms, device=self.device)

        self.network_dict.update({
            'actor': Actor(self.state_dim, self.action_dim).to(self.device),
            'actor_target': Actor(self.state_dim, self.action_dim).to(self.device),
            'critic': Critic(self.state_dim + self.action_dim, self.num_atoms, softmax=True).to(self.device),
            'critic_target': Critic(self.state_dim + self.action_dim, self.num_atoms, softmax=True).to(self.device)
        })
        self.actor_optimizer = Adam(self.network_dict['actor'].parameters(), lr=self.actor_learning_rate)
        # tau=1 makes the targets exact copies of the online networks.
        self._soft_update(self.network_dict['actor'], self.network_dict['actor_target'], tau=1)
        self.critic_optimizer = Adam(self.network_dict['critic'].parameters(), lr=self.critic_learning_rate,
                                     weight_decay=algo_params['Q_weight_decay'])
        self._soft_update(self.network_dict['critic'], self.network_dict['critic_target'], tau=1)
        self._upload_learner_networks(keys=['actor_target', 'critic_target'])

    def run(self):
        """Main loop: pull batches from the queue and learn until the shared
        step counter reaches ``learner_steps``, then plot statistics."""
        print('Learner on')
        while self.queues['learner_step_count'].value < self.learner_steps:
            try:
                batch = self.queues['batch_queue'].get_nowait()
            except queue.Empty:
                # print("empty batch queue")
                continue
            self._learn(batch=batch)
            if self.queues['learner_step_count'].value % self.learner_upload_gap == 0:
                self._upload_learner_networks(keys=['actor_target', 'critic_target'])
        print("Saving learner statistics...")
        self._plot_statistics(keys=['actor_loss', 'critic_loss'], save_to_file=True)
        print('Learner shutdown')

    def _learn(self, steps=None, batch=None):
        """Run ``steps`` optimization iterations on one replay batch.

        NOTE(review): the same batch tensors are reused for every iteration
        of the loop — confirm that repeating optimizer steps on a single
        batch is intended when ``optimizer_steps`` > 1.
        """
        if batch is None:
            return
        if steps is None:
            steps = self.optimizer_steps

        for i in range(steps):
            if self.prioritised:
                # Prioritized replay also supplies importance weights and
                # buffer indices for priority updates.
                state, action, next_state, reward, done, weights, inds = batch
                weights = T.as_tensor(weights, device=self.device).view(self.batch_size, 1)
            else:
                state, action, next_state, reward, done = batch
                weights = T.ones(size=(self.batch_size, 1), device=self.device)
                inds = None

            actor_inputs = self.normalizer(state)
            actor_inputs = T.as_tensor(actor_inputs, dtype=T.float32, device=self.device)
            actions = T.as_tensor(action, dtype=T.float32, device=self.device)
            critic_inputs = T.cat((actor_inputs, actions), dim=1)
            actor_inputs_ = self.normalizer(next_state)
            actor_inputs_ = T.as_tensor(actor_inputs_, dtype=T.float32, device=self.device)
            rewards = T.as_tensor(reward, dtype=T.float32, device=self.device)
            done = T.as_tensor(done, dtype=T.float32, device=self.device)

            # Ignore episode terminations caused by time limits by treating
            # every transition as non-terminal.
            if self.discard_time_limit:
                done = done * 0 + 1

            with T.no_grad():
                # Bellman-project the target critic's distribution onto the
                # fixed support (categorical distributional RL).
                actions_ = self.network_dict['actor_target'](actor_inputs_)
                critic_inputs_ = T.cat((actor_inputs_, actions_), dim=1)
                value_dist_ = self.network_dict['critic_target'](critic_inputs_)
                value_dist_target = project_value_distribution(value_dist_, rewards, done, self.num_atoms, self.value_max, self.value_min, self.delta_z, self.gamma)
                value_dist_target = T.as_tensor(value_dist_target, device=self.device)

            # Critic update: per-sample cross-entropy between predicted and
            # projected distributions, importance-weighted.
            self.critic_optimizer.zero_grad()
            value_dist_estimate = self.network_dict['critic'](critic_inputs)
            critic_loss = F.binary_cross_entropy(value_dist_estimate, value_dist_target, reduction='none').sum(dim=1)
            (critic_loss * weights).mean().backward()
            self.critic_optimizer.step()

            if self.prioritised:
                # Report new per-sample TD errors as replay priorities;
                # drop them silently if the queue is full.
                try:
                    self.queues['priority_queue'].put((inds, np.abs(critic_loss.cpu().detach().numpy())))
                except queue.Full:
                    pass

            self.actor_optimizer.zero_grad()
            new_actions = self.network_dict['actor'](actor_inputs)
            critic_eval_inputs = T.cat((actor_inputs, new_actions), dim=1).to(self.device)
            # take the expectation of the value distribution as the policy loss
            actor_loss = -(self.network_dict['critic'](critic_eval_inputs) * self.support)
            actor_loss = actor_loss.sum(dim=1)
            actor_loss.mean().backward()
            self.actor_optimizer.step()

            self._soft_update(self.network_dict['actor'], self.network_dict['actor_target'])
            self._soft_update(self.network_dict['critic'], self.network_dict['critic_target'])

            # NOTE(review): these append 0-dim tensors, not Python floats —
            # confirm _plot_statistics handles tensors (or add .item()).
            self.statistic_dict['critic_loss'].append(critic_loss.detach().mean())
            self.statistic_dict['actor_loss'].append(actor_loss.detach().mean())

            self.queues['learner_step_count'].value += 1
0
    def create_network(self, config):
        """Build the convolutional encoder and its two latent heads from `config`.

        Creates `self.model` (conv stem + residual downsampling tower + flatten),
        `self.latent_1_model` / `self.latent_2_model` (dense projection heads),
        and `self.optimizer` (Adam over all parameters).

        Args:
            config: dict with required keys `image_channels`, `image_size`,
                `latent_dim_1`, `latent_dim_2`, `downsample_method`,
                `model_channels`, `max_channels`, `normalization`, `activation`;
                optional keys fall back to the defaults below.
        """
        self.image_channels = config['image_channels']
        self.image_size = config['image_size']

        self.latent_dim_1 = config['latent_dim_1']
        self.latent_dim_2 = config['latent_dim_2']

        # Optional hyper-parameters: dict.get replaces the verbose
        # "config[k] if k in config else default" pattern.
        # NOTE(review): the learning-rate default of 1 is preserved from the
        # original but looks very large for Adam -- confirm callers always
        # supply 'learning_rate' explicitly.
        learning_rate = config.get('learning_rate', 1)
        beta1 = config.get('beta1', 0.9)
        beta2 = config.get('beta2', 0.999)

        downsample_method = config['downsample_method']
        model_channels = config['model_channels']
        max_channels = config['max_channels']
        skip_blocks = config.get('skip_blocks', 1)
        kernel = config.get('kernel', 3)

        dense_layers_1 = config.get('dense_layers_1', 1)
        dense_layers_2 = config.get('dense_layers_2', 1)

        normalization = config['normalization']
        activation = config['activation']

        output_activation_1 = config.get('output_activation_1', activation)
        output_activation_2 = config.get('output_activation_2', activation)

        # Validate the configuration early with explicit messages.
        assert dense_layers_1 >= 1, "Cannot have negative dense layers!"
        assert dense_layers_2 >= 1, "Cannot have negative dense layers!"
        assert skip_blocks >= 1, "Cannot have negative skip layers!"
        assert kernel >= 1, "Cannot have negative kernel size!"
        assert kernel % 2 == 1, "Kernel must be an odd number value!"
        assert self.image_size >= 4, "Image size must be at least 4!"
        assert self.latent_dim_1 >= 1, "Latent dimensions must be at least 1!"
        assert self.latent_dim_2 >= 1, "Latent dimensions must be at least 1!"

        # Downsample until the spatial resolution reaches 4x4.
        downsamples = int(log2(self.image_size)) - 2

        def res_block():
            # Residual downsampling tower: channels double each stage (capped
            # at max_channels) while the spatial size halves.
            blocks = []
            for index in range(downsamples):
                in_channels = min((1 << index) * 1 * model_channels,
                                  max_channels)
                out_channels = min((1 << index) * 2 * model_channels,
                                   max_channels)

                blocks.append(
                    ResidualBlockDown(in_channels, out_channels,
                                      self.image_size >> index,
                                      downsample_method, kernel, normalization,
                                      activation))

                # Extra non-downsampling residual blocks within the stage.
                for _ in range(skip_blocks - 1):
                    blocks.append(
                        ResidualBlockDown(out_channels, out_channels,
                                          self.image_size >> (index + 1),
                                          'none', kernel, normalization,
                                          activation))

            return blocks

        def dense_block(layers, latent, output_activation):
            # Dense head: flatten(4x4 feature map) -> latent, with optional
            # extra latent->latent layers, ending in output_activation.
            blocks = []

            dense_channels = min(
                (1 << downsamples) * model_channels, max_channels) * 4 * 4
            blocks.append(nn.Linear(dense_channels, latent))

            if layers > 1:
                blocks.append(get_normalization_1d(normalization, latent))
                blocks.append(get_activation(activation))

            for i in range(layers - 1):
                blocks.append(nn.Linear(latent, latent))

                # No norm/activation after the final linear layer.
                if i < layers - 2:
                    blocks.append(get_normalization_1d(normalization, latent))
                    blocks.append(get_activation(activation))

            blocks.append(get_activation(output_activation))
            return blocks

        self.model = nn.Sequential(
            nn.Conv2d(self.image_channels, model_channels, kernel, 1,
                      kernel // 2),
            *res_block(),
            nn.Flatten(),
        )

        self.latent_1_model = nn.Sequential(*dense_block(
            dense_layers_1, self.latent_dim_1, output_activation_1))
        self.latent_2_model = nn.Sequential(*dense_block(
            dense_layers_2, self.latent_dim_2, output_activation_2))

        self.optimizer = Adam(self.parameters(),
                              lr=learning_rate,
                              betas=(beta1, beta2))
Ejemplo n.º 17
0
def test_RPS_SCNN_training():
    """Smoke test: one training epoch of RPS_SCNN on dummy data must not raise."""

    def constant_triplet(count):
        # (input, target, auxiliary) tensors with the shapes RPS_SCNN expects.
        return TensorDataset(
            torch.ones([count, 1, 204, 501]),
            torch.zeros([count, 2]),
            torch.ones([count, 204, 6]),
        )

    train_set = constant_triplet(50)
    valid_set = constant_triplet(10)

    print(len(train_set))

    device = "cpu"

    trainloader = DataLoader(
        train_set, batch_size=10, shuffle=False, num_workers=1
    )
    validloader = DataLoader(
        valid_set, batch_size=2, shuffle=False, num_workers=1
    )

    net = models.RPS_SCNN(
        2,          # n_spatial_layer
        [154, 51],  # spatial_kernel_size
        1,          # temporal_n_block
        [250],      # temporal_kernel_size
        501,        # sequence length
        3,          # mlp_n_layer
        1024,       # mlp_hidden
        0.5,        # mlp_dropout
        max_pool=2,
    )

    print(net)

    optimizer = Adam(net.parameters(), lr=0.00001)
    loss_function = torch.nn.MSELoss()

    print("begin training...")
    model, _, _ = train_bp(
        net,
        trainloader,
        validloader,
        optimizer,
        loss_function,
        device,
        1,   # epochs
        10,
        0,
        "",
    )

    print("Training do not rise error")
Ejemplo n.º 18
0
    def train(self):
        """Run the full fine-tuning loop over `self.train_dataset`.

        Optimizes `self.model` with Adam + OneCycleLR, optionally adds a
        XeroAlign MSE loss between CLS representations of parallel sentence
        pairs, and evaluates on the dev split after every epoch.

        Fix: the per-epoch train-loss log divided by `global_step`, which is 0
        when fewer than `gradient_accumulation_steps` batches have been seen,
        raising ZeroDivisionError; it now uses the same `max(1, global_step)`
        guard that the align-loss log already used.

        Raises:
            Exception: if `self.args.task` is not a recognised task name.
        """
        train_sampler = RandomSampler(self.train_dataset)
        train_dataloader = DataLoader(self.train_dataset, sampler=train_sampler, batch_size=self.args.train_batch_size)

        # Total optimizer steps = (batches per epoch // accumulation) * epochs.
        t_total = len(train_dataloader) // self.args.gradient_accumulation_steps * self.args.num_train_epochs
        optimizer = Adam(self.model.parameters(), lr=self.args.learning_rate)
        scheduler = OneCycleLR(optimizer, max_lr=self.args.learning_rate, total_steps=t_total)

        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(self.train_dataset))
        logger.info("  Num Epochs = %d", self.args.num_train_epochs)
        logger.info("  Train batch size = %d", self.args.train_batch_size)
        logger.info("  Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
        logger.info("  Total optimization steps = %d", t_total)

        global_step = 0
        tr_loss, align_loss = 0.0, 0.0
        self.model.zero_grad()

        train_iterator = trange(int(self.args.num_train_epochs), desc="Epoch")

        for _ in train_iterator:
            epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=True)
            for step, batch in enumerate(epoch_iterator):
                self.model.train()
                batch = tuple(t.to(self.device) for t in batch)  # GPU or CPU

                # Build model inputs per task family.
                if self.args.task in [Tasks.MTOD.value, Tasks.MTOP.value, Tasks.M_ATIS.value]:
                    inputs = {'input_ids': batch[0], 'intent_labels': batch[1], 'slot_labels': batch[2]}
                elif self.args.task in [Tasks.PAWS_X.value]:
                    inputs = {'input_ids': batch[0], 'labels': batch[1]}
                else:
                    raise Exception("The task name '%s' is not recognised/supported." % self.args.task)

                outputs = self.model(**inputs)
                loss = outputs[0]
                tr_loss += loss.item()

                if self.args.align_languages:
                    # XeroAlign: pull the CLS representations of a random batch
                    # of parallel sentence pairs together with an MSE loss.
                    encoder = self.model.roberta
                    indices = random.sample(range(len(self.alignment_dataset)), self.args.train_batch_size)

                    batch_one = torch.stack([self.alignment_dataset[index][0] for index in indices]).to(self.device)
                    outputs = encoder(input_ids=batch_one)
                    cls_logits = outputs[1]

                    batch_two = torch.stack([self.alignment_dataset[index][1] for index in indices]).to(self.device)
                    outputs = encoder(input_ids=batch_two)
                    cls_target = outputs[1]

                    loss_fn = MSELoss()
                    xero_align_loss = loss_fn(input=cls_logits, target=cls_target)
                    align_loss += xero_align_loss.item()
                    loss += xero_align_loss

                if self.args.gradient_accumulation_steps > 1:
                    loss = loss / self.args.gradient_accumulation_steps

                loss.backward()  # Only do this once for all losses

                if (step + 1) % self.args.gradient_accumulation_steps == 0:
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)
                    optimizer.step()
                    scheduler.step()
                    self.model.zero_grad()
                    global_step += 1

            # Per-epoch dev evaluation.
            if self.args.task in [Tasks.MTOD.value, Tasks.MTOP.value, Tasks.M_ATIS.value]:
                self.evaluate_xnlu("dev", exp_name=self.args.model_dir)
            elif self.args.task in [Tasks.PAWS_X.value]:
                self.evaluate_pair('dev', exp_name=self.args.model_dir)
            else:
                raise Exception("The task name '%s' is not recognised/supported." % self.args.task)
            logging.info("--------------------------------------")
            # Guard against global_step == 0 (no optimizer step yet), matching
            # the align-loss line below.
            logging.info("Train loss after %d steps: %.3f" % (global_step, (tr_loss / max(1, global_step))))
            if self.args.align_languages:
                logging.info("Align Loss after %d steps: %.3f" % (global_step, align_loss / max(1, global_step)))
            logging.info("--------------------------------------")
Ejemplo n.º 19
0
    def impute(self, configuration: Configuration, metadata: Metadata,
               architecture: Architecture, batch: Dict[str, Tensor]) -> Tensor:
        """Impute missing values by optimizing the generator's input noise.

        The generator itself is not updated (it is put in eval mode and only
        the noise tensor is given to the optimizer); gradient descent on the
        noise drives the generated sample towards the observed (non-missing)
        entries of the batch. The final generation is returned.

        Args:
            configuration: run settings; reads `reconstruction_loss`,
                `noise_learning_rate`, `max_iterations`, `logs`, and the
                optional `tolerance` and `log_missing_loss` entries.
            metadata: dataset metadata passed to the loss component factory.
            architecture: provides `generator` and `arguments.noise_size`.
            batch: dict with "features" and "missing_mask"; "raw_features"
                (ground truth) is only read for debug logging, and "labels"
                is an optional generator condition.

        Returns:
            Tensor of generated (imputed) features.
        """
        # loss function
        loss_function = create_component(architecture, metadata,
                                         configuration.reconstruction_loss)
        masked_loss_function = MaskedReconstructionLoss(loss_function)
        # total number of scalar entries, used to normalize the debug loss
        batch_size = batch["features"].shape[0] * batch["features"].shape[1]
        # we need the non missing mask for the loss
        non_missing_mask = inverse_mask(batch["missing_mask"])

        # initial noise, drawn from a standard normal
        noise = to_gpu_if_available(
            FloatTensor(len(batch["features"]),
                        architecture.arguments.noise_size).normal_())
        noise.requires_grad_()

        # it is not the generator what we are updating
        # it is the noise
        optimizer = Adam([noise],
                         weight_decay=0,
                         lr=configuration.noise_learning_rate)
        architecture.generator.eval()

        # logger
        log_path = create_parent_directories_if_needed(configuration.logs)
        logger = TrainLogger(self.logger, log_path, False)

        # initial generation
        logger.start_timer()
        generated = architecture.generator(noise,
                                           condition=batch.get("labels"))

        # iterate until we reach the maximum number of iterations or until the non missing loss is too small
        max_iterations = configuration.max_iterations
        for iteration in range(1, max_iterations + 1):
            # compute the loss on the non-missing values
            non_missing_loss = masked_loss_function(generated,
                                                    batch["features"],
                                                    non_missing_mask)
            logger.log(iteration, max_iterations, "non_missing_loss",
                       to_cpu_if_was_in_gpu(non_missing_loss).item())

            # this loss only makes sense if the ground truth is present
            # only used for debugging
            if configuration.get("log_missing_loss", False):
                # this part should not affect the gradient calculation
                with torch.no_grad():
                    missing_loss = masked_loss_function(
                        generated, batch["raw_features"],
                        batch["missing_mask"])
                    logger.log(iteration, max_iterations, "missing_loss",
                               to_cpu_if_was_in_gpu(missing_loss).item())

                    loss = loss_function(generated,
                                         batch["raw_features"]) / batch_size
                    logger.log(iteration, max_iterations, "loss",
                               to_cpu_if_was_in_gpu(loss).item())

            # if the generation is good enough we stop
            if to_cpu_if_was_in_gpu(non_missing_loss).item(
            ) < configuration.get("tolerance", 1e-5):
                break

            # clear previous gradients
            optimizer.zero_grad()
            # compute the gradients
            non_missing_loss.backward()
            # update the noise
            optimizer.step()

            # generate next
            logger.start_timer()
            generated = architecture.generator(noise,
                                               condition=batch.get("labels"))

        return generated
Ejemplo n.º 20
0
    def initialize_params(self, input_dim):
        """Lazily build the query/key/value/output projections; no-op once built."""
        if self.__input_dim is not None:
            return

        self.__input_dim = input_dim

        def _build_projection(in_features):
            # Bias-free linear layer; weight is set by the user-supplied
            # initializer when given, otherwise by Xavier-normal (in place).
            layer = nn.Linear(
                in_features,
                self.depth_dim,
                bias=False,
            )
            if self.initializer_f is not None:
                layer.weight = self.initializer_f(layer.weight)
            else:
                layer.weight = torch.nn.init.xavier_normal_(
                    layer.weight,
                    gain=1.0
                )
            return layer

        # Build in the same order as before so RNG consumption is unchanged.
        self.query_dense_layer = _build_projection(input_dim)
        self.key_dense_layer = _build_projection(input_dim)
        self.value_dense_layer = _build_projection(input_dim)
        self.output_dense_layer = _build_projection(self.depth_dim)

        self.to(self.__ctx)

        if self.init_deferred_flag is False:
            if self.__not_init_flag is False:
                if self.optimizer_f is not None:
                    self.optimizer = self.optimizer_f(
                        self.parameters()
                    )
                else:
                    self.optimizer = Adam(
                        self.parameters(),
                        lr=self.__learning_rate,
                    )
Ejemplo n.º 21
0
    def __init__(self,
                 algo_params,
                 env,
                 transition_tuple=None,
                 path=None,
                 seed=-1):
        """Construct the SACAE agent: pixel env wrappers, networks, optimizers.

        Args:
            algo_params: dict of hyper-parameters; this constructor reads
                image resize/crop sizes, `frame_stack`, training-schedule
                values, `alpha`, `q_regularisation_k`, `warmup_step` and the
                actor/critic update intervals, and injects the observed
                `state_shape` and action-space values back into it.
            env: pybullet gym environment, wrapped for pixel observations.
            transition_tuple: optional transition tuple forwarded to the base
                class's replay buffer setup.
            path: optional checkpoint/log directory forwarded to the base class.
            seed: RNG seed forwarded to the environment.
        """
        # environment
        self.env = PixelPybulletGym(
            env,
            image_size=algo_params['image_resize_size'],
            crop_size=algo_params['image_crop_size'])
        self.frame_stack = algo_params['frame_stack']
        self.env = FrameStack(self.env, k=self.frame_stack)
        self.env.seed(seed)
        # reset once so the observed shape can be recorded in algo_params
        obs = self.env.reset()
        algo_params.update({
            'state_shape':
            obs.shape,  # make sure the shape is like (C, H, W), not (H, W, C)
            'action_dim': self.env.action_space.shape[0],
            'action_max': self.env.action_space.high,
            'action_scaling': self.env.action_space.high[0],
        })
        # training args
        self.max_env_step = algo_params['max_env_step']
        self.testing_gap = algo_params['testing_gap']
        self.testing_episodes = algo_params['testing_episodes']
        self.saving_gap = algo_params['saving_gap']

        super(SACAE, self).__init__(algo_params,
                                    transition_tuple=transition_tuple,
                                    image_obs=True,
                                    training_mode='step_based',
                                    path=path,
                                    seed=seed)
        # torch
        # online networks share `self.encoder`; target critics use a separate
        # target encoder
        self.encoder = PixelEncoder(self.state_shape)
        self.encoder_target = PixelEncoder(self.state_shape)
        self.network_dict.update({
            'actor':
            StochasticConvActor(self.action_dim,
                                encoder=self.encoder,
                                detach_obs_encoder=True).to(self.device),
            'critic_1':
            ConvCritic(self.action_dim,
                       encoder=self.encoder,
                       detach_obs_encoder=False).to(self.device),
            'critic_1_target':
            ConvCritic(self.action_dim,
                       encoder=self.encoder_target,
                       detach_obs_encoder=True).to(self.device),
            'critic_2':
            ConvCritic(self.action_dim,
                       encoder=self.encoder,
                       detach_obs_encoder=False).to(self.device),
            'critic_2_target':
            ConvCritic(self.action_dim,
                       encoder=self.encoder_target,
                       detach_obs_encoder=True).to(self.device),
            'alpha':
            algo_params['alpha'],
            'log_alpha':
            T.tensor(np.log(algo_params['alpha']),
                     requires_grad=True,
                     device=self.device),
        })
        self.network_keys_to_save = ['actor']
        self.actor_optimizer = Adam(self.network_dict['actor'].parameters(),
                                    lr=self.actor_learning_rate)
        self.critic_1_optimizer = Adam(
            self.network_dict['critic_1'].parameters(),
            lr=self.critic_learning_rate)
        self.critic_2_optimizer = Adam(
            self.network_dict['critic_2'].parameters(),
            lr=self.critic_learning_rate)
        # tau=1: presumably copies the online critic weights outright into the
        # targets (hard initialization) -- confirm against _soft_update
        self._soft_update(self.network_dict['critic_1'],
                          self.network_dict['critic_1_target'],
                          tau=1)
        self._soft_update(self.network_dict['critic_2'],
                          self.network_dict['critic_2_target'],
                          tau=1)
        # standard SAC entropy target: -|A|
        self.target_entropy = -self.action_dim
        self.alpha_optimizer = Adam([self.network_dict['log_alpha']],
                                    lr=self.actor_learning_rate)
        # augmentation args
        # pad-then-random-crop shift augmentation on the (H, W) dims
        self.image_random_shift = T.nn.Sequential(
            T.nn.ReplicationPad2d(4), aug.RandomCrop(self.state_shape[-2:]))
        self.q_regularisation_k = algo_params['q_regularisation_k']
        # training args
        self.warmup_step = algo_params['warmup_step']
        self.actor_update_interval = algo_params['actor_update_interval']
        self.critic_target_update_interval = algo_params[
            'critic_target_update_interval']
        # statistic dict
        self.statistic_dict.update({
            'episode_return': [],
            'env_step_return': [],
            'env_step_test_return': [],
            'alpha': [],
            'policy_entropy': [],
        })
Ejemplo n.º 22
0
class AttentionModel(nn.Module, ObservableData):
    '''
    Attention Model.

    Single-head dot-product attention implemented in PyTorch (no 1/sqrt(d)
    scaling is applied), with lazily initialized projection layers and an
    optional output network applied to the attention result.

    References:
        - Bahdanau, D., Cho, K., & Bengio, Y. (2014). Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473.
        - Floridi, L., & Chiriatti, M. (2020). GPT-3: Its nature, scope, limits, and consequences. Minds and Machines, 30(4), 681-694.
        - Miller, A., Fisch, A., Dodge, J., Karimi, A. H., Bordes, A., & Weston, J. (2016). Key-value memory networks for directly reading documents. arXiv preprint arXiv:1606.03126.
        - Radford, A., Narasimhan, K., Salimans, T., & Sutskever, I. (2018) Improving Language Understanding by Generative Pre-Training. OpenAI (URL: https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf)
        - Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., & Sutskever, I. (2019). Language models are unsupervised multitask learners. OpenAI blog, 1(8), 9.
        - Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., & Polosukhin, I. (2017). Attention is all you need. arXiv preprint arXiv:1706.03762.

    '''

    # `bool` that means initialization in this class will be deferred or not.
    __init_deferred_flag = False

    # Dimension of the query/key/value/output projections; set via the
    # `depth_dim` property in `__init__`.
    __depth_dim = None

    def get_depth_dim(self):
        ''' getter for the projection depth. '''
        return self.__depth_dim

    def set_depth_dim(self, value):
        ''' setter for the projection depth. '''
        self.__depth_dim = value

    depth_dim = property(get_depth_dim, set_depth_dim)

    def __init__(
        self,
        depth_dim,
        computable_loss,
        initializer_f=None,
        optimizer_f=None,
        dropout_rate=0.5,
        learning_rate=1e-05,
        ctx="cpu",
        regularizatable_data_list=[],
        not_init_flag=False,
        output_nn=None,
    ):
        '''
        Init.

        Args:
            depth_dim:                      `int` of dimension of the dense (projection) layers.
            computable_loss:                is-a `ComputableLoss` or `torch.nn.modules.loss._Loss`.
            initializer_f:                  function that initializes a weight tensor. If `None`, Xavier-normal initialization is used.
            optimizer_f:                    function building an optimizer from `parameters()`. If `None`, `Adam` is used.
            dropout_rate:                   `float` of dropout rate applied to the attention weights.
            learning_rate:                  `float` of learning rate for the default `Adam` optimizer.
            ctx:                            device identifier such as `"cpu"` or `"cuda"`.
            regularizatable_data_list:      `list` of `RegularizatableData`.
            not_init_flag:                  `bool` of whether to skip optimizer setup in `initialize_params`.
            output_nn:                      optional `nn.Module` applied to the attention output.
        '''
        if isinstance(depth_dim, int) is False:
            raise TypeError("The type of `depth_dim` must be `int`.")
        if isinstance(computable_loss, ComputableLoss) is False and isinstance(computable_loss, nn.modules.loss._Loss) is False:
            raise TypeError("The type of `computable_loss` must be `ComputableLoss` or `gluon.loss.Loss`.")

        super(AttentionModel, self).__init__()
        self.__computable_loss = computable_loss
        self.initializer_f = initializer_f
        self.optimizer_f = optimizer_f
        self.dropout = nn.Dropout(p=dropout_rate)
        self.softmax = nn.Softmax(dim=-1)
        self.output_nn = output_nn

        # NOTE(review): `regularizatable_data_list=[]` is a mutable default;
        # it is only iterated and stored here, but confirm no caller mutates
        # the shared list in place.
        for v in regularizatable_data_list:
            if isinstance(v, RegularizatableData) is False:
                raise TypeError("The type of values of `regularizatable_data_list` must be `RegularizatableData`.")
        self.__regularizatable_data_list = regularizatable_data_list

        self.__ctx = ctx
        self.ctx = ctx
        self.__learning_rate = learning_rate

        self.depth_dim = depth_dim

        logger = getLogger("accelbrainbase")
        self.__logger = logger

        # input_dim is unknown until the first batch; see initialize_params.
        self.__input_dim = None
        self.__not_init_flag = not_init_flag

        self.epoch = 0

    def initialize_params(self, input_dim):
        '''
        Lazily build the query/key/value/output projection layers.

        No-op after the first call (once `self.__input_dim` is set).

        Args:
            input_dim:  `int` of the last dimension of the observed data points.
        '''
        if self.__input_dim is not None:
            return

        self.__input_dim = input_dim

        self.query_dense_layer = nn.Linear(
            input_dim,
            self.depth_dim,
            bias=False,
        )
        if self.initializer_f is not None:
            self.query_dense_layer.weight = self.initializer_f(
                self.query_dense_layer.weight
            )
        else:
            self.query_dense_layer.weight = torch.nn.init.xavier_normal_(
                self.query_dense_layer.weight,
                gain=1.0
            )

        self.key_dense_layer = nn.Linear(
            input_dim,
            self.depth_dim,
            bias=False,
        )
        if self.initializer_f is not None:
            self.key_dense_layer.weight = self.initializer_f(
                self.key_dense_layer.weight
            )
        else:
            self.key_dense_layer.weight = torch.nn.init.xavier_normal_(
                self.key_dense_layer.weight,
                gain=1.0
            )

        self.value_dense_layer = nn.Linear(
            input_dim,
            self.depth_dim,
            bias=False,
        )
        if self.initializer_f is not None:
            self.value_dense_layer.weight = self.initializer_f(
                self.value_dense_layer.weight
            )
        else:
            self.value_dense_layer.weight = torch.nn.init.xavier_normal_(
                self.value_dense_layer.weight,
                gain=1.0
            )

        self.output_dense_layer = nn.Linear(
            self.depth_dim,
            self.depth_dim,
            bias=False,
        )
        if self.initializer_f is not None:
            self.output_dense_layer.weight = self.initializer_f(
                self.output_dense_layer.weight
            )
        else:
            self.output_dense_layer.weight = torch.nn.init.xavier_normal_(
                self.output_dense_layer.weight,
                gain=1.0
            )

        self.to(self.__ctx)

        if self.init_deferred_flag is False:
            if self.__not_init_flag is False:
                if self.optimizer_f is not None:
                    self.optimizer = self.optimizer_f(
                        self.parameters()
                    )
                else:
                    self.optimizer = Adam(
                        self.parameters(),
                        lr=self.__learning_rate,
                    )

    def learn(self, iteratable_data):
        '''
        Learn samples drawn by `IteratableData.generate_learned_samples()`.

        Args:
            iteratable_data:     is-a `IteratableData`.
        '''
        if isinstance(iteratable_data, IteratableData) is False:
            raise TypeError("The type of `iteratable_data` must be `IteratableData`.")

        self.__loss_list = []
        learning_rate = self.__learning_rate

        pre_batch_observed_arr = None
        pre_test_batch_observed_arr = None
        try:
            epoch = self.epoch
            iter_n = 0
            for batch_observed_arr, batch_target_arr, test_batch_observed_arr, test_batch_target_arr in iteratable_data.generate_learned_samples():
                # Self-Attention.
                self.__batch_size = batch_observed_arr.shape[0]
                self.__seq_len = batch_observed_arr.shape[1]
                if len(batch_observed_arr.shape) == 2:
                    # NOTE(review): `torch.unsqueeze`'s keyword is `dim`;
                    # `axis=` relies on the numpy-compat alias -- confirm it
                    # works on the targeted torch version.
                    batch_observed_arr = torch.unsqueeze(batch_observed_arr, axis=1)
                elif len(batch_observed_arr.shape) > 3:
                    # Flatten any trailing dims into (batch, seq, features).
                    batch_observed_arr = batch_observed_arr.reshape((
                        self.__batch_size,
                        self.__seq_len,
                        -1
                    ))

                self.initialize_params(
                    input_dim=batch_observed_arr.shape[2]
                )
                self.epoch = epoch

                if self.output_nn is not None:
                    # One dry-run inference so output_nn builds its optimizer.
                    if hasattr(self.output_nn, "optimizer") is False:
                        _ = self.inference(batch_observed_arr, batch_observed_arr)

                self.optimizer.zero_grad()
                if self.output_nn is not None:
                    self.output_nn.optimizer.zero_grad()

                pred_arr = self.inference(batch_observed_arr, batch_observed_arr)
                loss = self.compute_loss(
                    pred_arr,
                    batch_target_arr
                )
                loss.backward()
                if self.output_nn is not None:
                    self.output_nn.optimizer.step()
                self.optimizer.step()
                self.regularize()

                # Evaluate on the test batch once per "epoch" worth of iterations.
                if (iter_n+1) % int(iteratable_data.iter_n / iteratable_data.epochs) == 0:
                    with torch.inference_mode():
                        if len(test_batch_observed_arr.shape) == 2:
                            test_batch_observed_arr = torch.unsqueeze(test_batch_observed_arr, axis=1)

                        test_pred_arr = self.inference(test_batch_observed_arr, test_batch_observed_arr)

                        test_loss = self.compute_loss(
                            test_pred_arr,
                            test_batch_target_arr
                        )
                    _loss = loss.to('cpu').detach().numpy().copy()
                    _test_loss = test_loss.to('cpu').detach().numpy().copy()

                    self.__loss_list.append((_loss, _test_loss))
                    self.__logger.debug("Epochs: " + str(epoch + 1) + " Train loss: " + str(_loss) + " Test loss: " + str(_test_loss))
                    epoch += 1
                iter_n += 1

        except KeyboardInterrupt:
            self.__logger.debug("Interrupt.")

        self.__logger.debug("end. ")
        self.epoch = epoch

    def inference(self, observed_arr, memory_arr):
        '''
        Inference samples drawn by `IteratableData.generate_inferenced_samples()`.

        Args:
            observed_arr:   Array like or sparse matrix as the observed data points.
            memory_arr:     Array like or sparse matrix as the observed data points.

        Returns:
            `torch.Tensor` of inferenced feature points.
        '''
        return self(observed_arr, memory_arr)

    def compute_loss(self, pred_arr, labeled_arr):
        '''
        Compute loss.

        Args:
            pred_arr:       `torch.Tensor` of predictions.
            labeled_arr:    `torch.Tensor` of labels.

        Returns:
            loss.
        '''
        return self.__computable_loss(pred_arr, labeled_arr)

    def regularize(self):
        '''
        Regularization.

        Applies each registered `RegularizatableData` to the learned
        parameters and loads the result back into the module.
        '''
        if len(self.__regularizatable_data_list) > 0:
            params_dict = self.extract_learned_dict()
            for regularizatable in self.__regularizatable_data_list:
                params_dict = regularizatable.regularize(params_dict)

            for k, params in params_dict.items():
                self.load_state_dict({k: params}, strict=False)

    def extract_learned_dict(self):
        '''
        Extract (pre-) learned parameters.

        Returns:
            `dict` of the parameters.
        '''
        params_dict = {}
        for k in self.state_dict().keys():
            params_dict.setdefault(k, self.state_dict()[k])

        return params_dict

    def forward(self, x, m):
        '''
        Forward pass of the attention block.

        Args:
            x:      `torch.Tensor` of observed data points (queries).
            m:      `torch.Tensor` of memory. The shape is (batch_size, length of memory, depth).

        Returns:
            `torch.Tensor` of inferenced feature points.
        '''
        # Self-Attention.
        self.__batch_size = x.shape[0]
        self.__seq_len = x.shape[1]
        if len(x.shape) == 2:
            # NOTE(review): `axis=` relies on torch's numpy-compat alias for
            # `dim` -- confirm on the targeted torch version.
            x = torch.unsqueeze(x, axis=1)
        elif len(x.shape) > 3:
            x = x.reshape((
                self.__batch_size,
                self.__seq_len,
                -1
            ))

        m = m.reshape_as(x)

        self.initialize_params(
            input_dim=x.shape[2]
        )

        query_arr = self.query_dense_layer(x)
        key_arr = self.key_dense_layer(m)
        value_arr = self.value_dense_layer(m)
        # NOTE(review): `reshape` is used where Q.K^T would normally need a
        # transpose; reshape reorders elements rather than transposing the
        # last two dims -- confirm this is intended (vs. `permute(0, 2, 1)`).
        logit_arr = torch.bmm(query_arr, key_arr.reshape((
            key_arr.shape[0],
            key_arr.shape[2],
            key_arr.shape[1]
        )))
        attention_weight_arr = self.softmax(logit_arr)
        attention_weight_arr = self.dropout(attention_weight_arr)
        # NOTE(review): same reshape-vs-transpose concern for the values.
        attention_output_arr = torch.bmm(
            attention_weight_arr,
            value_arr.reshape((
                value_arr.shape[0],
                value_arr.shape[2],
                value_arr.shape[1]
            ))
        )
        output_arr = self.output_dense_layer(attention_output_arr)

        if self.output_nn is not None:
            output_arr = self.output_nn(output_arr)

        return output_arr

    def set_readonly(self, value):
        ''' setter (disabled): this property is read-only. '''
        raise TypeError("This property must be read-only.")

    def get_loss_arr(self):
        ''' getter for losses. '''
        return np.array(self.__loss_list)

    loss_arr = property(get_loss_arr, set_readonly)

    def get_init_deferred_flag(self):
        ''' getter for `bool` that means initialization in this class will be deferred or not. '''
        return self.__init_deferred_flag

    def set_init_deferred_flag(self, value):
        ''' setter for `bool` that means initialization in this class will be deferred or not. '''
        self.__init_deferred_flag = value

    init_deferred_flag = property(get_init_deferred_flag, set_init_deferred_flag)
Ejemplo n.º 23
0
class Trainer:
    """Training/validation driver around a mask-detection ``Detector``.

    Typical use: construct, call ``make_train_config`` once, then alternate
    ``fit_one_epoch`` and ``eval_one_epoch``.
    """

    def __init__(self, classes_info, model_info, size):
        # Run on GPU when one is available, otherwise fall back to CPU.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.det = Detector(classes_info, model_info, self.device)
        # self.det.load_model("checkpoints/model_0.6161.pth")
        self.has_train_config = False
        self.size = size  # square side length images are cropped/resized to

    def make_train_config(self, image_path, train_path, val_path, lr,
                          batch_size, num_workers):
        """Build optimizer, data loaders and loss; must precede training."""
        self._make_optimizer(lr)
        self._make_dataset(image_path, train_path, batch_size, val_path,
                           num_workers)
        self._make_criterion()
        self.min_loss = float("inf")  # best (lowest) validation loss so far
        self.has_train_config = True

    def _make_dataset(self,
                      image_path,
                      train_path,
                      batch_size,
                      val_path=None,
                      num_workers=6):
        """Create the train DataLoader and, when ``val_path`` is given, a val one."""
        # Both splits use identical, deterministic preprocessing.
        train_trans = Compose([
            CropResize(self.size),
            Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])
        train_data = MaskData(image_path, train_path, (self.size, self.size),
                              train_trans)
        # Positional ``True`` is DataLoader's ``shuffle`` flag.
        self.train_loader = DataLoader(train_data,
                                       batch_size,
                                       True,
                                       num_workers=num_workers,
                                       pin_memory=True)
        if val_path:
            val_trans = Compose([
                CropResize(self.size),
                Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
            ])
            val_data = MaskData(image_path, val_path, (self.size, self.size),
                                val_trans)
            # Validation loader is unshuffled by default.
            self.val_loader = DataLoader(val_data,
                                         batch_size,
                                         num_workers=num_workers,
                                         pin_memory=True)
        else:
            self.val_loader = None

    def _make_criterion(self):
        """Loss used for both training and validation."""
        self.criterion = FocalLoss()

    def _make_optimizer(self, lr):
        """Adam with a small weight decay over the detector's parameters."""
        self.op = Adam(self.det.get_param(), lr=lr, weight_decay=5e-4)

    def fit_one_epoch(self):
        """Run one training epoch; returns the formatted average loss."""
        assert self.has_train_config, "还没配置好训练参数"
        avg_loss = AVGMetrics("train_loss")
        self.det.set_status("train")
        with tqdm(self.train_loader, desc="train") as pbar:
            for data in pbar:
                self.op.zero_grad()
                data = {k: v.to(self.device) for k, v in data.items()}
                images = data["image"]
                pred = self.det.inference(images)
                loss, scalar = self.criterion(pred, data)
                avg_loss.update(loss.item(), len(images))
                loss.backward()
                self.op.step()
                pbar.set_postfix(**scalar)
        return str(avg_loss)

    def eval_one_epoch(self, save_dir, prefix=""):
        """Evaluate on the validation split; checkpoint when loss improves."""
        assert self.val_loader, "验证集没有构建"
        avg_loss = AVGMetrics("val_loss")
        self.det.set_status("eval")
        with torch.no_grad():
            # BUG FIX: iterate the *validation* loader — the original looped
            # over ``self.train_loader``, so "val" loss was measured on the
            # training data and checkpoints were selected on the wrong split.
            with tqdm(self.val_loader, desc="eval") as pbar:
                for data in pbar:
                    data = {k: v.to(self.device) for k, v in data.items()}
                    images = data["image"]
                    pred = self.det.inference(images)
                    loss, scalar = self.criterion(pred, data)
                    avg_loss.update(loss.item(), len(images))
                    pbar.set_postfix(**scalar)
        if self.min_loss > avg_loss():
            self.min_loss = avg_loss()
            self.det.save_model(f"{save_dir}/{prefix}_{self.min_loss}.pth")
            print(f"min_loss update to {self.min_loss}")
        return str(avg_loss)
# ---------------------------------------------------------------------------
# Script body: wire up a SAC-style agent and run it to completion.
# NOTE(review): ``environment``, ``state_dim``, ``action_dim``, ``use_gpu``
# and the hyper-parameters referenced below are defined earlier in the
# original script, above this chunk.
# ---------------------------------------------------------------------------
print('state_dim: ', state_dim)

if environment.is_discrete():
    print('discrete')
else:
    print('continous')

# Fall back to the environment's action dimension when not set explicitly.
if action_dim is None:
    action_dim = environment.get_action_dim()
print('action_dim: ', action_dim)

policy_dist         = Policy_Dist(use_gpu)
sac_memory          = Policy_Memory()
runner_memory       = Policy_Memory()
q_loss              = Q_loss(policy_dist)
policy_loss         = Policy_loss(policy_dist)

# Twin Q-networks share one optimizer; the policy network has its own.
policy              = Policy_Model(state_dim, action_dim, use_gpu).float().to(set_device(use_gpu))
soft_q1             = Q_Model(state_dim, action_dim).float().to(set_device(use_gpu))
soft_q2             = Q_Model(state_dim, action_dim).float().to(set_device(use_gpu))
policy_optimizer    = Adam(list(policy.parameters()), lr = learning_rate)
soft_q_optimizer    = Adam(list(soft_q1.parameters()) + list(soft_q2.parameters()), lr = learning_rate)

agent = Agent(soft_q1, soft_q2, policy, state_dim, action_dim, policy_dist, q_loss, policy_loss, sac_memory, 
        soft_q_optimizer, policy_optimizer, is_training_mode, batch_size, epochs, 
        soft_tau, folder, use_gpu)
                    
runner      = Runner(agent, environment, runner_memory, is_training_mode, render, environment.is_discrete(), max_action, SummaryWriter(), n_plot_batch) # [Runner.remote(i_env, render, training_mode, n_update, Wrapper.is_discrete(), agent, max_action, None, n_plot_batch) for i_env in env]
executor    = Executor(agent, n_iteration, runner, save_weights, n_saved, load_weights, is_training_mode)

executor.execute()
# Ejemplo n.º 25
# 0
# NOTE(review): this ``else`` pairs with an ``if`` that appears above this
# chunk (pretrained weights are only loaded on this branch).
else:
    model.load_weights(args.weights_path)

# Wrap for multi-GPU data parallelism when running on CUDA.
if args.device == 'cuda':
    model = nn.DataParallel(model)

logging.info(model)

### train info ###
if args.optimizer == 'SGD':
    optimizer = SGD(model.parameters(), lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
    logging.info('Optimizer Info:'
                 '\nOptimizer: {}'
                 '\nlearning rate: {}, Momentum: {}, Weight decay: {}\n'.format(args.optimizer, args.learning_rate, args.momentum, args.weight_decay))
elif args.optimizer == 'Adam':
    optimizer = Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
    logging.info('Optimizer Info:'
                 '\nOptimizer: {}'
                 '\nlearning rate: {}, Weight decay: {}\n'.format(args.optimizer, args.learning_rate, args.weight_decay))
else:
    # Any other optimizer name is a configuration error.
    assert False, "Invalid optimizer"

# Per-iteration (not per-epoch) multi-step learning-rate decay.
iter_scheduler = IterMultiStepLR(optimizer, milestones=args.steplr_milestones, gamma=args.steplr_gamma)
logging.info('Multi Step Info:'
             '\nmilestones: {}'
             '\ngamma: {}\n'.format(args.steplr_milestones, args.steplr_gamma))

save_manager = SaveManager(modelname=args.model_name, interval=args.checkpoints_interval, max_checkpoints=15, plot_interval=100)
trainer = TrainObjectDetectionConsoleLogger(loss_module=TextBoxLoss(alpha=args.loss_alpha, conf_loss=ConfidenceLoss(neg_factor=args.neg_factor)), model=model, optimizer=optimizer, scheduler=iter_scheduler)

# NOTE(review): the ``logging.info('Save Info:'`` call below is truncated in
# this excerpt; its remaining arguments continue past this chunk.
logging.info('Save Info:'
# Ejemplo n.º 26
# 0
class Brain:
    """PPO learner: policy network plus optimizer, schedules and updates.

    Responsibilities: action sampling (``get_actions_and_values``), GAE
    return computation (``get_gae``), the clipped-surrogate PPO update
    (``train``) and checkpointing (``save_params`` / ``load_params``).
    """

    def __init__(self, state_shape, n_actions, device, n_workers, epochs,
                 n_iters, epsilon, lr):
        self.state_shape = state_shape
        self.n_actions = n_actions
        self.device = device
        self.n_workers = n_workers
        # Fixed number of samples drawn per worker for each update step.
        self.mini_batch_size = 32
        self.epochs = epochs
        self.n_iters = n_iters
        # PPO clip range; annealed towards 0 by ``schedule_clip_range``.
        self.initial_epsilon = epsilon
        self.epsilon = self.initial_epsilon
        self.lr = lr

        self.current_policy = Model(self.state_shape,
                                    self.n_actions).to(self.device)

        self.optimizer = Adam(self.current_policy.parameters(),
                              lr=self.lr,
                              eps=1e-5)
        # Linear decay of the LR multiplier from 1 to 0 over ``n_iters``.
        self._schedule_fn = lambda step: max(1.0 - float(step / self.n_iters),
                                             0)
        self.scheduler = LambdaLR(self.optimizer, lr_lambda=self._schedule_fn)

    def get_actions_and_values(self, state, batch=False):
        """Sample an action and return (action, value, log_prob) as NumPy arrays.

        The permute below implies ``state`` is HWC uint8 image data — confirm
        against the environment wrapper.  No gradients are tracked.
        """
        if not batch:
            # Promote a single observation to a batch of one.
            state = np.expand_dims(state, 0)
        state = from_numpy(state).byte().permute([0, 3, 1, 2]).to(self.device)
        with torch.no_grad():
            dist, value = self.current_policy(state)
            action = dist.sample()
            log_prob = dist.log_prob(action)
        return action.cpu().numpy(), value.detach().cpu().numpy().squeeze(
        ), log_prob.cpu().numpy()

    def choose_mini_batch(self, states, actions, returns, advs, values,
                          log_probs):
        """Yield one random mini-batch per worker (sampled with replacement)."""
        for worker in range(self.n_workers):
            idxes = np.random.randint(0, states.shape[1], self.mini_batch_size)
            yield states[worker][idxes], actions[worker][idxes], returns[worker][idxes], advs[worker][idxes], \
                  values[worker][idxes], log_probs[worker][idxes]

    def train(self, states, actions, rewards, dones, values, log_probs,
              next_values):
        """Run ``epochs`` PPO passes over the rollout.

        Returns (last total loss, last entropy, explained variance of the
        value estimates).  Advantages are normalized per worker row.
        """
        returns = self.get_gae(rewards, values.copy(), next_values, dones)
        values = np.vstack(
            values)  # .reshape((len(values[0]) * self.n_workers,))
        advs = returns - values
        # Per-worker advantage normalization (mean/std along each row).
        advs = (advs - advs.mean(1).reshape((-1, 1))) / (advs.std(1).reshape(
            (-1, 1)) + 1e-8)
        for epoch in range(self.epochs):
            for state, action, q_value, adv, old_value, old_log_prob in self.choose_mini_batch(
                    states, actions, returns, advs, values, log_probs):
                state = torch.ByteTensor(state).permute([0, 3, 1,
                                                         2]).to(self.device)
                action = torch.Tensor(action).to(self.device)
                adv = torch.Tensor(adv).to(self.device)
                q_value = torch.Tensor(q_value).to(self.device)
                old_value = torch.Tensor(old_value).to(self.device)
                old_log_prob = torch.Tensor(old_log_prob).to(self.device)

                dist, value = self.current_policy(state)
                entropy = dist.entropy().mean()
                new_log_prob = self.calculate_log_probs(
                    self.current_policy, state, action)
                # Importance ratio between the new and old policies.
                ratio = (new_log_prob - old_log_prob).exp()
                actor_loss = self.compute_ac_loss(ratio, adv)

                # Clipped value loss: pessimistic max of clipped/unclipped.
                clipped_value = old_value + torch.clamp(
                    value.squeeze() - old_value, -self.epsilon, self.epsilon)
                clipped_v_loss = (clipped_value - q_value).pow(2)
                unclipped_v_loss = (value.squeeze() - q_value).pow(2)
                critic_loss = 0.5 * torch.max(clipped_v_loss,
                                              unclipped_v_loss).mean()

                # Entropy bonus (coefficient 0.01) encourages exploration.
                total_loss = critic_loss + actor_loss - 0.01 * entropy
                self.optimize(total_loss)

        return total_loss.item(), entropy.item(), \
               explained_variance(values.reshape((len(returns[0]) * self.n_workers,)),
                                  returns.reshape((len(returns[0]) * self.n_workers,)))

    def schedule_lr(self):
        """Advance the linear learning-rate schedule by one step."""
        self.scheduler.step()

    def schedule_clip_range(self, iter):
        """Linearly anneal the PPO clip range towards 0 over ``n_iters``."""
        # NOTE: the parameter name shadows the built-in ``iter``; kept as-is
        # for backward compatibility with existing keyword callers.
        self.epsilon = max(1.0 - float(iter / self.n_iters),
                           0) * self.initial_epsilon

    def optimize(self, loss):
        """Backprop ``loss`` with global-norm gradient clipping at 0.5."""
        self.optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.current_policy.parameters(), 0.5)
        self.optimizer.step()

    def get_gae(self,
                rewards,
                values,
                next_values,
                dones,
                gamma=0.99,
                lam=0.95):
        """Compute per-worker GAE(lambda) returns; result shape (n_workers, T)."""

        returns = [[] for _ in range(self.n_workers)]
        extended_values = np.zeros((self.n_workers, len(rewards[0]) + 1))
        for worker in range(self.n_workers):
            # Append the bootstrap value for the state after the rollout.
            extended_values[worker] = np.append(values[worker],
                                                next_values[worker])
            gae = 0
            for step in reversed(range(len(rewards[worker]))):
                delta = rewards[worker][step] + \
                        gamma * (extended_values[worker][step + 1]) * (1 - dones[worker][step]) \
                        - extended_values[worker][step]
                gae = delta + gamma * lam * (1 - dones[worker][step]) * gae
                returns[worker].insert(0, gae + extended_values[worker][step])

        return np.vstack(
            returns)  # .reshape((len(returns[0]) * self.n_workers,))

    @staticmethod
    def calculate_log_probs(model, states, actions):
        """Log-probabilities of ``actions`` under ``model``'s policy distribution."""
        policy_distribution, _ = model(states)
        return policy_distribution.log_prob(actions)

    def compute_ac_loss(self, ratio, adv):
        """Clipped-surrogate actor loss, negated for gradient descent."""
        new_r = ratio * adv
        clamped_r = torch.clamp(ratio, 1 - self.epsilon,
                                1 + self.epsilon) * adv
        loss = torch.min(new_r, clamped_r)
        loss = -loss.mean()
        return loss

    def save_params(self, iteration, running_reward):
        """Checkpoint policy, optimizer, scheduler and progress to ``params.pth``."""
        torch.save(
            {
                "current_policy_state_dict": self.current_policy.state_dict(),
                "optimizer_state_dict": self.optimizer.state_dict(),
                "scheduler_state_dict": self.scheduler.state_dict(),
                "iteration": iteration,
                "running_reward": running_reward,
                "clip_range": self.epsilon
            }, "params.pth")

    def load_params(self):
        """Restore a checkpoint; returns (running_reward, iteration)."""
        checkpoint = torch.load("params.pth", map_location=self.device)
        self.current_policy.load_state_dict(
            checkpoint["current_policy_state_dict"])
        self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        self.scheduler.load_state_dict(checkpoint["scheduler_state_dict"])
        iteration = checkpoint["iteration"]
        running_reward = checkpoint["running_reward"]
        self.epsilon = checkpoint["clip_range"]

        return running_reward, iteration

    def set_to_eval_mode(self):
        """Switch the policy network to evaluation mode."""
        self.current_policy.eval()
    def train(self):
        """Adversarial auto-encoder training loop.

        Jointly optimizes Encoder/Decoder against content (L1), perceptual
        and adversarial (BCE) losses, then updates the discriminator with an
        interpolation-based penalty.  Progress is plotted through the visdom
        helpers and all three networks are checkpointed every epoch.

        NOTE(review): ``self.Disciminator`` (sic) is the attribute name used
        throughout this class, so the spelling is preserved here.
        """
        optimizer_ae = Adam(chain(self.Encoder.parameters(),
                                  self.Decoder.parameters()),
                            self.lr,
                            betas=(self.b1, self.b2),
                            weight_decay=self.weight_decay)
        optimizer_discriminator = Adam(self.Disciminator.parameters(),
                                       self.lr,
                                       betas=(self.b1, self.b2),
                                       weight_decay=self.weight_decay)
        # The inner ``LambdaLR(...)`` here takes three epoch counts and its
        # ``.step`` is used as the lr lambda — presumably a project helper,
        # not torch.optim.lr_scheduler.LambdaLR; confirm.
        lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
            optimizer_ae,
            LambdaLR(self.num_epoch, self.epoch, self.decay_epoch).step)
        total_step = len(self.data_loader)

        perceptual_criterion = PerceptualLoss().to(self.device)
        content_criterion = nn.L1Loss().to(self.device)
        adversarial_criterion = nn.BCELoss().to(self.device)

        self.Encoder.train()
        self.Decoder.train()
        # Running averages for console logging and visdom plots.
        content_losses = AverageMeter()
        generator_losses = AverageMeter()
        perceptual_losses = AverageMeter()
        discriminator_losses = AverageMeter()
        ae_losses = AverageMeter()

        lr_window = create_vis_plot('Epoch', 'Learning rate', 'Learning rate')
        loss_window = create_vis_plot('Epoch', 'Loss', 'Total Loss')
        generator_loss_window = create_vis_plot('Epoch', 'Loss',
                                                'Generator Loss')
        discriminator_loss_window = create_vis_plot('Epoch', 'Loss',
                                                    'Discriminator Loss')
        content_loss_window = create_vis_plot('Epoch', 'Loss', 'Content Loss')
        perceptual_loss_window = create_vis_plot('Epoch', 'Loss',
                                                 'Perceptual Loss')

        # Ensure output directories exist before the first write.
        if not os.path.exists(self.sample_dir):
            os.makedirs(self.sample_dir)
        if not os.path.exists(self.checkpoint_dir):
            os.makedirs(self.checkpoint_dir)

        for epoch in range(self.epoch, self.num_epoch):
            content_losses.reset()
            perceptual_losses.reset()
            generator_losses.reset()
            ae_losses.reset()
            discriminator_losses.reset()
            for step, images in enumerate(self.data_loader):
                images = images.to(self.device)

                # Targets for the BCE adversarial loss.
                real_labels = torch.ones((images.size(0), 1)).to(self.device)
                fake_labels = torch.zeros((images.size(0), 1)).to(self.device)

                encoded_image = self.Encoder(images)

                # NOTE(review): the compressed bytes are currently unused —
                # the paired decompress path below is commented out, so this
                # call only spends CPU time.  Confirm whether it can go.
                binary_decoded_image = paq.compress(
                    encoded_image.cpu().detach().numpy().tobytes())
                # encoded_image = paq.decompress(binary_decoded_image)
                #
                # encoded_image = torch.from_numpy(np.frombuffer(encoded_image, dtype=np.float32)
                #                                  .reshape(-1, self.storing_channels, self.image_size // 8,
                #                                           self.image_size // 8)).to(self.device)

                decoded_image = self.Decoder(encoded_image)

                content_loss = content_criterion(images, decoded_image)
                perceptual_loss = perceptual_criterion(images, decoded_image)
                generator_loss = adversarial_criterion(
                    self.Disciminator(decoded_image), real_labels)
                # generator_loss = -self.Disciminator(decoded_image).mean()

                # Weighted sum of the three auto-encoder objectives.
                ae_loss = content_loss * self.content_loss_factor + perceptual_loss * self.perceptual_loss_factor + \
                          generator_loss * self.generator_loss_factor

                content_losses.update(content_loss.item())
                perceptual_losses.update(perceptual_loss.item())
                generator_losses.update(generator_loss.item())
                ae_losses.update(ae_loss.item())

                optimizer_ae.zero_grad()
                ae_loss.backward(retain_graph=True)
                optimizer_ae.step()

                # Discriminator update with a penalty on interpolated images.
                interpolated_image = self.eta * images + (
                    1 - self.eta) * decoded_image
                gravity_penalty = self.Disciminator(interpolated_image).mean()
                real_loss = adversarial_criterion(self.Disciminator(images),
                                                  real_labels)
                fake_loss = adversarial_criterion(
                    self.Disciminator(decoded_image), fake_labels)
                discriminator_loss = (real_loss + fake_loss) * self.discriminator_loss_factor / 2 +\
                                     gravity_penalty * self.penalty_loss_factor

                # discriminator_loss = self.Disciminator(decoded_image).mean() - self.Disciminator(images).mean() + \
                #                      gravity_penalty * self.penalty_loss_factor

                optimizer_discriminator.zero_grad()
                discriminator_loss.backward(retain_graph=True)
                optimizer_discriminator.step()
                discriminator_losses.update(discriminator_loss.item())

                # Periodic console log and sample dump (original stacked with
                # its reconstruction).
                if step % 100 == 0:
                    print(
                        f"[Epoch {epoch}/{self.num_epoch}] [Batch {step}/{total_step}] [Learning rate {get_lr(optimizer_ae)}] "
                        f"[Content {content_loss:.4f}] [Perceptual {perceptual_loss:.4f}] [Gan {generator_loss:.4f}]"
                        f"[Discriminator {discriminator_loss:.4f}]")

                    save_image(
                        torch.cat([images, decoded_image], dim=2),
                        os.path.join(self.sample_dir,
                                     f"Sample-epoch-{epoch}-step-{step}.png"))

            # Epoch-level plotting of all running averages plus the LR.
            update_vis_plot(epoch, ae_losses.avg, loss_window, 'append')
            update_vis_plot(epoch, generator_losses.avg, generator_loss_window,
                            'append')
            update_vis_plot(epoch, discriminator_losses.avg,
                            discriminator_loss_window, 'append')
            update_vis_plot(epoch, content_losses.avg, content_loss_window,
                            'append')
            update_vis_plot(epoch, perceptual_losses.avg,
                            perceptual_loss_window, 'append')
            update_vis_plot(epoch, get_lr(optimizer_ae), lr_window, 'append')

            lr_scheduler.step()

            # Persist all three networks at the end of every epoch.
            torch.save(
                self.Encoder.state_dict(),
                os.path.join(self.checkpoint_dir, f"Encoder-{epoch}.pth"))
            torch.save(
                self.Decoder.state_dict(),
                os.path.join(self.checkpoint_dir, f"Decoder-{epoch}.pth"))
            torch.save(
                self.Disciminator.state_dict(),
                os.path.join(self.checkpoint_dir,
                             f"Discriminator-{epoch}.pth"))
# Ejemplo n.º 28
# 0
def split_optimizer(model: nn.Module, cfg: dict):
    """Create an Adam/SGD optimizer with separate parameter groups.

    ``split_params`` partitions the model's parameters into weight-decayed
    weights, biases, and everything else.  Weight decay is applied only to
    the first group; biases never receive weight decay.

    Args:
        model: network whose parameters are being optimized.
        cfg: dict with keys ``optimizer`` ('Adam' or 'SGD'), ``lr``,
            ``weight_decay`` and (for SGD) ``momentum``.

    Returns:
        A configured ``torch.optim.Optimizer``.

    Raises:
        NotImplementedError: for any unsupported ``cfg['optimizer']`` value.
    """
    param_weight_decay, param_bias, param_other = split_params(model)

    def _base_optimizer(params, **extra):
        # Single place that knows how to instantiate each optimizer type
        # (previously duplicated across both branches below).
        if cfg['optimizer'] == 'Adam':
            return Adam(params, lr=cfg['lr'], **extra)
        if cfg['optimizer'] == 'SGD':
            return SGD(params, lr=cfg['lr'], momentum=cfg['momentum'], **extra)
        raise NotImplementedError("optimizer {:s} is not support!".format(
            cfg['optimizer']))

    if param_other:
        # "Other" params form the base group without weight decay; the
        # decayed weights are appended as their own group.
        optimizer = _base_optimizer(param_other)
        optimizer.add_param_group({
            'params': param_weight_decay,
            'weight_decay': cfg['weight_decay']
        })  # add pg1 with weight_decay
    else:
        optimizer = _base_optimizer(param_weight_decay,
                                    weight_decay=cfg['weight_decay'])
    # Biases always get their own group with no weight decay.
    optimizer.add_param_group({'params': param_bias})
    return optimizer
# Ejemplo n.º 29
# 0
class ModelContext:
    """Owns a deep-copied model wrapped between input/output adapter MLPs.

    The wrapped pipeline is ``FLT -> mlp(in) -> model -> mlp(out) -> Rev``,
    trained with its own Adam optimizer.  ``pull_back``/``reg_loss``
    implement a proximal regularizer towards an external ("outer")
    parameter set; ``cg`` computes a conjugate-gradient hypergradient.
    """

    def __init__(self,
                 name,
                 model,
                 in_size,
                 out_size,
                 model_size,
                 criterion=nn.functional.mse_loss,
                 lr=0.001):
        # Deep-copy so this context owns an independent set of weights.
        self.model = copy.deepcopy(model)
        self.wrapped = nn.Sequential(
            FLT(), simple.mlp(in_size, out_features=model_size), self.model,
            simple.mlp(model_size, out_features=out_size), Rev())
        self.optimizer = Adam(lr=lr, params=self.wrapped.parameters())
        self.criterion = criterion
        # Loss histories for plotting/diagnostics.
        self.train_losses = []
        self.reg_losses = []
        self.grad_losses = []
        self.epochs_per_train = 2
        self.max_parts = 20  # cap on batches consumed per epoch in train()
        self.name = name
        self.wrapped.window_config = WindowConfig(1, 1, 0)
        self.wrapped.name = name

    def set_params(self, outer):
        """Copy parameter values from ``outer`` into the inner model in place."""
        for param_to, param_form in zip(self.model.parameters(),
                                        outer.parameters()):
            param_to.data.copy_(param_form)

    def train(self, train_set_fn):
        """Run ``epochs_per_train`` epochs, appending each epoch's summed loss.

        Each epoch consumes at most ``max_parts`` (X, y) batches.
        """
        for epoch in range(self.epochs_per_train):
            epoch_loss = 0
            parts = 0
            for X, y in train_set_fn:
                self.optimizer.zero_grad()
                y_hat = self.wrapped(X)
                loss = self.criterion(y_hat, y)
                loss.backward()
                epoch_loss += loss.item()
                self.optimizer.step()

                parts += 1
                if parts == self.max_parts:
                    break
            self.train_losses.append(epoch_loss)

    def train_with_val(self, window):
        """Train the wrapped model on ``window`` with validation, then plot."""
        train.train_window_models([self.wrapped],
                                  window,
                                  patience=4,
                                  validate=True,
                                  weight_decay=0,
                                  max_epochs=100,
                                  lrs=[0.0001],
                                  target_current_frame=True,
                                  source='agg',
                                  target='lr',
                                  log=False)
        # -------
        _, axs = plt.subplots(ncols=3, figsize=(20, 6))
        window.plot_lr(axs=axs, offsets=[0])
        window.plot_model(model=self.wrapped,
                          axs=axs,
                          other={
                              'y_offset': -1,
                              'source': 'agg',
                              'target': 'lr'
                          })

    def get_loss(self, data_set_fn):
        """Total (graph-attached) criterion loss summed over all batches."""
        loss = torch.Tensor([0])
        for X, y in data_set_fn:
            y_hat = self.wrapped(X)
            loss += self.criterion(y_hat, y)
        return loss

    def pull_back(self, outer, lam=0.0):
        """One proximal step pulling inner params towards ``outer``'s."""
        self.optimizer.zero_grad()
        loss = self.reg_loss(lam, outer)
        loss.backward()
        self.reg_losses.append(loss.item())
        self.optimizer.step()  # do we need this?

    def reg_loss(self, lam, outer):
        """Return 0.5 * lam * squared L2 distance between inner and outer params."""
        dist = 0
        for param_to, param_form in zip(self.model.parameters(),
                                        outer.parameters()):
            dist += torch.sum((param_form - param_to)**2)
        return 0.5 * lam * dist

    def cg(self, wrapped_window, lam):
        """CG-based hypergradient of the validation loss w.r.t. inner params.

        NOTE(review): ``Cg(model, lam)`` below references a name ``model``
        that is not defined in this method — ``self.model`` looks intended;
        confirm before relying on this method.
        """
        params = list(self.model.parameters())

        train_loss = self.get_loss(wrapped_window.train)
        val_loss = self.get_loss(wrapped_window.val)

        # create_graph=True keeps second-order information for the CG solve.
        train_grad = torch.autograd.grad(train_loss, params, create_graph=True)
        # train_grad = torch.autograd.grad(train_loss, params)
        train_grad = nn.utils.parameters_to_vector(train_grad)
        val_grad = torch.autograd.grad(val_loss, params)
        val_grad = nn.utils.parameters_to_vector(val_grad)
        grad_loss = Cg(model, lam).cg(train_grad, val_grad, params)
        self.grad_losses.append(grad_loss)

        return grad_loss

    def plot(self, axs=None, normalize=True):
        """Plot the (optionally min-max normalized) training-loss history."""
        if axs is None:
            fig, axs = plt.subplots(ncols=2, figsize=(12, 6))

        long_index = np.arange(start=0, stop=len(self.train_losses), step=1)
        short_index = np.arange(start=0, stop=len(self.reg_losses), step=1)
        # df = pd.DataFrame(index=short_index, data=self.reg_losses, columns=[f'{name}-regloss'])
        # if normalize:
        #     df = (df - df.min()) / (df.max() - df.min())
        # df.plot(ax=axs[0])
        df = pd.DataFrame(index=long_index,
                          data=self.train_losses,
                          columns=[f'{self.name}-trainloss'])
        if normalize:
            df = (df - df.min()) / (df.max() - df.min())
        df.plot(ax=axs[1])
# Ejemplo n.º 30
# 0
 def configure_optimizers(self):
     """Build the model's optimizer.

     Hook-style factory (PyTorch-Lightning naming convention — confirm the
     framework); assumes ``self.lr`` was set by ``__init__``.
     """
     optimizer = Adam(self.parameters(), lr=self.lr)
     return optimizer