Example #1
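# Assumed imports for these snippets (not shown in the scraped excerpts); the
# same context applies to the later examples. LSTMModel, FCN, EarlyStopping,
# SimpleDataset, mae, to_torch, freeze_model, split_idx_50_50, train and test
# are project-local helpers defined elsewhere in this repository.
import copy
import json
import os
import pickle

import learn2learn as l2l
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader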
def main(args):

    meta_info = {
        "POLLUTION": [5, 50, 14],
        "HR": [32, 50, 13],
        "BATTERY": [20, 50, 3]
    }

    output_directory = "output/"
    horizon = 10
    output_dim = 1

    dataset_name = args.dataset
    save_model_file = args.save_model_file
    load_model_file = args.load_model_file
    lower_trial = args.lower_trial
    upper_trial = args.upper_trial
    learning_rate = args.learning_rate
    meta_learning_rate = args.meta_learning_rate
    adaptation_steps = args.adaptation_steps
    batch_size = args.batch_size
    model_name = args.model
    is_test = args.is_test
    patience_stopping = args.stopping_patience
    epochs = args.epochs
    noise_level = args.noise_level
    noise_type = args.noise_type

    assert model_name in ("FCN", "LSTM"), "Model was not correctly specified"
    assert dataset_name in ("POLLUTION", "HR", "BATTERY")

    window_size, task_size, input_dim = meta_info[dataset_name]
    grid = [0., noise_level]

    train_data = pickle.load(
        open(
            "../../Data/TRAIN-" + dataset_name + "-W" + str(window_size) +
            "-T" + str(task_size) + "-NOML.pickle", "rb"))
    train_data_ML = pickle.load(
        open(
            "../../Data/TRAIN-" + dataset_name + "-W" + str(window_size) +
            "-T" + str(task_size) + "-ML.pickle", "rb"))
    validation_data = pickle.load(
        open(
            "../../Data/VAL-" + dataset_name + "-W" + str(window_size) + "-T" +
            str(task_size) + "-NOML.pickle", "rb"))
    validation_data_ML = pickle.load(
        open(
            "../../Data/VAL-" + dataset_name + "-W" + str(window_size) + "-T" +
            str(task_size) + "-ML.pickle", "rb"))
    test_data = pickle.load(
        open(
            "../../Data/TEST-" + dataset_name + "-W" + str(window_size) +
            "-T" + str(task_size) + "-NOML.pickle", "rb"))
    test_data_ML = pickle.load(
        open(
            "../../Data/TEST-" + dataset_name + "-W" + str(window_size) +
            "-T" + str(task_size) + "-ML.pickle", "rb"))

    results_dict = {}

    for trial in range(lower_trial, upper_trial):

        output_directory = "../../Models/" + dataset_name + "_" + model_name + "_MAML/" + str(
            trial) + "/"
        save_model_file_ = output_directory + save_model_file
        load_model_file_ = output_directory + load_model_file

        try:
            os.mkdir(output_directory)
        except OSError as error:
            print(error)

        f = open(output_directory + "/results3.txt", "a+")
        f.write("Learning rate :%f \n" % learning_rate)
        f.write("Meta-learning rate: %f \n" % meta_learning_rate)
        f.write("Adaptation steps: %f \n" % adaptation_steps)
        f.write("\n")
        f.close()

        if model_name == "LSTM":
            model = LSTMModel(batch_size=batch_size,
                              seq_len=window_size,
                              input_dim=input_dim,
                              n_layers=2,
                              hidden_dim=120,
                              output_dim=1)
        elif model_name == "FCN":
            kernels = [8, 5, 3] if dataset_name != "POLLUTION" else [4, 2, 1]
            model = FCN(time_steps=window_size,
                        channels=[input_dim, 128, 128, 128],
                        kernels=kernels)

        model.cuda()

        maml = l2l.algorithms.MAML(model, lr=learning_rate, first_order=False)
        opt = optim.Adam(maml.parameters(), lr=meta_learning_rate)
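        # Two learning rates: `learning_rate` is the inner-loop (fast
        # adaptation) step size used by learner.adapt(), while
        # `meta_learning_rate` drives the outer-loop Adam update of the
        # meta-parameters.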

        torch.backends.cudnn.enabled = False
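        # cuDNN is likely disabled because its RNN kernels do not support the
        # double backward needed for second-order MAML with the LSTM model.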
        total_num_tasks = train_data_ML.x.shape[0]
        # Pre-training sanity evaluation; the test-set return value is unused
        test(maml, model_name, dataset_name, test_data_ML, adaptation_steps,
             learning_rate)
        val_error = test(maml, model_name, dataset_name, validation_data_ML,
                         adaptation_steps, learning_rate)

        early_stopping = EarlyStopping(patience=patience_stopping,
                                       model_file=save_model_file_,
                                       verbose=True)
        # Reduce the meta learning rate when the validation error plateaus
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(opt,
                                                               patience=200,
                                                               verbose=True)

        #early_stopping(val_error, maml)

        for iteration in range(epochs):
            opt.zero_grad()
            iteration_error = 0.0

            print(iteration)
            for _ in range(batch_size):
                # Each meta-batch element clones the model and samples a task
                learner = maml.clone()
                task = np.random.randint(0, total_num_tasks - horizon)
                #task_qry = np.random.randint(1,horizon+1)

                x_spt, y_spt = train_data_ML[task]
                #x_qry, y_qry = train_data_ML[(task+1):(task+1+horizon)]
                x_qry, y_qry = train_data_ML[task + 1]

                x_qry = x_qry.reshape(-1, window_size, input_dim)
                y_qry = y_qry.reshape(-1, output_dim)

                if model_name == "FCN":
                    x_qry = np.transpose(x_qry, [0, 2, 1])
                    x_spt = np.transpose(x_spt, [0, 2, 1])

                x_spt, y_spt = to_torch(x_spt), to_torch(y_spt)
                x_qry = to_torch(x_qry)
                y_qry = to_torch(y_qry)

                # Data augmentation: with probability 1/2 the targets are
                # perturbed by noise_level, additively or multiplicatively
                epsilon = grid[np.random.randint(0, len(grid))]

                if noise_type == "additive":
                    y_spt = y_spt + epsilon
                    y_qry = y_qry + epsilon
                else:
                    y_spt = y_spt * (1 + epsilon)
                    y_qry = y_qry * (1 + epsilon)

                # Fast adaptation: gradient steps on the support set;
                # learner.adapt() updates the clone in place while keeping the
                # graph needed for the second-order meta-gradient
                for step in range(adaptation_steps):

                    pred = learner(x_spt)
                    error = mae(pred, y_spt)
                    learner.adapt(
                        error)  #, allow_unused=True)#, allow_nograd=True)

                pred = learner(x_qry)
                evaluation_error = mae(pred, y_qry)
                iteration_error += evaluation_error

            # Meta-update: average the query losses over the meta-batch and
            # backpropagate through the inner-loop updates
            iteration_error /= batch_size
            iteration_error.backward()  #retain_graph = True)
            print("loss iteration:", iteration_error.data)
            opt.step()

            if iteration % 1 == 0:  # validate every iteration
                val_error = test(maml, model_name, dataset_name,
                                 validation_data_ML, adaptation_steps,
                                 learning_rate)
                test_error = test(maml, model_name, dataset_name, test_data_ML,
                                  adaptation_steps, learning_rate)
                scheduler.step(val_error)
                print("Val error:", val_error)
                print("Test error:", test_error)

                early_stopping(val_error, maml)

                if early_stopping.early_stop:
                    print("Early stopping")
                    break

        # Restore the best weights saved by early stopping
        maml.load_state_dict(torch.load(save_model_file_))
        validation_error = test(maml, model_name, dataset_name,
                                validation_data_ML, adaptation_steps,
                                learning_rate)
        #validation_error2 = test(maml, model_name, dataset_name, validation_data_ML, adaptation_steps, learning_rate, with_early_stopping=True)
        #validation_error3 = test(maml, model_name, dataset_name, validation_data_ML, 10, learning_rate, with_early_stopping=True)
        #validation_error4 = test(maml, model_name, dataset_name, validation_data_ML, 10, learning_rate*0.1, with_early_stopping=True)

        test_error = test(maml, model_name, dataset_name, test_data_ML,
                          adaptation_steps, learning_rate)
        #test_error2 = test(maml, model_name, dataset_name, test_data_ML, adaptation_steps , learning_rate, with_early_stopping=True)
        #test_error3 = test(maml, model_name, dataset_name, test_data_ML, 10 , learning_rate, with_early_stopping=True)
        #test_error4 = test(maml, model_name, dataset_name, test_data_ML, 10, learning_rate*0.1, with_early_stopping=True)

        f = open(output_directory + "/results3.txt", "a+")
        f.write("Dataset :%s \n" % dataset_name)
        f.write("Test error: %f \n" % test_error)
        #f.write("Test error2: %f \n" % test_error2)
        #f.write("Test error3: %f \n" % test_error3)
        #f.write("Test error4: %f \n" % test_error4)

        f.write("Validation error: %f \n" % validation_error)
        #f.write("Validation error2: %f \n" %validation_error2)
        #f.write("Validation error3: %f \n" %validation_error3)
        #f.write("Validation error4: %f \n" %validation_error4)
        f.write("\n")
        f.close()

        results_dict[str(trial) + "_val"] = validation_error
        results_dict[str(trial) + "_test"] = test_error

    np.save(
        "npy_objects/run03_" + dataset_name + "_" + model_name + "_" +
        noise_type + "_" + str(noise_level * 100000) + ".npy", results_dict)
Example #2
def test2(maml,
          model_name,
          test_data_ML,
          adaptation_steps,
          learning_rate,
          with_early_stopping=False,
          horizon=10):
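    # Evaluate a MAML model on sliding-window test tasks: clone the learner,
    # adapt it on the support set of task `task`, then measure MAE on the
    # query windows of the following `horizon` tasks; returns the mean MAE.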

    total_tasks_test = len(test_data_ML)
    error_list = []

    learner = maml.clone()  # Creates a clone of model
    learner.cuda()
    accum_error = 0.0
    count = 0

    input_dim = test_data_ML.x.shape[-1]
    window_size = test_data_ML.x.shape[-2]
    output_dim = test_data_ML.y.shape[-1]

    # Guard against a zero stride when there are fewer than 100 tasks
    step = max(1, total_tasks_test // 100)
    for task in range(0, (total_tasks_test - horizon - 1), step):

        # Skip windows that span more than one source file (domain boundary)
        temp_file_idx = test_data_ML.file_idx[task:task + horizon + 1]
        if len(np.unique(temp_file_idx)) > 1:
            continue

        if model_name == "LSTM":
            model2 = LSTMModel(batch_size=None,
                               seq_len=None,
                               input_dim=input_dim,
                               n_layers=2,
                               hidden_dim=120,
                               output_dim=1)
        elif model_name == "FCN":
            kernels = [8, 5, 3] if window_size != 5 else [4, 2, 1]
            model2 = FCN(time_steps=window_size,
                         channels=[input_dim, 128, 128, 128],
                         kernels=kernels)

        #model2.cuda()
        #model2.load_state_dict(copy.deepcopy(maml.module.state_dict()))
        #opt2 = optim.Adam(model2.parameters(), lr=learning_rate)
        learner = maml.clone()

        x_spt, y_spt = test_data_ML[task]
        x_qry = test_data_ML.x[(task + 1):(task + 1 + horizon)].reshape(
            -1, window_size, input_dim)
        y_qry = test_data_ML.y[(task + 1):(task + 1 + horizon)].reshape(
            -1, output_dim)

        if model_name == "FCN":
            x_qry = np.transpose(x_qry, [0, 2, 1])
            x_spt = np.transpose(x_spt, [0, 2, 1])

        x_spt, y_spt = to_torch(x_spt), to_torch(y_spt)
        x_qry = to_torch(x_qry)
        y_qry = to_torch(y_qry)

        early_stopping = EarlyStopping(patience=2,
                                       model_file="temp/temp_file.pt",
                                       verbose=True)
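        # Optional early stopping of the adaptation loop, scored with the
        # query loss of model2, a plain copy of the adapted weights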

        #learner.module.train()
        #model2.eval()
        for step in range(adaptation_steps):

            #model2.train()
            pred = learner(x_spt)
            error = mae(pred, y_spt)

            #opt2.zero_grad()
            #error.backward()

            learner.adapt(error)
            #opt2.step()

            if with_early_stopping:
                with torch.no_grad():

                    model2.load_state_dict(
                        copy.deepcopy(learner.module.state_dict()))
                    #model2.eval()
                    pred = model2(x_qry)
                    error = mae(pred, y_qry)
                early_stopping(error, model2)

            if early_stopping.early_stop:
                print("Early stopping")
                break

        if with_early_stopping:
            model2.load_state_dict(torch.load("temp/temp_file.pt"))
        #model2.eval()
        #learner.module.eval()
        pred = learner(x_qry)
        error = mae(pred, y_qry)

        accum_error += error.data
        count += 1

    error = accum_error / count

    return error
Example #3
def main(args):

    meta_info = {
        "POLLUTION": [5, 50, 14],
        "HR": [32, 50, 13],
        "BATTERY": [20, 50, 3]
    }

    output_directory = "output/"
    verbose = True
    batch_size = 64
    freeze_model_flag = True

    params = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 0}

    dataset_name = args.dataset
    mode = args.mode
    save_model_file = args.save_model_file
    load_model_file = args.load_model_file
    lower_trial = args.lower_trial
    upper_trial = args.upper_trial
    learning_rate = args.learning_rate
    regularization_penalty = args.regularization_penalty
    model_name = args.model
    is_test = args.is_test
    patience_stopping = args.patience_stopping
    epochs = args.epochs
    freeze_model_flag = args.freeze_model

    assert mode in ("WFT", "WOFT", "50"), "Mode was not correctly specified"
    assert model_name in ("FCN", "LSTM"), "Model was not correctly specified"
    assert dataset_name in ("POLLUTION", "HR", "BATTERY")
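    # Modes (as exercised below): "WOFT" appears to train a model from
    # scratch, "WFT" fine-tunes a pre-trained model per task, and "50"
    # fine-tunes a pre-trained model per target domain on a 50/50 split.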

    window_size, task_size, input_dim = meta_info[dataset_name]
    task_size = args.task_size

    train_data = pickle.load(
        open(
            "../Data/TRAIN-" + dataset_name + "-W" + str(window_size) + "-T" +
            str(task_size) + "-NOML.pickle", "rb"))
    validation_data = pickle.load(
        open(
            "../Data/VAL-" + dataset_name + "-W" + str(window_size) + "-T" +
            str(task_size) + "-NOML.pickle", "rb"))
    validation_data_ML = pickle.load(
        open(
            "../Data/VAL-" + dataset_name + "-W" + str(window_size) + "-T" +
            str(task_size) + "-ML.pickle", "rb"))
    test_data = pickle.load(
        open(
            "../Data/TEST-" + dataset_name + "-W" + str(window_size) + "-T" +
            str(task_size) + "-NOML.pickle", "rb"))
    test_data_ML = pickle.load(
        open(
            "../Data/TEST-" + dataset_name + "-W" + str(window_size) + "-T" +
            str(task_size) + "-ML.pickle", "rb"))
    #device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    for trial in range(lower_trial, upper_trial):

        output_directory = "../Models/" + dataset_name + "_" + model_name + "/" + str(
            trial) + "/"
        save_model_file_ = output_directory + save_model_file
        load_model_file_ = output_directory + load_model_file

        try:
            os.mkdir(output_directory)
        except OSError as error:
            print(error)

        if mode == "WOFT":

            f = open(output_directory + "/results.txt", "a+")
            f.write("Dataset :%s \n" % dataset_name)
            f.write("Learning rate:%f \n" % learning_rate)
            f.write("Epochs:%f \n" % epochs)
            f.write("Model name:%s \n" % model_name)
            f.close()

            if model_name == "FCN":

                kernels = [8, 5, 3] if dataset_name != "POLLUTION" else [4, 2, 1]
                train_data.x = np.transpose(train_data.x, [0, 2, 1])
                test_data.x = np.transpose(test_data.x, [0, 2, 1])
                validation_data.x = np.transpose(validation_data.x, [0, 2, 1])

            early_stopping = EarlyStopping(patience=patience_stopping,
                                           model_file=save_model_file_,
                                           verbose=verbose)

            if model_name == "LSTM":
                model = LSTMModel(batch_size=batch_size,
                                  seq_len=window_size,
                                  input_dim=input_dim,
                                  n_layers=2,
                                  hidden_dim=120,
                                  output_dim=1)
            elif model_name == "FCN":
                model = FCN(time_steps=window_size,
                            channels=[input_dim, 128, 128, 128],
                            kernels=kernels)

            model.cuda()

            train_loader = DataLoader(train_data, **params)
            val_loader = DataLoader(validation_data, **params)

            if is_test:
                test_loader = DataLoader(test_data, **params)
            else:
                test_loader = DataLoader(validation_data, **params)

            train(model, train_loader, val_loader, early_stopping,
                  learning_rate, epochs)
            test(model, test_loader, output_directory, save_model_file_)

        elif mode == "WFT":

            f = open(output_directory + "/results.txt", "a+")
            f.write("Dataset :%s \n" % dataset_name)
            f.write("Learning rate:%f \n" % learning_rate)
            f.write("Epochs:%f \n" % epochs)
            f.write("Model name:%s \n" % model_name)
            f.write("Regularization:%s \n" % regularization_penalty)
            f.close()

            save_model_file_ = output_directory + save_model_file
            load_model_file_ = output_directory + load_model_file

            assert save_model_file != load_model_file, "Files cannot be the same"

            n_tasks, task_size, dim, channels = test_data_ML.x.shape if is_test else validation_data_ML.x.shape
            horizon = 10
            test_loss_list1 = []
            test_loss_list2 = []
            initial_test_loss_list1 = []
            initial_test_loss_list2 = []

            # Evaluate roughly 100 evenly spaced tasks for full-size tasks;
            # otherwise use every task
            if task_size == 50:
                step = n_tasks // 100
            else:
                step = 1

            for task_id in range(0, (n_tasks - horizon - 1), step):

                # Check that all windows belong to the same domain (source file)
                data_ML = test_data_ML if is_test else validation_data_ML
                temp_file_idx = data_ML.file_idx[task_id:task_id + horizon + 1]
                if len(np.unique(temp_file_idx)) > 1:
                    continue

                temp_x_train = data_ML.x[task_id]
                temp_y_train = data_ML.y[task_id]

                temp_x_test1 = data_ML.x[(task_id + 1):(task_id + horizon +
                                                        1)].reshape(
                                                            -1, dim, channels)
                temp_y_test1 = data_ML.y[(task_id + 1):(task_id + horizon +
                                                        1)].reshape(-1, 1)

                temp_x_test2 = data_ML.x[task_id + 1].reshape(-1, dim, channels)
                temp_y_test2 = data_ML.y[task_id + 1].reshape(-1, 1)

                if model_name == "FCN":

                    kernels = [8, 5, 3
                               ] if dataset_name != "POLLUTION" else [4, 2, 1]
                    temp_x_train = np.transpose(temp_x_train, [0, 2, 1])
                    temp_x_test1 = np.transpose(temp_x_test1, [0, 2, 1])
                    temp_x_test2 = np.transpose(temp_x_test2, [0, 2, 1])
                    #temp_x_val = np.transpose(temp_x_val, [0,2,1])

                early_stopping = EarlyStopping(patience=patience_stopping,
                                               model_file=save_model_file_,
                                               verbose=verbose)

                if model_name == "LSTM":
                    model = LSTMModel(batch_size=batch_size,
                                      seq_len=window_size,
                                      input_dim=input_dim,
                                      n_layers=2,
                                      hidden_dim=120,
                                      output_dim=1)
                elif model_name == "FCN":
                    model = FCN(time_steps=window_size,
                                channels=[input_dim, 128, 128, 128],
                                kernels=kernels)

                model.load_state_dict(torch.load(load_model_file_))

                train_loader = DataLoader(
                    SimpleDataset(x=temp_x_train, y=temp_y_train), **params)
                test_loader1 = DataLoader(
                    SimpleDataset(x=temp_x_test1, y=temp_y_test1), **params)
                test_loader2 = DataLoader(
                    SimpleDataset(x=temp_x_test2, y=temp_y_test2), **params)

                verbose = False

                model.cuda()
                initial_loss1 = test(model, test_loader1, output_directory,
                                     load_model_file_, verbose)
                initial_loss2 = test(model, test_loader2, output_directory,
                                     load_model_file_, verbose)

                if freeze_model_flag:
                    freeze_model(model)

                #early_stopping(initial_loss, model)
                train(model, train_loader, test_loader1, early_stopping,
                      learning_rate, epochs, regularization_penalty)
                # Calling with 0.0 presumably forces EarlyStopping to save a
                # checkpoint of the fine-tuned model before evaluation
                early_stopping(0.0, model)
                loss1 = test(model, test_loader1, output_directory,
                             save_model_file_, verbose, True)
                loss2 = test(model, test_loader2, output_directory,
                             save_model_file_, verbose, True)
                print(loss1)

                test_loss_list1.append(loss1)
                initial_test_loss_list1.append(initial_loss1)
                test_loss_list2.append(loss2)
                initial_test_loss_list2.append(initial_loss2)

            f = open(output_directory + "/results.txt", "a+")

            #f.write("Initial Test error1 :%f \n"% np.mean(initial_test_loss_list1))
            f.write("Test error1: %f \n" % np.mean(test_loss_list1))
            f.write("Standard deviation1: %f \n" % np.std(test_loss_list1))
            #f.write("Initial Test error2 :%f \n"% np.mean(initial_test_loss_list2))
            f.write("Test error2: %f \n" % np.mean(test_loss_list2))
            f.write("Standard deviation2: %f \n" % np.std(test_loss_list2))
            f.write("\n")
            f.close()

            #np.save("test_loss_wtf_"+model_name+"_"+dataset_name+"_"+str(trial)+".npy", test_loss_list)

        elif mode == "50":

            assert save_model_file != load_model_file, "Files cannot be the same"

            with open(output_directory + "/results.txt", "a+") as f:
                f.write("Dataset :%s" % dataset_name)
                f.write("\n")

            save_model_file_ = output_directory + save_model_file
            load_model_file_ = output_directory + load_model_file

            if is_test == 0:
                test_data = validation_data

            # test_data already points at validation_data when is_test == 0,
            # so a single split suffices
            train_idx, val_idx, test_idx = split_idx_50_50(test_data.file_idx)
            n_domains_in_test = np.max(test_data.file_idx) + 1

            test_loss_list = []
            initial_test_loss_list = []

            for domain in range(n_domains_in_test):

                print("Domain:", domain)
                f = open(output_directory + "/results.txt", "a+")
                f.write("Domain: %d" % domain)
                f.write("\n")
                f.close()

                x_test = test_data.x
                y_test = test_data.y
                temp_train_data = SimpleDataset(x=x_test[train_idx[domain]],
                                                y=y_test[train_idx[domain]])

                temp_val_data = SimpleDataset(x=x_test[val_idx[domain]],
                                              y=y_test[val_idx[domain]])

                temp_test_data = SimpleDataset(x=x_test[test_idx[domain]],
                                               y=y_test[test_idx[domain]])

                early_stopping = EarlyStopping(patience=patience_stopping,
                                               model_file=save_model_file_,
                                               verbose=True)

                if model_name == "LSTM":
                    model = LSTMModel(batch_size=batch_size,
                                      seq_len=window_size,
                                      input_dim=input_dim,
                                      n_layers=2,
                                      hidden_dim=120,
                                      output_dim=1)

                elif model_name == "FCN":

                    kernels = [8, 5, 3] if dataset_name != "POLLUTION" else [4, 2, 1]
                    model = FCN(time_steps=window_size,
                                channels=[input_dim, 128, 128, 128],
                                kernels=kernels)
                    temp_train_data.x = np.transpose(temp_train_data.x,
                                                     [0, 2, 1])
                    temp_test_data.x = np.transpose(temp_test_data.x,
                                                    [0, 2, 1])
                    temp_val_data.x = np.transpose(temp_val_data.x, [0, 2, 1])

                if freeze_model_flag:
                    freeze_model(model)

                model.load_state_dict(torch.load(load_model_file_))
                model.cuda()

                temp_train_loader = DataLoader(temp_train_data, **params)
                temp_val_loader = DataLoader(temp_val_data, **params)
                temp_test_loader = DataLoader(temp_test_data, **params)

                initial_loss = test(model, temp_test_loader, output_directory,
                                    load_model_file_, False)
                train(model, temp_train_loader, temp_val_loader,
                      early_stopping, learning_rate, epochs,
                      regularization_penalty)
                loss = test(model, temp_test_loader, output_directory,
                            save_model_file_, True, True)

                initial_test_loss_list.append(initial_loss)
                test_loss_list.append(loss)

            total_loss = np.mean(test_loss_list)
            initial_loss = np.mean(initial_test_loss_list)
            f = open(output_directory + "/results.txt", "a+")
            f.write("Total error :%f \n" % total_loss)
            f.write("Learning rate: %f \n" % learning_rate)
            f.write("Initial Total error :%f \n" % initial_loss)
            f.write("Std: %f\n" % np.std(test_loss_list))
            f.write("\n")
            f.close()
Example #4
def test2(maml,
          model,
          model_name,
          dataset_name,
          test_data_ML,
          adaptation_steps,
          learning_rate,
          noise_level,
          noise_type,
          is_test=True,
          horizon=10):

    total_tasks_test = len(test_data_ML)
    error_list = []

    learner = maml.clone()  # Creates a clone of model
    learner.cuda()
    accum_error = 0.0
    accum_std = 0.0
    count = 0
    grid = [0., noise_level]

    input_dim = test_data_ML.x.shape[-1]
    window_size = test_data_ML.x.shape[-2]
    output_dim = test_data_ML.y.shape[-1]

    # Sub-sample roughly 100 evenly spaced tasks at test time; otherwise
    # evaluate every task
    if is_test:
        step = total_tasks_test // 100
    else:
        step = 1

    step = 1 if step == 0 else step
    
    for task in range(0, (total_tasks_test - horizon - 1), step):

        # Skip windows that cross a file (domain) boundary
        temp_file_idx = test_data_ML.file_idx[task:task + horizon + 1]
        if len(np.unique(temp_file_idx)) > 1:
            continue

        if model_name == "LSTM":
            model2 = LSTMModel(batch_size=None,
                               seq_len=None,
                               input_dim=input_dim,
                               n_layers=2,
                               hidden_dim=120,
                               output_dim=1)
        elif model_name == "LSTM_MRA":
            model2 = LSTMModel_MRA(batch_size=None,
                                   seq_len=window_size,
                                   input_dim=input_dim,
                                   n_layers=2,
                                   hidden_dim=120,
                                   output_dim=1)
        elif model_name == "FCN":
            kernels = [8, 5, 3] if window_size != 5 else [4, 2, 1]
            model2 = FCN(time_steps=window_size,
                         channels=[input_dim, 128, 128, 128],
                         kernels=kernels)

        
        #model2.cuda()
        #model2.load_state_dict(copy.deepcopy(maml.module.state_dict()))
        #opt2 = optim.Adam(model2.parameters(), lr=learning_rate)
        learner = maml.clone()

        x_spt, y_spt = test_data_ML[task]
        x_qry = test_data_ML.x[(task + 1):(task + 1 + horizon)].reshape(
            -1, window_size, input_dim)
        y_qry = test_data_ML.y[(task + 1):(task + 1 + horizon)].reshape(
            -1, output_dim)
        #x_qry = test_data_ML.x[(task+1)].reshape(-1, window_size, input_dim)
        #y_qry = test_data_ML.y[(task+1)].reshape(-1, output_dim)

        if model_name == "FCN":
            x_qry = np.transpose(x_qry, [0, 2, 1])
            x_spt = np.transpose(x_spt, [0, 2, 1])

        x_spt, y_spt = to_torch(x_spt), to_torch(y_spt)
        x_qry = to_torch(x_qry)
        y_qry = to_torch(y_qry)

        # Apply the same label-noise augmentation used during meta-training
        epsilon = grid[np.random.randint(0, len(grid))]

        if noise_type == "additive":
            y_spt = y_spt + epsilon
            y_qry = y_qry + epsilon
        else:
            y_spt = y_spt * (1 + epsilon)
            y_qry = y_qry * (1 + epsilon)

        
        #learner.module.train()
        #model2.eval()
        for _ in range(adaptation_steps):

            #model2.train()
            pred = learner(model.encoder(x_spt))
            error = mae(pred, y_spt)

            #opt2.zero_grad()
            #error.backward()

            learner.adapt(error)
            #opt2.step()

        #model2.eval()
        #learner.module.eval()
        y_pred = learner(model.encoder(x_qry))

        y_pred = torch.clamp(y_pred, 0, 1)
        error = mae(y_pred, y_qry)

        accum_error += error.data
        accum_std += error.data**2
        count += 1

    error = accum_error / count

    # Standard deviation of the per-task errors: sqrt(E[e^2] - E[e]^2)
    print("std:", torch.sqrt(torch.clamp(accum_std / count - error**2, min=0)))

    return error
Example #5
def main(args):

    meta_info = {
        "POLLUTION": [5, 50, 14],
        "HR": [32, 50, 13],
        "BATTERY": [20, 50, 3]
    }

    output_directory = "output/"
    horizon = 10
    output_dim = 1

    dataset_name = args.dataset
    save_model_file = args.save_model_file
    load_model_file = args.load_model_file
    lower_trial = args.lower_trial
    upper_trial = args.upper_trial
    learning_rate = args.learning_rate
    meta_learning_rate = args.meta_learning_rate
    adaptation_steps = args.adaptation_steps
    batch_size = args.batch_size
    model_name = args.model
    is_test = args.is_test
    patience_stopping = args.stopping_patience
    epochs = args.epochs
    noise_level = args.noise_level
    noise_type = args.noise_type
    ml_horizon = args.ml_horizon
    experiment_id = args.experiment_id

    window_size, task_size, input_dim = meta_info[dataset_name]

    task_size = args.task_size

    assert model_name in ("FCN", "LSTM"), "Model was not correctly specified"
    assert dataset_name in ("POLLUTION", "HR", "BATTERY")

    grid = [0., noise_level]

    train_data = pickle.load(
        open(
            "../../Data/TRAIN-" + dataset_name + "-W" + str(window_size) +
            "-T" + str(task_size) + "-NOML.pickle", "rb"))
    train_data_ML = pickle.load(
        open(
            "../../Data/TRAIN-" + dataset_name + "-W" + str(window_size) +
            "-T" + str(task_size) + "-ML.pickle", "rb"))
    validation_data = pickle.load(
        open(
            "../../Data/VAL-" + dataset_name + "-W" + str(window_size) + "-T" +
            str(task_size) + "-NOML.pickle", "rb"))
    validation_data_ML = pickle.load(
        open(
            "../../Data/VAL-" + dataset_name + "-W" + str(window_size) + "-T" +
            str(task_size) + "-ML.pickle", "rb"))
    test_data = pickle.load(
        open(
            "../../Data/TEST-" + dataset_name + "-W" + str(window_size) +
            "-T" + str(task_size) + "-NOML.pickle", "rb"))
    test_data_ML = pickle.load(
        open(
            "../../Data/TEST-" + dataset_name + "-W" + str(window_size) +
            "-T" + str(task_size) + "-ML.pickle", "rb"))

    loss_fn = mae

    results_list = []
    results_dict = {}
    results_dict["Experiment_id"] = experiment_id
    results_dict["Model"] = model_name
    results_dict["Dataset"] = dataset_name
    results_dict["Learning rate"] = learning_rate
    results_dict["Noise level"] = noise_level
    results_dict["Task size"] = task_size
    results_dict["Evaluation loss"] = "MAE Test"
    results_dict["Vrae weight"] = "-"
    results_dict["Training"] = "MAML"
    results_dict["ML-Horizon"] = ml_horizon
    results_dict["Meta-learning rate"] = meta_learning_rate

    #loss_fn = nn.SmoothL1Loss()

    for trial in range(lower_trial, upper_trial):

        output_directory = "../../Models/" + dataset_name + "_" + model_name + "_MAML/" + str(
            trial) + "/"

        save_model_file_ = output_directory + experiment_id + "_" + "encoder_" + save_model_file
        save_model_file_2 = output_directory + experiment_id + "_" + save_model_file
        load_model_file_ = output_directory + load_model_file

        try:
            os.mkdir(output_directory)
        except OSError as error:
            print(error)

        with open(output_directory + "/results3.txt", "a+") as f:
            f.write("Learning rate :%f \n" % learning_rate)
            f.write("Meta-learning rate: %f \n" % meta_learning_rate)
            f.write("Adaptation steps: %f \n" % adaptation_steps)
            f.write("Noise level: %f \n" % noise_level)
            f.write("\n")

        if model_name == "LSTM":
            model = LSTMModel(batch_size=batch_size,
                              seq_len=window_size,
                              input_dim=input_dim,
                              n_layers=2,
                              hidden_dim=120,
                              output_dim=1)
            model2 = nn.Linear(120, 1)
        elif model_name == "FCN":
            kernels = [8, 5, 3] if dataset_name != "POLLUTION" else [4, 2, 1]
            model = FCN(time_steps=window_size,
                        channels=[input_dim, 128, 128, 128],
                        kernels=kernels)
            model2 = nn.Linear(128, 1)

        model.cuda()
        model2.cuda()

        maml = l2l.algorithms.MAML(model2, lr=learning_rate, first_order=False)
        opt = optim.Adam(list(maml.parameters()) + list(model.parameters()),
                         lr=meta_learning_rate)
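        # The meta-optimizer updates both the shared encoder (model) and the
        # MAML-wrapped linear head (model2); only the head is adapted in the
        # inner loop.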

        #torch.backends.cudnn.enabled = False
        total_num_tasks = train_data_ML.x.shape[0]
        #test2(maml, model_name, dataset_name, test_data_ML, adaptation_steps, learning_rate)
        #val_error = test(maml, model_name, dataset_name, validation_data_ML, adaptation_steps, learning_rate)

        early_stopping = EarlyStopping(patience=patience_stopping,
                                       model_file=save_model_file_,
                                       verbose=True)
        early_stopping2 = EarlyStopping(patience=patience_stopping,
                                        model_file=save_model_file_2,
                                        verbose=True)

        #scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(opt, patience =200, verbose=True)

        #early_stopping(val_error, maml)

        for iteration in range(epochs):
            opt.zero_grad()
            iteration_error = 0.0

            print(iteration)
            for _ in range(batch_size):
                # Each meta-batch element clones the MAML head and samples a task
                learner = maml.clone()
                task = np.random.randint(0, total_num_tasks - horizon)

                # Skip tasks that straddle a file (domain) boundary
                if train_data_ML.file_idx[task + 1] != train_data_ML.file_idx[task]:
                    continue
                #task_qry = np.random.randint(1,horizon+1)

                x_spt, y_spt = train_data_ML[task]
                #x_qry, y_qry = train_data_ML[(task+1):(task+1+horizon)]
                # The query task sits ml_horizon steps ahead of the support task
                x_qry, y_qry = train_data_ML[task + ml_horizon]
                #x_qry, y_qry = train_data_ML[task_qry]

                x_qry = x_qry.reshape(-1, window_size, input_dim)
                y_qry = y_qry.reshape(-1, output_dim)

                if model_name == "FCN":
                    x_qry = np.transpose(x_qry, [0, 2, 1])
                    x_spt = np.transpose(x_spt, [0, 2, 1])

                x_spt, y_spt = to_torch(x_spt), to_torch(y_spt)
                x_qry = to_torch(x_qry)
                y_qry = to_torch(y_qry)

                # Data augmentation: with probability 1/2 the targets are
                # perturbed by noise_level, additively or multiplicatively
                epsilon = grid[np.random.randint(0, len(grid))]

                if noise_type == "additive":
                    y_spt = y_spt + epsilon
                    y_qry = y_qry + epsilon

                else:
                    y_spt = y_spt * (1 + epsilon)
                    y_qry = y_qry * (1 + epsilon)

                # Fast adaptation: gradient steps on the support set; only the
                # linear head (learner) is adapted, the encoder stays fixed
                for _ in range(adaptation_steps):

                    pred = learner(model.encoder(x_spt))
                    error = loss_fn(pred, y_spt)
                    learner.adapt(
                        error)  #, allow_unused=True)#, allow_nograd=True)

                pred = learner(model.encoder(x_qry))
                evaluation_error = loss_fn(pred, y_qry)
                iteration_error += evaluation_error
                #evaluation_error.backward()

            # Meta-update the model parameters
            #for p in maml.parameters():
            #p.grad.data.mul_(1.0 / batch_size)
            iteration_error /= batch_size
            iteration_error.backward()  #retain_graph = True)
            #print("loss iteration:",iteration_error)
            opt.step()

            if iteration % 1 == 0:  # validate every epoch
                val_error = test2(loss_fn,
                                  maml,
                                  model,
                                  model_name,
                                  dataset_name,
                                  validation_data_ML,
                                  adaptation_steps,
                                  learning_rate,
                                  noise_level,
                                  noise_type,
                                  horizon=10)
                test_error = test2(loss_fn,
                                   maml,
                                   model,
                                   model_name,
                                   dataset_name,
                                   test_data_ML,
                                   adaptation_steps,
                                   learning_rate,
                                   0,
                                   noise_type,
                                   horizon=10)
                #scheduler.step(val_error)
                print("Val error:", val_error)
                print("Test error:", test_error)

                if iteration > 10:
                    early_stopping(val_error, model)
                    early_stopping2(val_error, maml)

                if early_stopping.early_stop:
                    print("Early stopping")
                    break

        # Restore the best encoder and MAML head saved by early stopping
        model.load_state_dict(torch.load(save_model_file_))
        maml.load_state_dict(torch.load(save_model_file_2))

        validation_error = test2(loss_fn, maml, model, model_name,
                                 dataset_name, validation_data_ML,
                                 adaptation_steps, learning_rate, 0,
                                 noise_type)
        initial_val_error = test2(loss_fn, maml, model, model_name,
                                  dataset_name, validation_data_ML, 0,
                                  learning_rate, 0, noise_type)

        test_error = test2(loss_fn, maml, model, model_name, dataset_name,
                           test_data_ML, adaptation_steps, learning_rate, 0,
                           noise_type)
        initial_test_error = test2(loss_fn, maml, model, model_name,
                                   dataset_name, test_data_ML, 0,
                                   learning_rate, 0, noise_type)

        test_error2 = test2(loss_fn,
                            maml,
                            model,
                            model_name,
                            dataset_name,
                            test_data_ML,
                            adaptation_steps,
                            learning_rate,
                            0,
                            noise_type,
                            horizon=1)
        initial_test_error2 = test2(loss_fn,
                                    maml,
                                    model,
                                    model_name,
                                    dataset_name,
                                    test_data_ML,
                                    0,
                                    learning_rate,
                                    0,
                                    noise_type,
                                    horizon=1)

        with open(output_directory + "/results3.txt", "a+") as f:
            f.write("Dataset :%s \n" % dataset_name)
            f.write("Test error: %f \n" % test_error)
            f.write("Test error2: %f \n" % test_error2)
            f.write("Initial Test error: %f \n" % initial_test_error)
            f.write("Initial Test error2: %f \n" % initial_test_error2)
            f.write("Validation error: %f \n" % validation_error)
            f.write("Initial validation error: %f \n" % initial_val_error)

            f.write("\n")

        print("Adaptation_steps:", adaptation_steps)
        temp_results_dict = copy.copy(results_dict)
        temp_results_dict["Trial"] = trial
        temp_results_dict["Adaptation steps"] = adaptation_steps
        temp_results_dict["Horizon"] = 10
        temp_results_dict["Value"] = float(test_error)
        temp_results_dict["Val error"] = float(validation_error)
        temp_results_dict["Final_epoch"] = iteration
        results_list.append(temp_results_dict)

        temp_results_dict = copy.copy(results_dict)
        temp_results_dict["Trial"] = trial
        temp_results_dict["Adaptation steps"] = 0
        temp_results_dict["Horizon"] = 10
        temp_results_dict["Value"] = float(initial_test_error)
        temp_results_dict["Val error"] = float(initial_val_error)
        temp_results_dict["Final_epoch"] = iteration
        results_list.append(temp_results_dict)

        temp_results_dict = copy.copy(results_dict)
        temp_results_dict["Trial"] = trial
        temp_results_dict["Adaptation steps"] = adaptation_steps
        temp_results_dict["Horizon"] = 1
        temp_results_dict["Value"] = float(test_error2)
        temp_results_dict["Final_epoch"] = iteration
        results_list.append(temp_results_dict)

        temp_results_dict = copy.copy(results_dict)
        temp_results_dict["Trial"] = trial
        temp_results_dict["Adaptation steps"] = 0
        temp_results_dict["Horizon"] = 1
        temp_results_dict["Value"] = float(initial_test_error2)
        temp_results_dict["Final_epoch"] = iteration
        results_list.append(temp_results_dict)

    try:
        os.mkdir("../../Results/json_files/")
    except OSError as error:
        print(error)

    with open("../../Results/json_files/" + experiment_id + ".json",
              'w') as outfile:
        json.dump(results_list, outfile)