def single_test(loaded_model, fileName='rtt_series/valid_data/5021.csv'):
    """Run the trained model on one RTT series, print labels and predictions, and plot both."""
    # Test for a single input trace; earlier hard-coded inputs are kept below for reference.
    #test = np.array([189.21973,189.105955,189.0866,189.19152,189.18938,189.21477,189.363065,189.060735,189.018825,189.497595,189.460615,194.76307,196.72467,189.280095,189.014585,189.02478,189.03338,189.087215,88.99757,189.041955])
    #test = csvio.csv2list("rtt_series/real_trace_labelled/11119.csv", "rtt")
    test = csvio.csv2list(fileName, "rtt", sep=';', decimal='.')
    test = np.asarray(test)   # csv2list may return a plain list; reshape below needs an ndarray
    max_rtt = max(test)       # series maximum, used later to scale detections for plotting
    plt.plot(test)

    testLen = len(test)

    # Pre-treatment: the model expects input of shape (batch, timesteps, features)
    test = test.reshape(1, testLen, 1)

    # Prediction: per-timestep change-point probabilities
    temp = loaded_model.predict(test)
    temp = temp.reshape(testLen)
    #print("temp =")
    #print(temp)
    # Binarise the probabilities at 0.5
    res = (temp >= 0.5).astype(float)
    cp = csvio.csv2list(fileName, "cp", sep=';', decimal='.')
    # Print full arrays (np.nan is no longer a valid threshold in recent NumPy)
    np.set_printoptions(threshold=np.inf)
    print("cp =")
    print(cp)
    print(sum(cp))

    print("res =")
    print(res)
    print(sum(res))
    # Optional debugging: print the indices of labelled (cp) and predicted (res) change points
    '''
    for i in range(len(cp)):
        if (cp[i]==1):
            print('position cp: ')
            print(i)
    for i in range(len(res)):
        if(res[i]==1):
            print('position res')
            print(i)
    '''

    print("evaluation = ")
    a = eval.evaluation(cp, res)
    for i in range(len(res)):
        if (res[i] > 0):
            res[i] = l

    plt.plot(res, 'r')
    plt.show()
    print(a)
    return a
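
A minimal sketch of how this helper might be driven, assuming the model was serialised with Keras' model_from_json / save_weights convention (the file names below are hypothetical, not from this project):

# Hypothetical loading code; adapt the file names to how the model was actually saved.
from tensorflow.keras.models import model_from_json

with open('model.json') as f:          # hypothetical architecture file
    loaded_model = model_from_json(f.read())
loaded_model.load_weights('model.h5')  # hypothetical weights file

scores = single_test(loaded_model, fileName='rtt_series/valid_data/5021.csv')
print(scores)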
Example #2
def process(cf):
    ### Following are the plotting algorithms #############
    if cf.plot_real_wind:
        print('plot_real_wind')
        plot_real_wind(cf)
    if cf.plot_real_wind_multiprocessing:
        print('plot_real_wind_multiprocessing')
        plot_real_wind_multiprocessing(cf)
    if cf.plt_forecast_wind_train:
        print('plt_forecast_wind_train')
        plt_forecast_wind_train(cf)
    if cf.plt_forecast_wind_train_multiprocessing:
        print('plt_forecast_wind_train_multiprocessing')
        plt_forecast_wind_train_multiprocessing(cf)
    if cf.plt_forecast_wind_test:
        print('plt_forecast_wind_test')
        plt_forecast_wind_test(cf)
    if cf.plt_forecast_wind_test_multiprocessing:
        print('plt_forecast_wind_test_multiprocessing')
        plt_forecast_wind_test_multiprocessing(cf)
    if cf.plot_all_wind:
        print('Draw weather')
        plot_all_wind(cf)

    if cf.plot_all_wind_new:
        print('Draw weather: wind')
        plot_all_wind_new(cf)
    if cf.plot_all_rainfall:
        print('Draw weather: rainfall')
        plot_all_rainfall(cf)

    if cf.plot_wind_with_rainfall:
        print('plot_wind_with_rainfall')
        plot_wind_with_rainfall(cf)

    if cf.plot_multation:
        print('plot_multation')
        plot_multation(cf)


    ### Following is the A Star algorithm #############
    if cf.A_star_search_2D:
        print('A_star_search_2D')
        A_star_2d_hourly_update_route(cf)

    # This is one of the core algorithms
    if cf.A_star_search_3D:
        print('A_star_search_3D')
        A_star_search_3D(cf)

    if cf.A_star_search_3D_multiprocessing:
        print('A_star_search_3D_multiprocessing')
        A_star_search_3D_multiprocessing(cf)

    if cf.A_star_search_3D_multiprocessing_multicost:
        print('A_star_search_3D_multiprocessing_multicost')
        A_star_search_3D_multiprocessing_multicost(cf)

    if cf.A_star_search_3D_multiprocessing_rainfall_wind:
        print('A_star_search_3D_multiprocessing_rainfall_wind')
        A_star_search_3D_multiprocessing_rainfall_wind(cf)

    if cf.A_star_search_3D_multiprocessing_rainfall_wind_hour_min:
        print('A_star_search_3D_multiprocessing_rainfall_wind_hour_min')
        A_star_search_3D_multiprocessing_rainfall_wind_hour_min(cf)

    if cf.A_star_fix_missing:
        print('A_star_fix_missing')
        A_star_fix_missing(cf)

    ### Following is the RL algorithm #############
    if cf.reinforcement_learning_solution:
        print('reinforcement_learning_solution')
        reinforcement_learning_solution(cf)

    if cf.reinforcement_learning_solution_new:
        print('reinforcement_learning_solution_new')
        reinforcement_learning_solution_new(cf)

    if cf.reinforcement_learning_solution_multiprocessing:
        print("reinforcement_learning_solution_multiprocessing")
        reinforcement_learning_solution_multiprocessing(cf)

    if cf.reinforcement_learning_solution_wind_and_rainfall:
        print('reinforcement_learning_solution_wind_and_rainfall')
        reinforcement_learning_solution_wind_and_rainfall(cf)

    if cf.reinforcement_learning_solution_multiprocessing_wind_and_rainfall:
        print('reinforcement_learning_solution_multiprocessing_wind_and_rainfall')
        reinforcement_learning_solution_multiprocessing_wind_and_rainfall(cf)

    ### Following is the submission script #############
    if cf.submission_dummy:
        print("submission")
        submit_phase(cf)

    if cf.collect_csv_for_submission_fraction:
        print('collect_csv_for_submission_fraction')
        collect_csv_for_submission_fraction(cf)

    ### Following is the evaluation script #############
    if cf.evaluation:
        print('evaluation')
        total_penalty, crash_time_stamp, average_wind, max_wind = evaluation(cf, cf.csv_for_evaluation)

        print(int(np.sum(np.sum(total_penalty))))
        print(total_penalty.astype('int'))
        print(crash_time_stamp.astype('int'))
        np.set_printoptions(precision=2)
        print(average_wind)
        print(max_wind)
        print(np.sum(total_penalty.astype('int') == 1440))
    if cf.evaluation_with_rainfall:
        print('evaluation_with_rainfall')
        total_penalty, crash_time_stamp, average_wind, max_wind, average_rain, max_rain = evaluation_with_rainfall(cf)
        print(int(np.sum(np.sum(total_penalty))))
        print(total_penalty.astype('int'))
        print(crash_time_stamp.astype('int'))
        np.set_printoptions(precision=2)
        print(average_wind)
        print(max_wind)
        print(average_rain)
        print(max_rain)
        print(np.sum(total_penalty.astype('int') == 1440))

    # visualisation for evaluation
    if cf.evaluation_plot:
        print('evaluation_plot')
        evaluation_plot(cf)
    if cf.evaluation_plot_multi:
        print('evaluation_plot_multi')
        evaluation_plot_multi(cf)
    if cf.evaluation_plot_real_with_mean:
        print('evaluation_plot_real_with_mean')
        evaluation_plot_real_with_mean(cf)

    ### weather prediction
    if cf.wp_predict_weather:
        print('weather: predict weather data')
        wp_predict_weather(cf)

    #### assignment algorithm #############
    if cf.assignment_for_A_star_route:
        print('assignment_for_A_star_route')
        assignment_for_A_star_route(cf)

    if cf.assignment_for_A_star_route_10min:
        print('assignment_for_A_star_route_10min')
        assignment_for_A_star_route_10min(cf)

    if cf.assignment_for_A_star_route_min:
        print('assignment_for_A_star_route_min')
        assignment_for_A_star_route_min(cf)

    #### save cost #############
    if cf.save_costs_multiprocessing:
        print('save_costs_multiprocessing')
        save_costs_multiprocessing(cf)
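
process() is a dispatcher: every stage is gated by a boolean attribute on the config object cf. A minimal sketch of driving it, assuming the project's own config module is unavailable (the Config stub below is an assumption, not the project's API):

# Hypothetical config stub: unknown flags default to False via __getattr__,
# so process() can probe any stage flag without raising AttributeError.
class Config:
    def __getattr__(self, name):
        return False

cf = Config()
cf.A_star_search_3D = True   # enable only the core 3-D A* search
process(cf)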
Example #3
def train_GCN(tree_dict, x_train, x_val, x_test, counters):

    train_losses, train_accuracies = [], []
    validation_losses, validation_accuracies = [], []
    test_losses, test_accuracies = [], []

    # -------------
    #     MODEL
    # -------------
    model = Network(5000, 64, 64, settings).to(device)  # 5000-d input features, 64-d hidden and output layers

    # -----------------
    #     OPTIMIZER
    # -----------------
    # Train the bottom-up GCN's two conv layers with a 5x smaller learning rate than the rest
    BU_params = []
    BU_params += list(map(id, model.rumor_GCN_0.BURumorGCN.conv1.parameters()))
    BU_params += list(map(id, model.rumor_GCN_0.BURumorGCN.conv2.parameters()))
    base_params = filter(lambda p: id(p) not in BU_params, model.parameters())
    optimizer = torch.optim.Adam(
        [{
            'params': base_params
        }, {
            'params': model.rumor_GCN_0.BURumorGCN.conv1.parameters(),
            'lr': lr / 5
        }, {
            'params': model.rumor_GCN_0.BURumorGCN.conv2.parameters(),
            'lr': lr / 5
        }],
        lr=lr,
        weight_decay=weight_decay)

    early_stopping = EarlyStopping(patience=patience,
                                   verbose=True,
                                   model_path=MODEL_PATH)

    val_dataset = load_snapshot_dataset_val_or_test(dataset_name, tree_dict,
                                                    x_val)
    val_loader = DataLoader(val_dataset,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=5)

    for epoch in range(num_epochs):
        # train_dataset, val_dataset, test_dataset = load_snapshot_dataset(dataset_name, tree_dict, x_train, x_val, x_test)
        # train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=10)
        # val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True, num_workers=5)  # TODO: move out epoch
        # test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True, num_workers=5)

        # Reload the training set each epoch (for DropEdge-style re-sampling)

        with torch.cuda.device(device):
            torch.cuda.empty_cache()  # TODO: CHECK
        train_dataset = load_snapshot_dataset_train(dataset_name, tree_dict,
                                                    x_train)
        train_loader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=5)

        # TODO: consider freeing train_dataset / train_loader here rather than
        # after the training loop (see the `del` calls below).

        # ---------------------
        #         TRAIN
        # ---------------------
        model.train()
        batch_train_losses = []
        batch_train_accuracies = []

        for batch_index, batch_data in enumerate(train_loader):
            snapshots = []
            for i in range(snapshot_num):
                snapshots.append(batch_data[i].to(device))

            out_labels = model(snapshots)

            loss = F.nll_loss(out_labels, batch_data[0].y)
            del snapshots
            # nn.CrossEntropyLoss = nn.LogSoftmax + nn.NLLLoss
            # loss = criterion(out_labels, batch_data[0].y)  # TODO:

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch_train_loss = loss.item()
            _, pred = out_labels.max(dim=-1)
            correct = pred.eq(batch_data[0].y).sum().item()
            batch_train_acc = correct / len(batch_data[0].y)

            batch_train_losses.append(batch_train_loss)
            batch_train_accuracies.append(batch_train_acc)
            print(
                "Iter {:02d} | CV {:02d} | Epoch {:03d} | Batch {:02d} | Train_Loss {:.4f} | Train_Accuracy {:.4f}"
                .format(counters['iter'], counters['CV'], epoch, batch_index,
                        batch_train_loss, batch_train_acc))

        train_losses.append(np.mean(batch_train_losses))  # epoch
        train_accuracies.append(np.mean(batch_train_accuracies))

        del train_dataset
        del train_loader

        # ------------------------
        #         VALIDATE
        # ------------------------
        batch_val_losses = []  # epoch
        batch_val_accuracies = []
        batch_eval_results = []

        model.eval()

        for batch_data in val_loader:
            snapshots = []
            for i in range(snapshot_num):
                snapshots.append(batch_data[i].to(device))
            with torch.no_grad():  # no gradients needed for validation
                val_out = model(snapshots)
                val_loss = F.nll_loss(val_out, batch_data[0].y)
            del snapshots

            batch_val_loss = val_loss.item()
            _, val_pred = val_out.max(dim=1)
            correct = val_pred.eq(batch_data[0].y).sum().item()
            batch_val_acc = correct / len(batch_data[0].y)

            batch_val_losses.append(batch_val_loss)
            batch_val_accuracies.append(batch_val_acc)
            batch_eval_results.append(evaluation(val_pred, batch_data[0].y))

        validation_losses.append(np.mean(batch_val_losses))
        validation_accuracies.append(np.mean(batch_val_accuracies))
        validation_eval_result = merge_batch_eval_list(batch_eval_results)

        print("---------------------" * 3)
        print("eval_result:", validation_eval_result)
        print("---------------------" * 3)

        print(
            "Iter {:03d} | CV {:02d} | Epoch {:05d} | Val_Loss {:.4f} | Val_Accuracy {:.4f}"
            .format(counters['iter'], counters['CV'], epoch,
                    np.mean(batch_val_losses), np.mean(batch_val_accuracies)))
        # append_results("eval result: " + str(eval_result))
        append_results(
            "Iter {:03d} | CV {:02d} | Epoch {:05d} | Val_Loss {:.4f} | Val_Accuracy {:.4f}"
            .format(counters['iter'], counters['CV'], epoch,
                    np.mean(batch_val_losses), np.mean(batch_val_accuracies)))

        early_stopping(validation_losses[-1], model, 'BiGCN', dataset_name,
                       validation_eval_result)
        if early_stopping.early_stop:
            print("Early Stopping")
            validation_eval_result = early_stopping.eval_result
            model.load_state_dict(torch.load(MODEL_PATH, map_location=device))
            model.to(device)
            break

    del val_dataset
    del val_loader

    # --------------------
    #         TEST
    # --------------------

    with torch.cuda.device(device):
        torch.cuda.empty_cache()  # TODO: CHECK
    test_dataset = load_snapshot_dataset_val_or_test(dataset_name, tree_dict,
                                                     x_test)
    test_loader = DataLoader(test_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=5)

    batch_test_losses = []  # epoch
    batch_test_accuracies = []
    batch_test_eval_results = []

    model.eval()
    for batch_data in test_loader:
        snapshots = []
        for i in range(snapshot_num):
            snapshots.append(batch_data[i].to(device))

        with torch.no_grad():
            test_out = model(snapshots,
                             print_attention=True)  # early stopped model
            test_loss = F.nll_loss(test_out, batch_data[0].y)
        del snapshots

        batch_test_loss = test_loss.item()
        _, test_pred = test_out.max(dim=1)

        correct = test_pred.eq(batch_data[0].y).sum().item()
        batch_test_acc = correct / len(batch_data[0].y)

        batch_test_losses.append(batch_test_loss)
        batch_test_accuracies.append(batch_test_acc)
        batch_test_eval_results.append(evaluation(test_pred, batch_data[0].y))

    test_losses.append(np.mean(batch_test_losses))
    test_accuracies.append(np.mean(batch_test_accuracies))
    test_eval_result = merge_batch_eval_list(batch_test_eval_results)

    accs = test_eval_result['acc_all']
    # Per-class F1 scores
    F0 = test_eval_result['C0']['F1']
    F1 = test_eval_result['C1']['F1']
    F2 = test_eval_result['C2']['F1']
    F3 = test_eval_result['C3']['F1']

    counters['CV'] += 1
    losses = [train_losses, validation_losses, test_losses]
    accuracies = [train_accuracies, validation_accuracies, test_accuracies]

    append_results("losses: " + str(losses))
    append_results("accuracies: " + str(accuracies))
    append_results("val eval result: " + str(validation_eval_result))
    append_results("test eval result: " + str(test_eval_result))
    print("Test_Loss {:.4f} | Test_Accuracy {:.4f}".format(
        np.mean(test_losses), np.mean(test_accuracies)))

    print("------------------------------------")
    print("------------------------------------")
    print("------------------------------------")
    print("\n")

    return losses, accuracies, accs, [F0, F1, F2, F3]
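
A sketch of the cross-validation loop this function appears designed for. train_GCN reads several module-level names (settings, lr, weight_decay, batch_size, num_epochs, patience, snapshot_num, device, MODEL_PATH, dataset_name), and tree_dict is assumed to come from the project's data-loading code, so only the fold loop is shown; make_cv_splits is a hypothetical helper standing in for the project's own split logic:

counters = {'iter': 0, 'CV': 0}
for x_train, x_val, x_test in make_cv_splits(dataset_name):  # hypothetical splitter
    losses, accuracies, accs, f1_per_class = train_GCN(
        tree_dict, x_train, x_val, x_test, counters)
    print("fold acc:", accs, "| per-class F1:", f1_per_class)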