コード例 #1
0
def run():
    """Evaluate the pretrained MNIST CNN under additive white Gaussian noise.

    Loads weights from ``mnist_cnn.pt`` into ``main.Net``, prints the model's
    parameter shapes, then reports test accuracy for a fixed sweep of AWGN
    standard deviations.
    """
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    # NOTE(review): with action='store_true' and default=True this flag can
    # never re-enable CUDA — passing --no-cuda is a no-op. Kept as-is to
    # preserve behavior; consider a --use-cuda flag instead.
    parser.add_argument('--no-cuda', action='store_true', default=True,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    # NOTE(review): --test-AWGN and --sigma are parsed but unused below; the
    # sigma sweep is hard-coded. Left in place for CLI compatibility.
    parser.add_argument('--test-AWGN', action='store_true', default=False,
                        help='Test AWGN transformation')
    parser.add_argument('--sigma', type=float, default=0.0, metavar='N',
                        help='standard deviation of AWGN')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    test_kwargs = {'batch_size': args.test_batch_size}

    ## import model from main and import weights
    model = main.Net().to(device)
    model.load_state_dict(torch.load("mnist_cnn.pt"))

    # Print model's state_dict — materialized once instead of rebuilding the
    # OrderedDict on every iteration as the original did.
    state_dict = model.state_dict()
    print("Model's state_dict:")
    for param_tensor, tensor in state_dict.items():
        print(param_tensor, "\t", tensor.size())

    for sigma in [0, 0.3, 0.6, 1.0]:
        # Noise is injected after ToTensor() and before normalization.
        transform = transforms.Compose([transforms.ToTensor(), main.AWGN(sigma), transforms.Normalize((0.1307,), (0.3081,))])

        test_dataset = datasets.MNIST('../data', train=False, transform=transform)

        test_loader = torch.utils.data.DataLoader(test_dataset, **test_kwargs)

        print(f"\nAccuracy for AWGN with a standard deviation of {sigma}")
        main.test(model, device, test_loader)
コード例 #2
0
# PRIMARY MORTGAGE INSURANCE PERCENT
actions[16] = generate_actions(16, list(np.arange(1001, 1500, 99) / 1000), df,
                               False)
# CoBorrower Credit Score
actions[18] = generate_actions(18, list(np.arange(1001, 1500, 99) / 1000), df,
                               False)
# MORTGAGE INSURANCE TYPE
actions[19] = generate_actions(19, list(range(1, 4)), df, True)
# RELOCATION MORTGAGE INDICATOR
actions[20] = generate_actions(20, list(range(1, 3)), df, True)

# Flatten the per-feature action lists into one list, preserving the
# dict's insertion order.
actions_list = [action for group in actions.values() for action in group]

# Average cost across every available action.
MEAN_ACTION_COST = statistics.mean(action.cost for action in actions_list)

features_tensor = torch.from_numpy(features_np_array).type(torch.FloatTensor)
root = Tree(features_tensor)
# Fictive "current_state" action attached to the root so the algorithm runs
# uniformly; it is deleted from the proposed list afterwards.
root.action = Action(
    action_id=0,
    action_name="current_state",
    action_value=0,
    cost_value=0,
    feature_number=0
)

net = main.Net(DROPOUT_RATE=0.1)
net.load_state_dict(torch.load('models/final_weights.pkl', map_location='cpu'))

res = monte_carlo_tree_search(root)
コード例 #3
0
import pandas as pd
import torch
import main

# Hyper parameters
BATCH_SIZE = 20
LEARNING_RATE = 0.001  # The optimal learning rate for the Adam optimizer
BEST_EPOCH = 5
DROPOUT_RATE = 0.1

# Validation split and its loader.
val_dataset = main.Dataset('dataset/prep_unbiased/val.txt')
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=BATCH_SIZE)

# Rebuild the network and restore the checkpoint selected on the validation set.
net = main.Net(DROPOUT_RATE)
checkpoint_path = (
    f'models/split_10_90_batchsize_{BATCH_SIZE}_lr_{LEARNING_RATE}'
    f'_dropout_{DROPOUT_RATE}_epoch_{BEST_EPOCH}.pkl'
)
net.load_state_dict(torch.load(checkpoint_path))

print(main.compute_loss(net, val_dataloader))
print()
def hello():
    """Score a mortgage application supplied via HTTP query parameters.

    Reads the applicant's features from ``request.args``, derives the
    LTV/CLTV/DTI ratios, normalizes the feature vector with the training
    statistics, and runs the pretrained net.  Returns an approval message
    when the predicted class rounds to 0; otherwise runs the Monte-Carlo
    tree search and returns its proposed plan of corrective actions.
    """
    print('hello called')
    # NOTE(review): request.args.get(...) yields None for a missing
    # parameter, which would raise TypeError in the arithmetic below —
    # this route assumes every field is present. Confirm against callers.
    origination_channel = request.args.get('origination_channel', type=float)
    seller_name = request.args.get('seller_name', type=float)
    interest_rate = request.args.get('interest_rate', type=float)
    upb = request.args.get('upb', type=float)
    orig_loan_t = request.args.get('orig_loan_t', type=float)
    total_price = request.args.get('total_price', type=float)
    first_lien = request.args.get('first_lien', type=float)
    current_amount = request.args.get('current_amount', type=float)
    second_amount = request.args.get('second_amount', type=float)
    num_borr = request.args.get('num_borr', type=float)
    monthly_payments = request.args.get('monthly_payments', type=float)
    income = request.args.get('income', type=float)
    borrower_credit_score = request.args.get('borrower_credit_score',
                                             type=float)
    first_time = request.args.get('first_time', type=float)
    loan_purp = request.args.get('loan_purp', type=float)
    num_units = request.args.get('num_units', type=float)
    occ_type = request.args.get('occ_type', type=float)
    # Renamed from `zip` so the builtin zip() is not shadowed.
    zip_code = request.args.get('zip', type=float)
    co_credit_score = request.args.get('co_credit_score', type=float)
    ins_perc = request.args.get('ins_perc', type=float)
    ins_type = request.args.get('ins_type', type=float)
    reloc_ind = request.args.get('reloc_ind', type=float)
    state = request.args.get('state', type=float)

    # Forward slash is portable — the original backslash path only worked
    # on Windows. Row 1 holds the per-column means (row 2, the unused stds,
    # is no longer loaded here).
    stats = pd.read_csv('dataset/statistics.csv')
    stats_mean = stats.iloc[1]

    # Derived ratios (percentages) the model was trained on.
    ltv = upb / total_price * 100
    cltv = (first_lien + current_amount + second_amount) / total_price * 100
    dti = monthly_payments / income * 100
    if interest_rate == -1:
        # -1 is the client's sentinel for "unknown": impute with the mean.
        interest_rate = stats_mean[3]

    # Build the feature vector — order must match the training data layout.
    new_person = np.array([
        origination_channel, seller_name, interest_rate, upb, orig_loan_t, ltv,
        cltv, num_borr, dti, borrower_credit_score, first_time, loan_purp,
        num_units, occ_type, 1.0, state, zip_code, ins_perc, co_credit_score,
        ins_type, reloc_ind
    ])

    # Instantiate the net and restore the trained weights for CPU inference.
    net = main.Net(DROPOUT_RATE=0.0001)
    net.load_state_dict(
        torch.load('models/final_weights.pkl', map_location='cpu'))

    # Normalize with the training statistics and score the applicant.
    new_person_np = new_person
    new_person = pd.DataFrame(new_person).transpose().astype(float)
    new_person_normalized = preprocessor.norm_features(new_person,
                                                       montecarlo.stats)
    new_person_normalized = torch.tensor(new_person_normalized.values).float()
    # Call the module itself (not .forward) so registered hooks still run.
    net_out = float(net(new_person_normalized))

    # Either tell the client the application is approved, or run the
    # Monte-Carlo search to suggest how to change the outcome.
    print(net_out)
    if round(net_out) == 0:
        print("Mortgage request is approved")
        return str("Mortgage request is approved")
    else:
        print("Mortgage request is declined")
        features_tensor = torch.from_numpy(new_person_np).type(
            torch.FloatTensor)
        root = montecarlo.Tree(features_tensor)
        # Fictive action for the root so the algorithm runs uniformly; it is
        # deleted from the proposed list afterwards.
        root.action = montecarlo.Action(
            action_id=0,
            action_name="current_state",
            action_value=0,
            cost_value=0,
            feature_number=0
        )
        plan = montecarlo.monte_carlo_tree_search(root)
        return plan
コード例 #5
0
# -*- coding: utf-8 -*-#

# ---------------------------------------------
# Name:         testModule
# Description:
# Author:       Laity
# Date:         2021/9/29
# ---------------------------------------------
import torch
import main
import getData
import matplotlib.pyplot as plt

# Rebuild the model and restore the saved parameters.
net = main.Net()
state = torch.load('net.pt')
net.load_state_dict(state)

# Held-out evaluation data.
testData, testLabel = getData.getTestData()

# Class labels (Chinese numerals), index-aligned with the net's 15 outputs.
cha = [
    '零', '一', '二', '三', '四', '五', '六', '七', '八', '九', '十', '百', '千', '万', '亿'
]

acc = 0
for i in range(getData.test_size):  # len(testLabel)
    outputs = net(testData[i])
    res = 0
    for j in range(15):
        if outputs[0][j] == max(outputs[0]):
            res = j
            break
    print('=======================')