Example #1
def test(args, ckpt_file):
    print("========== In the test step ==========")
    batch_size = args["batch_size"]

    lr = args["learning_rate"]
    momentum = args["momentum"]
    epochs = args["train_epochs"]
    train_split = args["split_train"]

    CSV_FILE = "./data/mushrooms.csv"
    dataset = MushroomDataset(CSV_FILE)

    test_dataset = torch.utils.data.Subset(
        dataset, list(range(int(train_split * len(dataset)), len(dataset)))
    )

    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

    net = NeuralNet()
    net = net.to(device=device)

    net.load_state_dict(
        torch.load(os.path.join(args["EXPT_DIR"], ckpt_file + ".pth"))["model"]
    )

    net.eval()
    predix = 0
    predictions = {}
    truelabels = {}

    n_val = len(test_dataset)
    with tqdm(total=n_val, desc="Testing round", unit="batch", leave=False) as pbar:
        for step, (batch_x, batch_y) in enumerate(test_loader):
            with torch.no_grad():
                batch_x = batch_x.to(device)
                batch_y = batch_y.to(device)

                prediction = net(batch_x)

            for logit, label in zip(prediction, batch_y):
                predictions[predix] = logit.cpu().numpy().tolist()
                truelabels[predix] = label.cpu().numpy().tolist()
                predix += 1

            pbar.update()

    truelabels_ = []
    predictions_ = []

    for key in predictions:
        if predictions[key][0] > 0.5:
            predictions_.append(1)
        else:
            predictions_.append(0)

    for key in truelabels:
        truelabels_.append(truelabels[key][0])

    truelabels = truelabels_
    predictions = predictions_

    # print("predictions",predictions)

    return {"predictions": predictions, "labels": truelabels}
Example #2

def ShowResult(net, dataReader, title):
    # draw train data
    X, Y = dataReader.XTrain, dataReader.YTrain
    plt.plot(X[:, 0], Y[:, 0], '.', c='b')
    # create and draw visualized validation data
    TX = np.linspace(0, 1, 100).reshape(100, 1)
    TY = net.inference(TX)
    plt.plot(TX, TY, 'x', c='r')
    plt.title(title)
    plt.show()


if __name__ == '__main__':
    dataReader = DataReader(train_data_name, test_data_name)
    dataReader.ReadData()
    dataReader.GenerateValidationSet()

    n_input, n_hidden, n_output = 1, 3, 1
    eta, batch_size, max_epoch = 0.5, 10, 10000
    eps = 0.001

    hp = HyperParameters(n_input, n_hidden, n_output, eta, max_epoch,
                         batch_size, eps, NetType.Fitting,
                         InitialMethod.Xavier)
    net = NeuralNet(hp, "save")

    net.train(dataReader, 50, True)
    net.ShowTrainingHistory()
    ShowResult(net, dataReader, hp.toString())
Example #3
# hyperparameter
batch_size = 8
input_size = len(x_train[0])
hidden_size = 8
output_size = len(tags)
learning_rate = 0.001
num_epochs = 1000

dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=2)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeuralNet(input_size, hidden_size, output_size).to(device)

# loss and optimizer

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

for epoch in range(num_epochs):
    for (words, labels) in train_loader:
        words = words.to(device)
        labels = labels.to(device)

        # forward
        outputs = model(words)
        loss = criterion(outputs, labels)
Example #4
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (i + 1) % 10 == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
                    epoch + 1, 30, i + 1, total_step, loss.item()))

    Q_preds = []
    Q_targets = []
    with torch.no_grad():
        for (hour_features, daily_features), labels in test_dataloader:
            outputs = model((hour_features, daily_features))
            Q_preds.append(outputs.numpy())
            Q_targets.append(labels.numpy())
        Q_preds = np.array(Q_preds)
        Q_targets = np.array(Q_targets)
        Q_preds = Q_preds.reshape((-1, 1))
        Q_targets = Q_targets.reshape((-1, 1))
        print(cv_rmse(torch.from_numpy(Q_preds), torch.from_numpy(Q_targets)))
        print(Q_preds[:24])
        print(Q_targets[:24])

    torch.save(model.state_dict(), 'hournn_' + energy + '.pth')


if __name__ == "__main__":
    # train_hour_model('W')
    model = NeuralNet(4, 256, 1)
    # model = concatNN(20, 256, 3, 1, "add")
    train_daily_model('Q', model, "regression")
Example #5
File: run_me.py  Project: uriyapes/VCL_DC
def get_model(dataset, logger, model_params):
    model = NeuralNet(dataset, logger, model_params.dict)
    return model
Example #6
import cv2
import numpy as np
from model import NeuralNet

net = NeuralNet()

# creating a 600 x 600 pixels canvas for mouse drawing
canvas = np.ones((600, 600), dtype="uint8") * 255
# designating a 400 x 400 pixels point of interest on which digits will be drawn
canvas[100:500, 100:500] = 0

start_point = None
end_point = None
is_drawing = False


def draw_line(img, start_at, end_at):
    cv2.line(img, start_at, end_at, 255, 15)


def on_mouse_events(event, x, y, flags, params):
    global start_point
    global end_point
    global canvas
    global is_drawing
    if event == cv2.EVENT_LBUTTONDOWN:
        is_drawing = True
        start_point = (x, y)
    elif event == cv2.EVENT_MOUSEMOVE:
        if is_drawing:
            end_point = (x, y)
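    # --- hedged completion sketch: the snippet is cut off here; a typical
    # --- finish for this handler, following the standard OpenCV pattern ---
    elif event == cv2.EVENT_LBUTTONUP:
        is_drawing = False
        if start_point is not None and end_point is not None:
            draw_line(canvas, start_point, end_point)


# Typical wiring for the handler above; the window name is an assumption.
cv2.namedWindow("canvas")
cv2.setMouseCallback("canvas", on_mouse_events)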
Example #7
# test_dataset = BF.BreakfastNaive(visual_feat_path_test, text_path_test, map_path)
print("Test set loaded")

print("Dataset size")
print(len(train_dataset))
print(len(test_dataset))

train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)

# MODEL
model = NeuralNet(input_size, hidden_size, num_classes).to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# TRAIN
total_step = len(train_loader)
for epoch in range(num_epochs):
    correct_predictions = 0
    total_predictions = 0
    for i, data_batch in enumerate(train_loader):
        vis_feats = data_batch['vis_feats'].to(device)
        labels = data_batch['labels'].to(
            device)  # TODO: don't send one of them to the GPU and see what happens?
        # print("DEBUG vis feats", vis_feats.size())
Example #8
print('Using bayesian ridge regression to evaluate...')
bay_reg = LinearModel.BayesianRegression(X_train, y_train, X_test, y_test,
                                         cross_validation_k)
accuracy, f1_score, bay_reg_y_prediction, bay_reg_cv_score = bay_reg.evaluate(
    penalty='l1')
print('Achieved accuracy of %s and f1 score of %s' % (accuracy, f1_score))

print('Using decision tree to evaluate...')
tree = TreeModel.DecisionTree(X_train, y_train, X_test, y_test,
                              cross_validation_k)
accuracy, f1_score, dec_tree_y_prediction, dec_tree_cv_score = tree.evaluate(
    create_image=False)
print('Achieved accuracy of %s and f1 score of %s' % (accuracy, f1_score))

print('Using neural net to evaluate...')
nn = NeuralNet.DeepModel(X_train, y_train, X_validation, y_validation, X_test,
                         y_test)
accuracy, f1_score, nn_y_predictions = nn.evaluate()
print('Achieved accuracy of %s and f1 score of %s' % (accuracy, f1_score))

print('Using optimized neural net to evaluate...')
opt_nn = NeuralNet.OptimizedDeepModel(db_name=database)
accuracy, f1_score, optimized_nn_y_predictions = opt_nn.evaluate()
print('Achieved accuracy of %s and f1 score of %s' % (accuracy, f1_score))

linewidth = 1
# Create ROC figure
plt.figure(1, dpi=3600, figsize=(8, 5))

fpr, tpr, thresholds = roc_curve(y_test, lin_reg_y_prediction)
plt.plot(fpr, tpr, lw=linewidth, label='linear regression')
Example #9
def main(read_dir="high.json", write_dir="high.pth"):

    base_json_dir = "resource/jsonFile/" + read_dir
    base_pth_dir = "resource/pthFile/" + write_dir

    with open(base_json_dir, "r", encoding="UTF-8") as file:
        intents = json.load(file)

    all_words = []
    tags = []
    xy = []

    for intent in intents['intents']:
        tag = intent['tag']
        tags.append(tag)
        for pattern in intent['patterns']:
            w = tokenize(pattern)
            all_words.extend(w)
            xy.append((w, tag))

    ignore_word = [",", ".", "'", '"', "?", "!", "^", "@", "#", "_", "-",
                   "~"]  #we need, regular expression
    all_words = [stem(w) for w in all_words
                 if w not in ignore_word]  #this is better than using map
    all_words = sorted(set(all_words))
    tags = sorted(set(tags))  # for order

    X_train = []
    Y_train = []
    for (pattern_sentence, tag) in xy:
        bag = bag_of_words(pattern_sentence, all_words)
        X_train.append(bag)

        label = tags.index(tag)
        Y_train.append(label)

    X_train = np.array(X_train)
    Y_train = np.array(Y_train)

    # Hyper-parameters
    num_epochs = 1000
    batch_size = 8
    learning_rate = 0.001
    input_size = len(X_train[0])
    hidden_size = 8
    output_size = len(tags)

    class ChatDataset(Dataset):
        def __init__(self):
            self.n_samples = len(X_train)
            self.x_data = X_train
            self.y_data = Y_train

        # support indexing such that dataset[i] can be used to get i-th sample
        def __getitem__(self, index):
            return self.x_data[index], self.y_data[index]

        # we can call len(dataset) to return the size
        def __len__(self):
            return self.n_samples

    dataset = ChatDataset()
    train_loader = DataLoader(dataset=dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=0)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model = NeuralNet(input_size, hidden_size, output_size).to(device)

    # Loss and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # Train the model
    for epoch in range(num_epochs):
        for (words, labels) in train_loader:
            words = words.to(device)
            labels = labels.to(dtype=torch.long).to(device)

            # Forward pass
            outputs = model(words)
            # if y would be one-hot, we must apply
            # labels = torch.max(labels, 1)[1]
            loss = criterion(outputs, labels)

            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        if (epoch + 1) % 100 == 0:
            print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

    print(f'final loss: {loss.item():.4f}')

    data = {
        "model_state": model.state_dict(),
        "input_size": input_size,
        "hidden_size": hidden_size,
        "output_size": output_size,
        "all_words": all_words,
        "tags": tags
    }

    torch.save(data, base_pth_dir)

    print(f'training complete. model saved to {base_pth_dir}')
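Most of the chat examples in this listing construct NeuralNet(input_size, hidden_size, output_size) without showing the class. A minimal sketch of the kind of model they assume (a plain feed-forward net with ReLU; each project's actual model.py may differ in depth and activations):

import torch.nn as nn

class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.l1 = nn.Linear(input_size, hidden_size)
        self.l2 = nn.Linear(hidden_size, hidden_size)
        self.l3 = nn.Linear(hidden_size, output_size)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.relu(self.l1(x))
        out = self.relu(self.l2(out))
        # raw logits: nn.CrossEntropyLoss applies log-softmax internally
        return self.l3(out)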
Example #10
    attributes = json.load(f)

# Load the credentials saved during training

FILE = "data.pth"
data = torch.load(FILE)

# load all the saved values
inputSize = data["input_size"]
outputSize = data["output_size"]
hiddenSize = data["hidden_size"]
allWords = data["allWords_size"]
tags = data["tags"]
modelState = data["modelState"]

model = NeuralNet(inputSize, hiddenSize, outputSize)
# synthesize next Html tag or attribute provided


def synthesizeTag(sentence, botName, recType):
    # load the statedictionary
    model.load_state_dict(modelState)
    model.eval()

    # tokenize find BOG predict the class for new sentence
    x = tokenizeAndStemSpoken(sentence, allWords)

    # find the predicted output
    output = model(x)
    _, predicted = torch.max(output, dim=1)
    tag = tags[predicted.item()]
Example #11
def run_simulation(verbose=False,
                   LRs=[0.025],
                   save_model=[],
                   learn=[],
                   plot=True,
                   save_experience=[]):
    """
    Runs the simulation with the given parameters
    """
    EPOCHS = 100
    n_plots = 4
    n = int(EPOCHS / n_plots)

    moves_per_game = 20

    for LR in LRs:
        avg_rewards_goat = []
        avg_rewards_tiger = []
        avg_loss_goat = []
        avg_loss_tiger = []

        if plot and len(learn) != 0:
            plt.ion()
            iters = []
            tiger_plot = PeriodicPlotter('r', x_lim=(0, EPOCHS), y_lim=(0, 10))
            goat_plot = PeriodicPlotter('b', x_lim=(0, EPOCHS), y_lim=(0, 10))

        # Choosing which model to use
        tigerModel = NeuralNet()
        # tigerModel = th.load('model-tiger-big.pt')
        goatModel = NeuralNet()
        # goatModel = th.load('goatModel-learn.pt')

        for i in tqdm(range(EPOCHS)):
            # Initialize this round of game
            brd = Board()
            brd.init_game()

            # Initialize the agents and set their model
            goat_ = GoatAgent(brd, LR=LR)
            goat_.set_model(goatModel)

            tiger_ = TigerAgent(brd, LR)
            tiger_.set_model(tigerModel)

            players = [goat_, tiger_]

            if verbose:
                if i != 0:
                    sys.stdout.write("\033[6F")

                print(brd)

            over = False  # Whether the current game is over or not
            for _ in range(moves_per_game):

                for player in players:
                    try:
                        player.make_move()
                        # print('player', player)

                        if verbose:
                            sys.stdout.write("\033[6F")
                            print(brd)
                            time.sleep(0.5)
                    except Exception as e:  # When the player doesn't have a move
                        over = True
                        break
                if over:
                    break

            # Save the experience from this iteration
            if Goat in save_experience:
                goat_.save_experience('experience-goat-dqn-test.txt')

            if Tiger in save_experience:
                tiger_.save_experience('experience-tiger-dqn-test.txt')

            # Aggregate the total reward in this round

            # avg_rewards_goat.append(goat_reward/len(goat_.data))

            # Learn from this experience
            if plot and i % n == 0:
                iters.append(i)

            if Goat in learn:
                goat_.prepare_data()
                goat_.learn()

                goat_reward = 0
                for exp in goat_.data:
                    goat_reward += exp[-1]

                if plot and i % n == 0:
                    loss_goat = goat_.test('experience-goat-dqn-test.txt')
                    avg_loss_goat.append(loss_goat)
                    goat_plot.plot(iters, avg_loss_goat)

            if Tiger in learn:
                tiger_.prepare_data()
                tiger_.learn()

                tiger_reward = 0
                for exp in tiger_.data:
                    tiger_reward += exp[-1]

                if plot and i % n == 0:
                    loss_tiger = tiger_.test('experience-tiger-test.txt')
                    avg_rewards_tiger.append(tiger_reward / len(tiger_.data))
                    avg_loss_tiger.append(loss_tiger)
                    tiger_plot.plot(iters, avg_loss_tiger)

    if plot:
        # plt.legend(['LR: %f'% LR for LR in LRs])
        plt.show()
        plt.savefig('result1.png')

    # Save the currently trained model
    if Goat in save_model:
        th.save(goatModel, 'model-goat-dqn.pt')

    if Tiger in save_model:
        th.save(tigerModel, 'model-tiger-dqn.pt')
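A hypothetical invocation; Goat and Tiger are the agent-type markers the function checks for via membership in learn, save_model, and save_experience:

run_simulation(verbose=False, LRs=[0.025], learn=[Goat, Tiger],
               save_model=[Goat, Tiger], plot=True)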
Example #12
"""
Here we are predicting output for fizzbuzz game
with our neural network
"""
import numpy as np

from model import NeuralNet
from generate_data import inputs, labels

# Min is 1 and max is 1024
first = 101
last = 1024
X = inputs(first, last)
y = labels(first, last)
model = NeuralNet(input_shape=(10, ))
model.compile(lr=0.001)
model.train(X, y, batch_size=32, epochs=1000)

first_test = 1
last_test = 100
X_test = inputs(first_test, last_test)
y_pred = model.predict(X_test)
for i in range(last_test - first_test + 1):
    n = first_test + i
    if y_pred[i] == 0:
        pred = n
    elif y_pred[i] == 1:
        pred = "fizz"
    elif y_pred[i] == 2:
        pred = "buzz"
    else:
        pred = "fizzbuzz"
    print(pred)
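The snippet relies on inputs() and labels() from generate_data. From input_shape=(10,) and the 0-3 class handling above, a plausible sketch (an assumption, not the project's actual code):

import numpy as np

def inputs(first, last):
    # 10-bit binary encoding of each integer in [first, last]
    return np.array([[(n >> d) & 1 for d in range(10)]
                     for n in range(first, last + 1)], dtype="float32")

def labels(first, last):
    # 0 = plain number, 1 = fizz, 2 = buzz, 3 = fizzbuzz
    def lab(n):
        if n % 15 == 0:
            return 3
        if n % 3 == 0:
            return 1
        if n % 5 == 0:
            return 2
        return 0
    return np.array([lab(n) for n in range(first, last + 1)])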
Example #13
    output_size = len(tags)
    input_size = len(X_train[0])
    learning_rate = 0.00025
    num_epochs = 1000

    dataset = ChatDataset()
    train_loader = DataLoader(dataset=dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=2)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    print("Using device: ", device)

    model = NeuralNet(input_size, hidden_size, output_size).to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(),
                                 learning_rate,
                                 weight_decay=.005)

    print("Starting training")
    for epoch in range(num_epochs):
        for (words, labels) in train_loader:
            words = words.to(device)
            labels = labels.to(dtype=torch.long).to(device)

            outputs = model(words)
            loss = criterion(outputs, labels)
Example #14
batchSize = 8
learningRate = 0.001
numberOfEpochs = 2000

inputSize = len(XTrain[0])
hiddenSize = 8
outputSize = len(tags)

dataset = ChatDataset()
trainLoader = DataLoader(dataset=dataset,
                         batch_size=batchSize,
                         shuffle=True,
                         num_workers=0)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeuralNet(inputSize, hiddenSize, outputSize).to(device)

# loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learningRate)

for epoch in range(numberOfEpochs):
    for (words, labels) in trainLoader:
        words = words.to(device)
        labels = labels.to(device)
        labels = labels.to(dtype=torch.long)

        # forward
        outputs = model(words)
        loss = criterion(outputs, labels)
Example #15
    value = np.random.randint(2, size=(N, ))
    value = value.astype('float32')

    end = np.random.randint(2, size=(N, 19, 19, 2))
    end = end.astype('float32')

    golois.getBatch(input_data, policy, value, end)
else:
    input_data = np.load('./input_data.npy')
    policy = np.load('./policy.npy')
    value = np.load('./value.npy')
    end = np.load('./end.npy')

GoNeuralNet = NeuralNet(config.REG_CONST, config.LEARNING_RATE,
                        (19, 19, planes), moves, config.HIDDEN_CNN_LAYERS,
                        config.MOMENTUM)

GoNeuralNet.summary()

GoNeuralNet.fit(input_data, {
    'policy': policy,
    'value': value
},
                epochs=config.EPOCHS,
                verbose=1,
                validation_split=0.1,
                batch_size=config.BATCH_SIZE)

GoNeuralNet.save_model(0.1)
Example #16
    ### SET-UP DATASET ###
    train_files = list(
        pathlib.Path(f'{args.samples_path}/train').glob('sample_*.pkl'))
    valid_files = list(
        pathlib.Path(f'{args.samples_path}/valid').glob('sample_*.pkl'))

    log(f"{len(train_files)} training samples", logfile)
    log(f"{len(valid_files)} validation samples", logfile)

    train_files = [str(x) for x in train_files]

    valid_files = [str(x) for x in valid_files]
    valid_data = LazyDataset(valid_files)
    valid_data = DataLoader(valid_data, batch_size=valid_batch_size)
    model = NeuralNet(device).to(device)

    # Loss and optimizer
    criterion = nn.CrossEntropyLoss().to(device)
    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    elif args.optimizer == 'RMSprop':
        optimizer = torch.optim.RMSprop(model.parameters(), lr=lr)
    else:
        raise Exception('Invalid optimizer')

    # Multiplies lr by 0.2 each time .step() is called
    lr_scheduler = ExponentialLR(optimizer, 0.2)
    ### TRAINING LOOP ###
    best_loss = np.inf
    plateau_count = 0
Example #17
import cv2
import numpy as np
from model import NeuralNet

net = NeuralNet()

# creating a 600 x 600 pixels canvas for mouse drawing
canvas = np.ones((600, 600), dtype="uint8") * 255
# designating a 400 x 400 pixels point of interest on which digits will be drawn
canvas[100:500, 100:500] = 0

start_point = None
end_point = None
is_drawing = False


def draw_line(img, start_at, end_at):
    cv2.line(img, start_at, end_at, 255, 15)


def on_mouse_events(event, x, y, flags, params):
    global start_point
    global end_point
    global canvas
    global is_drawing
    if event == cv2.EVENT_LBUTTONDOWN:
        is_drawing = True
        if is_drawing:
            start_point = (x, y)
    elif event == cv2.EVENT_MOUSEMOVE:
        if is_drawing:
            end_point = (x, y)
Example #18
    with open(result_file, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        policy_type = 'gcnn'
        policy_name = 'baseline'

        print(f"{policy_type}:{policy_name}...")
        for seed in seeds:
            rng = np.random.RandomState(seed)
            torch.manual_seed(rng.randint(np.iinfo(int).max))

            policy = {}
            policy['name'] = policy_name
            policy['type'] = policy_type

            policy['model'] = NeuralNet(device).to(device)
            policy['model'].load_state_dict(torch.load(f"trained_models/{args.problem}/baseline/{seed}/{args.lr}/{args.optimizer}/best_params.pkl"))

            test_data = LazyDataset(test_files)
            test_data = DataLoader(test_data, batch_size=test_batch_size)

            policy['model'].eval()
            with torch.no_grad():
                test_kacc = process(policy, test_data, top_k)
            print(f"  {seed} " + " ".join([f"acc@{k}: {100*acc:4.1f}" for k, acc in zip(top_k, test_kacc)]))

            writer.writerow({
                **{
                    'policy': f"{policy['type']}:{policy['name']}",
                    'seed': seed,
                },
Example #19
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
with open('intents.json', 'r') as f:
    intents = json.load(f)

FILE = "data.pth"
data = torch.load(FILE)

input_size = data["input_size"]
hidden_size = data["hidden_size"]
output_size = data["output_size"]

all_words = data["all_words"]
tags = data["tags"]
model_state = data["model_state"]

model = NeuralNet(input_size=input_size, hidden_size=hidden_size, num_classes=output_size).to(device)
model.load_state_dict(model_state)
model.eval()

bot_name = "Venancio"
print("Chatiamos! escribe 'salir' para salir")
while True:
    sentence = input('Tu: ')
    if sentence == "salir":
        break

    sentence = tokenize(sentence)
    X = bag_of_words(sentence, all_words)
    X = X.reshape(1, X.shape[0])
    X = torch.from_numpy(X)
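    # --- hedged continuation sketch: the loop is cut off here; this follows
    # --- the inference pattern shown in Examples #10 and #21 ---
    output = model(X)
    _, predicted = torch.max(output, dim=1)
    tag = tags[predicted.item()]
    probs = torch.softmax(output, dim=1)
    prob = probs[0][predicted.item()]
    if prob.item() > 0.75:  # confidence threshold is an assumption
        print(f"{bot_name}: [response for tag '{tag}']")
    else:
        print(f"{bot_name}: [no confident match]")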
Example #20
X_train = np.array(X_train)
y_train = np.array(y_train)

# Hyperparameters
batch_size = 8
hidden_size = 8
output_size = len(tags)
input_size = len(X_train[0]) #len(all_words)
learning_rate = 0.001
num_epochs = 1000

dataset = ChatDataset(X_train, y_train)
train_loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, num_workers=0)

device = torch_device('cuda' if torch_cuda.is_available() else 'cpu')
model = NeuralNet(input_size, hidden_size, output_size).to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=learning_rate)

# Our training loop
for epoch in range(num_epochs):
    for (words, labels) in train_loader:
        words = words.to(device)
        labels = labels.to(device)

        # Forward
        outputs = model(words)
        loss = criterion(outputs, labels.long())
Example #21
model_state = torch.load(FILE)
chat_data = pd.read_csv("database.csv")
response_data = {
    tag.strip(): sentence
    for tag, sentence in zip(chat_data["tag"].values,
                             chat_data["sentence"].values)
}

input_size = model_state["input_size"]
hidden_size = model_state["hidden_size"]
output_size = model_state["output_size"]
all_words = model_state["all_words"]
tags = model_state["tags"]
model_state = model_state["model_state"]

model = NeuralNet(input_size, hidden_size, output_size)

model.load_state_dict(model_state)

while True:
    sentence = input("Humano:")
    X = bag_of_words(tokenize(sentence), all_words)
    X = X.reshape(1, X.shape[0])
    X = torch.from_numpy(X)
    output = model(X)

    # Get the maximum output value
    value, predicted = torch.max(output, dim=1)

    # Compute how confident that maximum value is
    probs = torch.softmax(output, dim=1)
Example #22
    for e, b in zip(en, body):
        print(d)
        print([id2word_snli[word_id] for word_id in head])
        print([id2word_snli[word_id] for word_id in b])
        print(e)
'''
# embedding = preprocess.embedding
vocab_size = len(preprocess.word2id)
print("vocab_size:{0}".format(vocab_size))

device = torch.device(
    'cuda:{0}'.format(gpu) if torch.cuda.is_available() else 'cpu')
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if argvs[1] == 'NN':
    model = NeuralNet(input_size, hidden_size, num_layers, num_classes,
                      vocab_size, emb_size, dropout_rate, cosine_similarity,
                      device).to(device)
if argvs[1] == 'LSTM':
    model = LSTM(input_size, hidden_size, num_layers, num_classes, vocab_size,
                 emb_size, embedding, dropout_rate, cosine_similarity,
                 device).to(device)
if argvs[1] == 'CE':
    model = CE(input_size, hidden_size, num_layers, num_classes, vocab_size,
               emb_size, embedding, dropout_rate, cosine_similarity,
               device).to(device)
if argvs[1] == 'ACE':
    model = ACE(input_size, hidden_size, num_layers, num_classes, vocab_size,
                emb_size, embedding, dropout_rate, cosine_similarity,
                device).to(device)
if argvs[1] == 'WACE':
    model = WACE(input_size, hidden_size, num_layers, num_classes, vocab_size,
                 emb_size, embedding, dropout_rate, cosine_similarity,
                 device).to(device)
Example #23
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

with open('intents.json', 'r') as json_data:
    intents = json.load(json_data)

FILE = "data.pth"
data = torch.load(FILE)

input_size = data["input_size"]
hidden_size = data["hidden_size"]
output_size = data["output_size"]
all_words = data['all_words']
tags = data['tags']
model_state = data["model_state"]

model = NeuralNet(input_size, hidden_size, output_size).to(device)
model.load_state_dict(model_state)
model.eval()

bot_name = "Mahasiswa Unpam"
print("Mulai Chat! (type 'quit' to exit)")
while True:
    # sentence = "do you use credit cards?"
    sentence = input("Kamu: ")
    if sentence == "quit":
        break

    sentence = tokenize(sentence)
    X = bag_of_words(sentence, all_words)
    X = X.reshape(1, X.shape[0])
    X = torch.from_numpy(X).to(device)
Example #24
# %%
# Looking at one element of the dataset
example = iter(train_loader)
samples, labels = next(example)
print(samples.shape, labels.shape)

for i in range(9):
    plt.subplot(3, 3, i + 1)
    plt.imshow(samples[i][0], cmap='gray')
plt.show()

# %%
# Traning the model
model = NeuralNet(input_size=input_size,
                  hidden_size=hidden_size,
                  output_size=num_classes,
                  lr=lr).to(device)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr)

for epoch in range(num_epochs):

    for i, (samples, labels) in enumerate(train_loader):

        # Reshaping the images
        samples = samples.reshape(-1, input_size).to(device)
        labels = labels.to(device)

        predictions = model(samples)
        loss = criterion(predictions, labels)
Example #25
    def __len__(self):
        return self.n_samples
    
batch_size = 8
hidden_size = 8
output_size = len(tags)
input_size = len(X_train[0])
learning_rate = 0.001
num_epochs = 100

    
dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset, batch_size=batch_size,
                          shuffle=True, num_workers=0)

device = 'cpu'
model = NeuralNet(input_size, hidden_size, output_size).to(device)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

for epoch in range(num_epochs):
    for (words, labels) in train_loader:
        words = words.to(device)
        labels = labels.to(device)

        outputs = model(words)
        loss = criterion(outputs, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
Example #26
if __name__ == "__main__":

    data = []
    labels = []

    print("Loading Data")
    imagePaths = sorted(list(paths.list_images("dataset")))
    random.seed(42)
    random.shuffle(imagePaths)

    # loop over the input images
    for imagePath in imagePaths:
        image = cv2.imread(imagePath)
        image = cv2.resize(image, (IMAGE_DIMS[0], IMAGE_DIMS[1]))
        image = img_to_array(image)
        data.append(image)

        # update labels list
        label = imagePath.split(os.path.sep)[-2]
        labels.append(label)

    data = np.array(data, dtype="float") / 255.0
    labels = np.array(labels)

    for i in range(len(labels)):
        labels[i] = nameToNum[labels[i]]

    labels = to_categorical(labels, num_classes=2)

    model = NeuralNet(data, labels)
Example #27
def train(cpu, args):
    rank = args.nr * args.cpus + cpu
    dist.init_process_group(
        backend="gloo",
        init_method="file:///Users/thanhsom/Downloads/computer_network_extra/model/setup.txt",
        world_size=args.world_size,
        rank=rank
    )
    torch.manual_seed(0)

    # Hyperparameters:
    batch_size = 100  # NOTE: if you run out of memory, try 64 or 32
    learning_rate = 0.0001

    # Create model:
    model = NeuralNet()
    # Define loss function and optimizer:
    loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), learning_rate)
    # Wrap the model for ddp:
    model = nn.parallel.DistributedDataParallel(model, device_ids=None)
    # Data loading:
    train_dataset = torchvision.datasets.MNIST(
        root='./data',
        train=True,
        transform=transforms.ToTensor(),
        download=True,
    )

    train_size = int(args.scale * len(train_dataset))
    test_size = len(train_dataset) - train_size

    train_dataset = torch.utils.data.random_split(train_dataset, [train_size, test_size])[0]
    # Sampling the dataset to avoid same inputs order:
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset,
        num_replicas=args.world_size,
        rank=rank
    )
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        shuffle=False,
        num_workers=0,
        pin_memory=True,
        sampler=train_sampler
    )

    start = datetime.now()
    total_step = len(train_loader)
    lossVal = []
    scale = 10
    print(args.epochs)
    for epoch in range(args.epochs):
        for i, (images, labels) in enumerate(train_loader):
            # Forward pass:
            outputs = model(images)
            loss = loss_fn(outputs, labels)

            # Backward pass:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # For logging:
            if (i + 1) % batch_size == 0 and cpu == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format
                      (epoch + 1, args.epochs, i + 1, total_step, loss.item()))

    if cpu == 0:
        print("Training completed in: " + str(datetime.now() - start))

    PATH = "trained/"+args.type+"/"+args.address+".pt"
    torch.save(model, PATH)
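A hedged launcher sketch for the train(cpu, args) worker above, using the standard torch.multiprocessing entry point; the flags and defaults below are assumptions that merely mirror the fields the function body reads:

import argparse
import torch.multiprocessing as mp

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--nr", type=int, default=0)
    parser.add_argument("--cpus", type=int, default=2)
    parser.add_argument("--scale", type=float, default=1.0)
    parser.add_argument("--epochs", type=int, default=1)
    parser.add_argument("--type", default="cnn")
    parser.add_argument("--address", default="worker0")
    args = parser.parse_args()
    args.world_size = args.cpus  # single-node run; an assumption
    # spawn one process per CPU; each calls train(rank, args)
    mp.spawn(train, nprocs=args.cpus, args=(args,))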
Example #28
def main():
    bet_tracker = BetTracker()

    match_data = read_match_data(season='2017-2018')

    player_data = read_player_data(season='2017-2018')

    net = NeuralNet()

    bank = [100]

    all_odds = []

    for match in match_data:

        print(match['info']['date'], match['info']['home team'],
              match['info']['away team'])

        home_players_matched = match_lineups_to_fifa_players(
            match['info']['home lineup names'],
            match['info']['home lineup numbers'],
            match['info']['home lineup nationalities'],
            constants.LINEUP_TO_PLAYER_TEAM_MAPPINGS['ALL']
            [match['info']['home team']], match['info']['season'], player_data)
        away_players_matched = match_lineups_to_fifa_players(
            match['info']['away lineup names'],
            match['info']['away lineup numbers'],
            match['info']['away lineup nationalities'],
            constants.LINEUP_TO_PLAYER_TEAM_MAPPINGS['ALL']
            [match['info']['away team']], match['info']['season'], player_data)

        home_feature_vector = create_feature_vector_from_players(
            home_players_matched)
        away_feature_vector = create_feature_vector_from_players(
            away_players_matched)

        feature_vector = np.array(home_feature_vector +
                                  away_feature_vector).reshape(-1, 36)

        feature_vector = normalise_features(feature_vector)

        probabilities = net.predict(feature_vector)

        pred_home_odds, pred_draw_odds, pred_away_odds = [
            1 / x for x in probabilities[0]
        ]

        home_odds, draw_odds, away_odds = match['info']['home odds'], match[
            'info']['draw odds'], match['info']['away odds']

        all_odds.append((pred_home_odds, home_odds))
        all_odds.append((pred_away_odds, away_odds))

        if pred_home_odds < home_odds < 3.2 and 0.02 <= probabilities[0][
                0] - 1 / home_odds:
            stake = calculate_stake(home_odds,
                                    probability=1 / pred_home_odds,
                                    method='kelly') * bet_tracker.bankroll
            profit = stake * home_odds - stake
            bet = Bet(true_odds=home_odds,
                      predicted_odds=pred_home_odds,
                      stake=stake,
                      profit=profit,
                      match=match,
                      type='home')
            bet_tracker.make_bet(bet)
            if match['info']['home goals'] > match['info']['away goals']:
                bet_tracker.bet_won()
            else:
                bet_tracker.bet_lost()
            bank.append(bet_tracker.bankroll)
        elif pred_away_odds < away_odds < 3.2 and 0.02 <= probabilities[0][
                2] - 1 / away_odds:
            stake = calculate_stake(away_odds,
                                    probability=1 / pred_away_odds,
                                    method='kelly') * bet_tracker.bankroll
            profit = stake * away_odds - stake
            bet = Bet(true_odds=away_odds,
                      predicted_odds=pred_away_odds,
                      stake=stake,
                      profit=profit,
                      match=match,
                      type='away')
            bet_tracker.make_bet(bet)
            if match['info']['home goals'] < match['info']['away goals']:
                bet_tracker.bet_won()
            else:
                bet_tracker.bet_lost()
            bank.append(bet_tracker.bankroll)

    return bet_tracker, bank, all_odds
Example #29
    args = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # set the seed for generating random numbers
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)

    # get train loader
    train_loader = _get_train_loader(args.batch_size,
                                     args.data_dir)  # data_dir from above..

    ## TODO:  Build the model by passing in the input params
    # To get params from the parser, call args.argument_name, e.g. args.epochs or args.hidden_dim
    # Don't forget to move your model .to(device) to move to GPU , if appropriate
    model = NeuralNet(args.input_dim, args.hidden_dim,
                      args.output_dim).to(device)

    # Given: save the parameters used to construct the model
    save_model_params(model, args.model_dir)

    ## TODO: Define an optimizer and loss function for training
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    criterion = nn.MSELoss()

    # Trains the model (given line of code, which calls the above training function)
    # This function *also* saves the model state dictionary
    train(model, train_loader, args.epochs, optimizer, criterion, device)
Example #30
def train(args, labeled, resume_from, ckpt_file):
    print("========== In the train step ==========")
    batch_size = args["batch_size"]
    lr = args["learning_rate"]
    momentum = args["momentum"]
    epochs = args["train_epochs"]

    train_split = args["split_train"]

    CSV_FILE = "./data/mushrooms.csv"
    dataset = MushroomDataset(CSV_FILE)

    train_dataset = torch.utils.data.Subset(
        dataset, list(range(int(train_split * len(dataset))))
    )

    train_subset = Subset(train_dataset, labeled)

    train_loader = DataLoader(train_subset, batch_size=batch_size, shuffle=True)

    net = NeuralNet()
    net = net.to(device=device)

    criterion = torch.nn.BCELoss()
    optimizer = optim.SGD(net.parameters(), lr=float(lr), momentum=momentum)

    if resume_from is not None:
        ckpt = torch.load(os.path.join(args["EXPT_DIR"], resume_from + ".pth"))
        net.load_state_dict(ckpt["model"])
        optimizer.load_state_dict(ckpt["optimizer"])
    else:
        getdatasetstate(args)

    net.train()

    for epoch in tqdm(range(args["train_epochs"]), desc="Training"):

        running_loss = 0

        for i, batch in enumerate(train_loader, start=0):
            data, labels = batch

            data = data.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            output = net(data)
            loss = criterion(output, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            if (i + 1) % 1000 == 0:
                print(
                    "epoch: {} batch: {} running-loss: {}".format(
                        epoch + 1, i + 1, running_loss / 1000
                    ),
                    end="\r",
                )
                running_loss = 0

    print("Finished Training. Saving the model as {}".format(ckpt_file))

    ckpt = {"model": net.state_dict(), "optimizer": optimizer.state_dict()}
    torch.save(ckpt, os.path.join(args["EXPT_DIR"], ckpt_file + ".pth"))

    return
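A sketch of a full cycle pairing this train() with the test() from Example #1, assuming the module-level device and getdatasetstate helpers the functions reference are defined; every value below is a placeholder, not taken from the project:

args = {
    "batch_size": 64, "learning_rate": 0.01, "momentum": 0.9,
    "train_epochs": 5, "split_train": 0.8, "EXPT_DIR": "./experiments",
}
# train on the first 1000 labeled rows, then evaluate the saved checkpoint
train(args, labeled=list(range(1000)), resume_from=None, ckpt_file="ckpt_0")
results = test(args, "ckpt_0")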