Example #1
0
def run_training():
    """Read hyperparameters from the UI widgets, run the rule search,
    and display the resulting accuracy, rule string, and complexity.
    """
    # Optionally pin the RNG so repeated runs are reproducible.
    if use_same_seed_checkbox.isChecked():
        model.set_seed(int(pyro_seed.text()))

    model.tau = float(tau.text())
    model.complexity_lower_bound = float(complexity_lower_bound.text())

    # Re-sample the training data only when the user asked for it.
    if refresh_data_checkbox.isChecked():
        model.refresh_data()

    global rule
    steps = int(training_steps.text())
    train_acc, rule = model.search_rule(steps)

    # Push the results back into the result widgets.
    training_accuracy.setText(f"Training accuracy: {train_acc}")
    training_rule.setText("Learned rule: \n              " + model.rule_string)
    training_complexity.setText(f"Final rule complexity: {model.rule_complexity}")
Example #2
0
def main():
    """Load a saved ConvRNN model and report its SST test-set accuracy."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--no_cuda", action="store_true", default=False)
    parser.add_argument("--input_file", default="saves/model.pt", type=str)
    parser.add_argument("--data_dir", default="data", type=str)
    parser.add_argument("--gpu_number", default=0, type=int)
    args = parser.parse_args()

    # Fixed seed so the evaluation run is reproducible.
    model.set_seed(5, no_cuda=args.no_cuda)
    loader = data.SSTDataLoader(args.data_dir)
    net = torch.load(args.input_file)
    if not args.no_cuda:
        torch.cuda.set_device(args.gpu_number)
        net.cuda()

    # Only the test split is needed here.
    _, _, test_set = loader.load_sst_sets()

    net.eval()
    batch_in, batch_out = net.convert_dataset(test_set)
    scores = net(batch_in)
    predictions = torch.max(scores, 1)[1].view(len(test_set))
    n_correct = (predictions.data == batch_out.data).sum()
    accuracy = n_correct / len(test_set)
    print("Test set accuracy: {}".format(accuracy))
Example #3
0
File: train.py — Project: wibruce/castor
def train(**kwargs):
    """Train a ConvRNN sentiment model on the SST dataset.

    Expected kwargs: ``mbatch_size``, ``n_epochs``, ``restore``, ``quiet``,
    ``lr``, ``weight_decay``, ``seed``, ``no_cuda``, ``gpu_number``,
    ``input_file`` (checkpoint to resume from) and ``output_file`` (where
    the best dev model is saved). Remaining kwargs are forwarded to
    ``model.ConvRNNModel``.

    Returns the best dev-set accuracy observed during training.
    """
    mbatch_size = kwargs["mbatch_size"]
    n_epochs = kwargs["n_epochs"]
    restore = kwargs["restore"]
    verbose = not kwargs["quiet"]
    lr = kwargs["lr"]
    weight_decay = kwargs["weight_decay"]
    seed = kwargs["seed"]

    if not kwargs["no_cuda"]:
        torch.cuda.set_device(kwargs["gpu_number"])
    model.set_seed(seed)
    embed_loader = data.SSTEmbeddingLoader("data")
    if restore:
        # Resume from a previously saved whole-model checkpoint.
        conv_rnn = torch.load(kwargs["input_file"])
    else:
        id_dict, weights, unk_vocab_list = embed_loader.load_embed_data()
        word_model = model.SSTWordEmbeddingModel(id_dict, weights,
                                                 unk_vocab_list)
        if not kwargs["no_cuda"]:
            word_model.cuda()
        conv_rnn = model.ConvRNNModel(word_model, **kwargs)
        if not kwargs["no_cuda"]:
            conv_rnn.cuda()

    conv_rnn.train()
    criterion = nn.CrossEntropyLoss()
    # Only optimize parameters that require gradients (e.g. frozen
    # embeddings stay frozen).
    parameters = list(filter(lambda p: p.requires_grad, conv_rnn.parameters()))
    optimizer = torch.optim.Adam(parameters, lr=lr, weight_decay=weight_decay)
    train_set, dev_set, test_set = data.SSTDataset.load_sst_sets("data")

    collate_fn = conv_rnn.convert_dataset
    train_loader = utils.data.DataLoader(train_set,
                                         shuffle=True,
                                         batch_size=mbatch_size,
                                         collate_fn=collate_fn)
    # Dev and test are each evaluated in a single full-split batch.
    dev_loader = utils.data.DataLoader(dev_set,
                                       batch_size=len(dev_set),
                                       collate_fn=collate_fn)
    test_loader = utils.data.DataLoader(test_set,
                                        batch_size=len(test_set),
                                        collate_fn=collate_fn)

    def evaluate(loader, dev=True):
        # Compute accuracy/loss on `loader`; on a dev pass, checkpoint the
        # model whenever accuracy ties or beats the best seen so far.
        conv_rnn.eval()
        for m_in, m_out in loader:
            scores = conv_rnn(*m_in)
            loss = criterion(scores, m_out).item()
            n_correct = (torch.max(scores, 1)[1].view(
                m_in[0].size(0)).data == m_out.data).float().sum().item()
            accuracy = n_correct / m_in[0].size(0)
            if dev and accuracy >= evaluate.best_dev:
                evaluate.best_dev = accuracy
                print("Saving best model ({})...".format(accuracy))
                torch.save(conv_rnn, kwargs["output_file"])
            if verbose:
                print("{} set accuracy: {}, loss: {}".format(
                    "dev" if dev else "test", accuracy, loss))
        conv_rnn.train()

    # Best dev accuracy is tracked as an attribute on the closure.
    evaluate.best_dev = 0

    for epoch in range(n_epochs):
        print("Epoch number: {}".format(epoch), end="\r")
        if verbose:
            print()
        # tqdm wraps the loader directly; this replaces the original
        # zip(enumerate(...), tqdm(range(len(...)))) construction and
        # drops the unused counters `i` and `j`.
        for train_in, train_out in tqdm(train_loader):
            optimizer.zero_grad()
            scores = conv_rnn(*train_in)
            loss = criterion(scores, train_out)
            loss.backward()
            optimizer.step()
        evaluate(dev_loader)
    evaluate(test_loader, dev=False)
    return evaluate.best_dev
Example #4
0
import pandas as pd

# NOTE(review): `gym`, `set_seed`, `RlBidAgent` and `torch` are not imported
# in this fragment — presumably brought in elsewhere in the file; confirm.
env = gym.make('AuctionEmulator-v0')
env.seed(0)

train_data = pd.read_csv('data/ipinyou/1458/train.log.txt', sep="\t")

# Set the budgets for each episode according to Cai et al. (2017)
# CPM_train: average pay price per thousand impressions in the training log.
CPM_train = 1000 * (train_data.payprice.sum() / len(train_data))
budget = CPM_train * (10**-3) * 96
# Evaluate under progressively tighter budgets (1/2 down to 1/32 of base).
eval_budgets = [budget / 2, budget / 4, budget / 8, budget / 16, budget / 32]

for e_budget in eval_budgets:

    # Initialize the bidding agent
    # Fresh agent (and re-seed) per budget so runs are independent.
    set_seed()
    agent = RlBidAgent()
    agent.ctl_lambda = 0.01

    # Load the saved models
    # Restores Q-network, its optimizer, reward net, and its optimizer
    # from the epoch-350 checkpoint.
    checkpoint = torch.load('models/model_state_350.tar')
    agent.dqn_agent.qnetwork_local.load_state_dict(checkpoint['local_q_model'])
    agent.dqn_agent.optimizer.load_state_dict(checkpoint['q_optimizer'])
    agent.reward_net.reward_net.load_state_dict(checkpoint['rnet'])
    agent.reward_net.optimizer.load_state_dict(checkpoint['rnet_optimizer'])

    # init the current budget for each episode
    # Same budget for each of the three episodes in this evaluation run.
    agent.episode_budgets = [e_budget, e_budget, e_budget]

    # start evaluating
    obs, done = env.reset()
Example #5
0
def train(**kwargs):
    """Train a MicroSem sentiment model on the dataset named in kwargs.

    Expected kwargs include: ``mbatch_size``, ``n_epochs``, ``restore``,
    ``quiet``, ``lr``, ``weight_decay``, ``seed``, ``dataset``, ``no_cuda``,
    ``gpu_number``, ``dev_per_epoch``, ``train_classifier_only``,
    ``input_file`` and ``output_file``. Remaining kwargs are forwarded to
    ``model.MicroSem``.

    Returns the best dev-set accuracy observed; the model achieving it is
    saved to ``kwargs["output_file"]``.
    """
    mbatch_size = kwargs["mbatch_size"]
    n_epochs = kwargs["n_epochs"]
    restore = kwargs["restore"]
    verbose = not kwargs["quiet"]
    # NOTE(review): lr/weight_decay are read but currently unused because
    # the SGD optimizer below hard-codes its own values; kept so missing
    # kwargs still fail fast with a KeyError.
    lr = kwargs["lr"]
    weight_decay = kwargs["weight_decay"]
    seed = kwargs["seed"]
    dataset_name = kwargs["dataset"]
    kwargs["dataset"] = data.DatasetEnum.lookup(dataset_name)

    if not kwargs["no_cuda"]:
        torch.cuda.set_device(kwargs["gpu_number"])
    model.set_seed(seed)
    embed_loader = data.SSTEmbeddingLoader("data")

    # Two embedding tables: a frozen ("static") one and a trainable
    # ("non-static") one.
    id_dict, weights, unk_vocab_list = embed_loader.load_embed_data()
    word_model_static = model.SSTWordEmbeddingModel(id_dict, weights,
                                                    unk_vocab_list)
    id_dict, weights, unk_vocab_list = embed_loader.load_embed_data()
    word_model_nonstatic = model.SSTWordEmbeddingModel(
        id_dict, weights, unknown_vocab=unk_vocab_list, static=False)
    if not kwargs["no_cuda"]:
        word_model_static.cuda()
        word_model_nonstatic.cuda()
    micro_sem = model.MicroSem(word_model_static, **kwargs)
    if restore:
        micro_sem.load(kwargs["input_file"])
    if not kwargs["no_cuda"]:
        micro_sem.cuda()

    micro_sem.train()
    criterion = nn.CrossEntropyLoss()
    # Each dataset has its own output head, e.g. `output_<dataset>`.
    output_layer = getattr(micro_sem, "output_{}".format(dataset_name))
    if kwargs["train_classifier_only"]:
        parameters = output_layer.parameters()
    else:
        parameters = list(
            filter(lambda p: p.requires_grad, micro_sem.parameters()))
    optimizer = torch.optim.SGD(parameters,
                                lr=0.005,
                                momentum=0.9,
                                weight_decay=1E-4)
    # Reduce LR when dev accuracy (mode="max") plateaus.
    scheduler = ReduceLROnPlateau(optimizer,
                                  patience=kwargs["dev_per_epoch"] * 41,
                                  mode="max")
    train_set, dev_set, test_set = data.SSTDataset.load_sst_sets(
        "data", dataset=dataset_name)

    collate_fn = micro_sem.convert_dataset
    train_loader = utils.data.DataLoader(train_set,
                                         shuffle=True,
                                         batch_size=mbatch_size,
                                         drop_last=True,
                                         collate_fn=collate_fn)
    # Dev and test are each evaluated in a single full-split batch.
    dev_loader = utils.data.DataLoader(dev_set,
                                       batch_size=len(dev_set),
                                       collate_fn=collate_fn)
    test_loader = utils.data.DataLoader(test_set,
                                        batch_size=len(test_set),
                                        collate_fn=collate_fn)

    def evaluate(loader, dev=True):
        # Accuracy/loss over `loader`; on a dev pass, checkpoint whenever
        # accuracy ties or beats the best seen so far.
        micro_sem.eval()
        tot_correct = 0
        tot_length = 0
        for m_in, m_out in loader:
            scores = micro_sem(m_in)
            # .item() replaces the deprecated `.cpu().data[0]`, which
            # raises on 0-dim tensors in modern PyTorch.
            loss = criterion(scores, m_out).item()
            n_correct = (torch.max(scores, 1)[1].view(
                m_in.size(0)).data == m_out.data).sum().item()
            tot_correct += n_correct
            tot_length += m_in.size(0)
        accuracy = tot_correct / tot_length
        scheduler.step(accuracy)

        if dev and accuracy >= evaluate.best_dev:
            evaluate.best_dev = accuracy
            print("Saving best model ({})...".format(accuracy))
            micro_sem.save(kwargs["output_file"])
        if verbose:
            print("{} set accuracy: {}, loss: {}".format(
                "dev" if dev else "test", accuracy, loss))
        micro_sem.train()

    # Best dev accuracy is tracked as an attribute on the closure.
    evaluate.best_dev = 0

    for epoch in range(n_epochs):
        print("Epoch number: {}".format(epoch), end="\r")
        if verbose:
            print()
        i = 0
        for train_in, train_out in train_loader:
            optimizer.zero_grad()

            if not kwargs["no_cuda"]:
                # Fix: Tensor.cuda() is NOT in-place — the original calls
                # discarded the GPU copies, leaving the batch unmoved.
                train_in = train_in.cuda()
                train_out = train_out.cuda()

            scores = micro_sem(train_in)
            loss = criterion(scores, train_out)
            loss.backward()
            optimizer.step()
            i += mbatch_size
            # Run a dev evaluation `dev_per_epoch` times per epoch.
            if i % (len(train_set) // kwargs["dev_per_epoch"]) < mbatch_size:
                evaluate(dev_loader)
    evaluate(test_loader, dev=False)
    return evaluate.best_dev
Example #6
0
from catalyst.dl import utils
from collections import Counter
import datetime
import matplotlib.pyplot as plt
import numpy as np
import omegaconf
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers.neptune import NeptuneLogger
from tqdm import tqdm

from model import LyftMultiModel, set_seed
# from logzero import logger

# Global seed for reproducibility (helper imported from `model` above).
set_seed(42)

# Hyperparameters
# NOTE(review): `load_config_data` and `os` are not imported in this
# fragment — presumably defined/imported elsewhere in the file; confirm.
cfg = load_config_data(
    "/data/lyft-motion-prediction-autonomous-vehicles/lyft-config-files/agent_motion_config.yaml"
)
# Wrap the raw config dict so it supports attribute/`[]` access uniformly.
cfg = omegaconf.DictConfig(cfg)
name_for_save = 'Big_training'
epochs = cfg["model_params"]["epochs"]
learning_rate = cfg["model_params"]["lr"]
training_percentage = cfg["train_data_loader"]["training_percentage"]
validation_percentage = cfg["val_data_loader"]["validation_percentage"]

# Neptune credentials come from the environment, never from source.
API_KEY = os.environ.get('NEPTUNE_API_KEY')
neptune_logger = NeptuneLogger(
    api_key=API_KEY,
Example #7
0
File: run.py — Project: yyht/openie6
    # NOTE(review): this is the tail of a larger function whose header is
    # outside this view; `hparams`, `data`, `DataLoader` and
    # `checkpoint_callback` are defined/imported elsewhere.
    train_dataset, val_dataset, test_dataset, meta_data_vocab, all_sentences = data.process_data(
        hparams)
    # All three loaders share the same batch size and padding collate_fn;
    # only the training loader shuffles.
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=hparams.batch_size,
                                  collate_fn=data.pad_data,
                                  shuffle=True,
                                  num_workers=1)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=hparams.batch_size,
                                collate_fn=data.pad_data,
                                num_workers=1)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=hparams.batch_size,
                                 collate_fn=data.pad_data,
                                 num_workers=1)

    # hparams.mode is an underscore-separated list of stage names; each
    # name is looked up as a module-level function and invoked in order.
    for process in hparams.mode.split('_'):
        globals()[process](hparams, checkpoint_callback, meta_data_vocab,
                           train_dataloader, val_dataloader, test_dataloader,
                           all_sentences)


if __name__ == "__main__":
    # Build the CLI: a stock argparse parser extended first with the
    # Lightning Trainer flags, then with the project-specific options.
    arg_parser = argparse.ArgumentParser()
    arg_parser = Trainer.add_argparse_args(arg_parser)
    arg_parser = params.add_args(arg_parser)
    cli_args = arg_parser.parse_args()
    # Seed all RNGs before anything touches them.
    set_seed(cli_args.seed)

    main(cli_args)
def main():
    """Train an RL bidding agent against the auction emulator.

    Runs 400 epochs over the environment's bid-request stream and, every
    25 epochs, checkpoints model weights, replay memories, and the
    step/episode histories under ``models/``.
    """
    # Instantiate the Environment and Agent
    env = gym.make('AuctionEmulator-v0')
    env.seed(0)
    set_seed()
    agent = RlBidAgent()

    # Total training budget: 1/8 of the sum of all pay prices in the log.
    train_budget = env.bid_requests.payprice.sum() / 8
    # Set budgets for each episode
    # Episodes are weekdays; each gets a share of the budget proportional
    # to its fraction of all bid requests.
    budget_proportions = []
    for episode in env.bid_requests.weekday.unique():
        budget_proportions.append(
            len(env.bid_requests[env.bid_requests.weekday == episode]) /
            env.total_bids)
    for i in range(len(budget_proportions)):
        budget_proportions[i] = round(train_budget * budget_proportions[i])

    epochs = 400

    for epoch in range(epochs):

        print("Epoch: ", epoch + 1)
        obs, done = env.reset()
        # Fresh copy so the agent's consumption doesn't mutate the template.
        agent.episode_budgets = budget_proportions.copy()
        if agent.exp_type in ('free_lambda', 'improved_drlb'):
            agent.ctl_lambda = 0.01
        agent._reset_episode()
        agent.cur_day = obs['weekday']
        agent.cur_hour = obs['hour']
        agent.cur_state = agent._get_state()  # observe state s_0

        while not done:  # iterate through the whole dataset
            bid = agent.act(
                obs, eval_mode=False
            )  # Call agent action given each bid request from the env
            next_obs, cur_reward, potential_reward, cur_cost, win, done = env.step(
                bid
            )  # Get information from the environment based on the agent's action
            agent._update_reward_cost(
                bid, cur_reward, potential_reward, cur_cost,
                win)  # Agent receives reward and cost from the environment
            obs = next_obs
        print(
            "Episode Result with Step={} Budget={} Spend={} impressions={} clicks={}"
            .format(agent.global_T, int(agent.budget),
                    int(agent.budget_spent_e), agent.wins_e, agent.rewards_e))
        agent.episode_memory.append([
            agent.budget,
            int(agent.budget_spent_e), agent.wins_e, agent.rewards_e
        ])

        # Saving models and history
        if ((epoch + 1) % 25) == 0:
            PATH = 'models/model_state_{}.tar'.format(epoch + 1)
            # Checkpoint both Q-networks, their optimizer, the reward net,
            # and its optimizer in a single archive.
            torch.save(
                {
                    'local_q_model':
                    agent.dqn_agent.qnetwork_local.state_dict(),
                    'target_q_model':
                    agent.dqn_agent.qnetwork_target.state_dict(),
                    'q_optimizer': agent.dqn_agent.optimizer.state_dict(),
                    'rnet': agent.reward_net.reward_net.state_dict(),
                    'rnet_optimizer': agent.reward_net.optimizer.state_dict()
                }, PATH)

            # Replay memories are serialized with cloudpickle so custom
            # classes stored inside them round-trip.
            f = open('models/rnet_memory_{}.txt'.format(epoch + 1), "wb")
            cloudpickle.dump(agent.dqn_agent.memory, f)
            f.close()
            f = open('models/rdqn_memory_{}.txt'.format(epoch + 1), "wb")
            cloudpickle.dump(agent.reward_net.memory, f)
            f.close()

            # Histories are flushed to CSV and then cleared in memory.
            pd.DataFrame(agent.step_memory).to_csv(
                'models/step_history_{}.csv'.format(epoch + 1),
                header=None,
                index=False)
            agent.step_memory = []
            pd.DataFrame(agent.episode_memory).to_csv(
                'models/episode_history_{}.csv'.format(epoch + 1),
                header=None,
                index=False)
            agent.episode_memory = []

        print("EPOCH ENDED")

    env.close()  # Close the environment when done