Example #1
def visualize_net():
    model = ToyNet()
    model.load_state_dict(torch.load(SAVE_PATH / 'net_best'))
    model.eval()

    RESOLUTION = 500
    plot_range = ToyDataset.plot_range()

    x1 = torch.linspace(*plot_range, steps=RESOLUTION)
    x2 = torch.linspace(*plot_range, steps=RESOLUTION)
    grid_x1, grid_x2 = torch.meshgrid(x1, x2)
    x = torch.stack((grid_x1, grid_x2), dim=-1)
    x = x.view(-1, ToyDataset.DIMENTION)
    with torch.no_grad():
        prediction = model(x).argmax(dim=-1).view(RESOLUTION, RESOLUTION)
    plt.contourf(grid_x1, grid_x2, prediction)

    dataloader = DataLoader(ToyDataset(), batch_size=512)
    x, y = next(iter(dataloader))
    a_examples = x[y == 0]
    b_examples = x[y == 1]
    plt.plot(a_examples[:, 0], a_examples[:, 1], 'o', label='a')
    plt.plot(b_examples[:, 0], b_examples[:, 1], 'o', label='b')
    plt.title('Net Trained to converge')
    plt.legend()
    plt.tight_layout()
    plt.savefig('./figures/net.png')
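ToyNet, ToyDataset, and SAVE_PATH above come from elsewhere in the project and are not shown in this example. A hypothetical minimal ToyNet consistent with the usage (2-D inputs, class logits over the last dimension) might look like:

import torch.nn as nn

class ToyNetSketch(nn.Module):
    # Hypothetical stand-in for the project's ToyNet: a two-layer MLP over 2-D points.
    def __init__(self, hidden=64, n_classes=2):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(2, hidden),
            nn.ReLU(),
            nn.Linear(hidden, n_classes),
        )

    def forward(self, x):
        return self.net(x)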
Example #2
def init_toy_dataset():
    train_dataset = ToyDataset()
    train_loader = DataLoader(
        dataset=train_dataset,
        batch_size=None,
        num_workers=opts["train"]["workers"],
        worker_init_fn=worker_init_fn,
    )
    return (train_dataset, train_loader)
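The opts dictionary and worker_init_fn referenced above are defined elsewhere in the module. A common (hypothetical) worker_init_fn simply reseeds NumPy in each DataLoader worker so that workers do not draw identical samples:

import numpy as np
import torch

def worker_init_fn(worker_id):
    # Derive a distinct NumPy seed for each worker from the torch base seed.
    np.random.seed((torch.initial_seed() + worker_id) % 2**32)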
Example #3
def main():
    parser = argparse.ArgumentParser(
        description='Train a simple classifier on a toy dataset')

    parser.add_argument('--dataset', type=str, default='')
    parser.add_argument('--train-fraction',
                        type=float,
                        default=.5,
                        help='proportion of the dataset to use for training')
    parser.add_argument('--n-samples', type=int, default=10000)
    parser.add_argument('--hidden-size',
                        type=int,
                        default=512,
                        help='Hidden size of the cleanup network')
    parser.add_argument('--epochs', type=int, default=20)
    parser.add_argument('--batch-size', type=int, default=32)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--seed', type=int, default=13)
    parser.add_argument('--logdir',
                        type=str,
                        default='trained_models/simple_classifier',
                        help='Directory for saved model and tensorboard log')
    parser.add_argument('--load-model',
                        type=str,
                        default='',
                        help='Optional model to continue training from')
    parser.add_argument('--name',
                        type=str,
                        default='',
                        help='Name of output folder within logdir. '
                             'Will use current date and time if blank')
    parser.add_argument('--weight-histogram',
                        action='store_true',
                        help='Save histograms of the weights if set')

    args = parser.parse_args()

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    rng = np.random.RandomState(seed=args.seed)

    dataset_train = ToyDataset(args.n_samples)
    dataset_test = ToyDataset(args.n_samples)

    trainloader = torch.utils.data.DataLoader(
        dataset_train,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=0,
    )

    # For testing just do everything in one giant batch
    testloader = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=len(dataset_test),
        shuffle=False,
        num_workers=0,
    )

    model = FeedForward(input_size=2,
                        hidden_size=args.hidden_size,
                        output_size=4)

    # Open a tensorboard writer if a logging directory is given
    if args.logdir != '':
        current_time = datetime.now().strftime('%b%d_%H-%M-%S')
        save_dir = osp.join(args.logdir, current_time)
        writer = SummaryWriter(log_dir=save_dir)
        if args.weight_histogram:
            # Log the initial parameters
            for name, param in model.named_parameters():
                writer.add_histogram('parameters/' + name,
                                     param.clone().cpu().data.numpy(), 0)

    criterion = nn.CrossEntropyLoss()
    # criterion = nn.NLLLoss()
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=args.momentum)

    for e in range(args.epochs):
        print('Epoch: {0}'.format(e + 1))

        avg_loss = 0
        n_batches = 0
        for i, data in enumerate(trainloader):

            locations, labels = data

            if locations.size()[0] != args.batch_size:
                continue  # Drop data, not enough for a batch
            optimizer.zero_grad()

            # outputs = torch.max(model(locations), 1)[1].unsqueeze(1)
            outputs = model(locations)

            loss = criterion(outputs, labels)

            avg_loss += loss.data.item()
            n_batches += 1

            loss.backward()

            # print(loss.data.item())

            optimizer.step()

        print(avg_loss / n_batches)

        if args.logdir != '':
            if n_batches > 0:
                avg_loss /= n_batches
                writer.add_scalar('avg_loss', avg_loss, e + 1)

            if args.weight_histogram and (e + 1) % 10 == 0:
                for name, param in model.named_parameters():
                    writer.add_histogram('parameters/' + name,
                                         param.clone().cpu().data.numpy(),
                                         e + 1)

    print("Testing")
    with torch.no_grad():

        # Everything is in one batch, so this loop will only happen once
        for i, data in enumerate(testloader):

            locations, labels = data

            outputs = model(locations)

            loss = criterion(outputs, labels)

            print(loss.data.item())

        if args.logdir != '':
            # TODO: get a visualization of the performance
            writer.add_scalar('test_loss', loss.data.item())

    # Close tensorboard writer
    if args.logdir != '':
        writer.close()

        torch.save(model.state_dict(), osp.join(save_dir, 'model.pt'))

        params = vars(args)
        with open(osp.join(save_dir, "params.json"), "w") as f:
            json.dump(params, f)
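FeedForward is imported from the project's models module and is not shown here. A hypothetical minimal definition consistent with the constructor call above (input_size=2, hidden_size, output_size=4) could be:

import torch.nn as nn

class FeedForwardSketch(nn.Module):
    # Hypothetical stand-in for models.FeedForward: a one-hidden-layer MLP classifier.
    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, output_size),
        )

    def forward(self, x):
        return self.layers(x)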
Example #4
    make_dir("figures/od")
    make_dir("files/od")
    make_dir("models/od")

    make_dir("figures/od/{}".format(args.name))
    make_dir("files/od/{}".format(args.name))
    make_dir("models/od/{}".format(args.name))

    make_dir("figures/od/{}/fold_{}".format(args.name, args.fold))
    make_dir("files/od/{}/fold_{}".format(args.name, args.fold))
    make_dir("models/od/{}/fold_{}".format(args.name, args.fold))

    # dataset = LightCurveDataset(args.name, fold=True, bs=args.bs, device=args.d, eval=True)
    if args.name == "toy":
        dataset = ToyDataset(args, val_size=0.1, sl=64)
        outlier_class = [3, 4]
    if args.name == "asas_sn":
        dataset = ASASSNDataset(fold=args.fold,
                                bs=args.bs,
                                device=args.d,
                                eval=True)
        outlier_class = [8]
    args.nin = dataset.x_train.shape[2]

    autoencoder = Model(args)
    loss, best_model, last_model = autoencoder.fit(dataset.train_dataloader,
                                                   dataset.val_dataloader,
                                                   args)
    torch.save(
        best_model,
Example #5
def train_net():
    model = ToyNet()
    dataset = ToyDataset()
    optimizer = Adam(model.parameters())
    train('net', model, dataset, optimizer)
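The train helper called here is defined elsewhere in the project. A minimal sketch of what such a helper might do, given the call signature train(name, model, dataset, optimizer) and the SAVE_PATH / 'net_best' checkpoint loaded in Example #1 (names and defaults are assumptions):

from pathlib import Path

import torch
import torch.nn as nn
from torch.utils.data import DataLoader

SAVE_PATH = Path('trained_models')  # assumed; stands in for the project's SAVE_PATH

def train_sketch(name, model, dataset, optimizer, epochs=20, batch_size=64):
    # Plain cross-entropy training loop that saves the final weights.
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    criterion = nn.CrossEntropyLoss()
    model.train()
    for _ in range(epochs):
        for x, y in loader:
            optimizer.zero_grad()
            loss = criterion(model(x), y)
            loss.backward()
            optimizer.step()
    torch.save(model.state_dict(), SAVE_PATH / f'{name}_best')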
Example #6
import sys

import torch
import torch.utils.data
import torch.nn as nn
from models import FeedForward
from toy_dataset import ToyDataset, plot_data
import matplotlib.pyplot as plt
import numpy as np

fname = sys.argv[1]

n_samples = 10000
hidden_size = 512

model = FeedForward(input_size=2, hidden_size=hidden_size, output_size=4)
model.load_state_dict(torch.load(fname), strict=True)
model.eval()

dataset_test = ToyDataset(n_samples)

# For testing just do everything in one giant batch
testloader = torch.utils.data.DataLoader(
    dataset_test,
    batch_size=len(dataset_test),
    shuffle=False,
    num_workers=0,
)

criterion = nn.CrossEntropyLoss()

with torch.no_grad():
    # Everything is in one batch, so this loop will only happen once
    for i, data in enumerate(testloader):
        locations, labels = data
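        # Hypothetical continuation (not part of the original example): score
        # the single full-size test batch, e.g.
        #     outputs = model(locations)
        #     test_loss = criterion(outputs, labels)
        #     accuracy = (outputs.argmax(dim=1) == labels).float().mean()
        #     print(test_loss.item(), accuracy.item())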
Example #7
import tensorflow as tf
import numpy as np
from toy_dataset import ToyDataset
from gcn_util import GCNUtil
import random
import math

tds = ToyDataset()
gcnu = GCNUtil()

dsLabels = tds.get_label()
dsGraph = tds.get_graph()
dsAdjacency = []
dsFeatures = []
dsDegree = []

mSize = 16
for i in range(16):
    g = gcnu.get_subgraph(dsGraph, i + 1, 3)
    A = gcnu.adjacency(g)
    F = gcnu.feature(A)
    D = gcnu.degree(g)
    A = A.reshape([1, A.shape[0], A.shape[1]])
    F = F.reshape([1, F.shape[0], F.shape[1]])
    D = D.reshape([1, D.shape[0], D.shape[1]])

    if i == 0:
        dsAdjacency = A
        dsDegree = D
        dsFeatures = F
    else:
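        # Hypothetical continuation (not part of the original example): later
        # subgraphs would typically be stacked along the leading batch axis, e.g.
        #     dsAdjacency = np.concatenate([dsAdjacency, A], axis=0)
        #     dsDegree = np.concatenate([dsDegree, D], axis=0)
        #     dsFeatures = np.concatenate([dsFeatures, F], axis=0)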
Example #8
def main():
    parser = argparse.ArgumentParser(
        description='Train a simple invertible classifier on a toy dataset')

    parser.add_argument('--dataset', type=str, default='')
    parser.add_argument('--train-fraction', type=float, default=.5, help='proportion of the dataset to use for training')
    parser.add_argument('--n-samples', type=int, default=10000)
    parser.add_argument('--hidden-size', type=int, default=2, help='Hidden size of the s and t blocks')
    parser.add_argument('--n-hidden-layers', type=int, default=1)
    parser.add_argument('--epochs', type=int, default=20)
    parser.add_argument('--batch-size', type=int, default=32)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--seed', type=int, default=13)
    parser.add_argument('--logdir', type=str, default='trained_models/invertible_latent_classifier',
                        help='Directory for saved model and tensorboard log')
    parser.add_argument('--load-model', type=str, default='', help='Optional model to continue training from')
    parser.add_argument('--name', type=str, default='',
                        help='Name of output folder within logdir. Will use current date and time if blank')
    parser.add_argument('--weight-histogram', action='store_true', help='Save histograms of the weights if set')

    args = parser.parse_args()

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    rng = np.random.RandomState(seed=args.seed)

    dataset_train = ToyDataset(args.n_samples)
    dataset_test = ToyDataset(args.n_samples)

    trainloader = torch.utils.data.DataLoader(
        dataset_train, batch_size=args.batch_size, shuffle=True, num_workers=0,
    )

    # For testing just do everything in one giant batch
    testloader = torch.utils.data.DataLoader(
        dataset_test, batch_size=len(dataset_test), shuffle=False, num_workers=0,
    )

    input_size = 2
    output_size = 4
    latent_size = 2

    input_padding = output_size + latent_size - input_size

    hidden_sizes = [args.hidden_size] * args.n_hidden_layers
    model = InvertibleNetwork(
        input_output_size=max(input_size, output_size + latent_size),
        hidden_sizes=hidden_sizes,
    )
    # model = InvertibleBlock(
    #     input_output_size=max(input_size, output_size),
    #     hidden_size=args.hidden_size
    # )

    # Open a tensorboard writer if a logging directory is given
    if args.logdir != '':
        current_time = datetime.now().strftime('%b%d_%H-%M-%S')
        save_dir = osp.join(args.logdir, current_time)
        writer = SummaryWriter(log_dir=save_dir)
        if args.weight_histogram:
            # Log the initial parameters
            for name, param in model.named_parameters():
                writer.add_histogram('parameters/' + name, param.clone().cpu().data.numpy(), 0)

    criterion = nn.CrossEntropyLoss()

    z_dist = distributions.MultivariateNormal(torch.zeros(latent_size), torch.eye(latent_size))

    # criterion = nn.NLLLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)

    for e in range(args.epochs):
        print('Epoch: {0}'.format(e + 1))

        avg_loss = 0
        n_batches = 0
        for i, data in enumerate(trainloader):

            locations, labels = data

            if locations.size()[0] != args.batch_size:
                continue  # Drop data, not enough for a batch
            optimizer.zero_grad()

            # outputs = torch.max(model(locations), 1)[1].unsqueeze(1)

            # pad locations with zeros to match label dimensionality
            locations = F.pad(locations, pad=(0, input_padding), mode='constant', value=0)

            # outputs, logp = model(locations)
            outputs = model(locations)

            output_class = outputs[:, :output_size]
            output_latent = outputs[:, output_size:]

            # print(outputs.shape)
            # print(output_class.shape)
            # print(output_latent.shape)

            loss_class = criterion(output_class, labels)

            # print(z_dist.sample((output_latent.shape[0],)).shape)
            # assert False

            # loss_latent = z_dist.log_prob(output_latent) + output_latent.sum()
            # loss_latent = -(z_dist.sample((output_latent.shape[0],)).mean() - output_latent.mean())
            loss_latent = inverse_multiquadratic_v2(
                z_dist.sample((output_latent.shape[0],)),
                output_latent
            )
            # TODO: make this correct
            # loss_latent = (-z_dist.log_prob(output_latent).sum()).log()
            # loss_latent = (-z_dist.log_prob(output_latent) + logp).sum().log()
            # loss_latent = -(z_dist.log_prob(output_latent) + logp).mean()

            loss = loss_class + loss_latent

            # print("loss_class:", loss_class.data.item())
            # print("loss_latent:", loss_latent.data.item())
            # print("loss:", loss.data.item())

            # print(loss_class.shape)
            # print(loss_latent.shape)
            # print(loss.shape)

            avg_loss += loss.data.item()
            n_batches += 1

            loss.backward()

            # print(loss.data.item())

            optimizer.step()

        print(avg_loss / n_batches)

        if args.logdir != '':
            if n_batches > 0:
                avg_loss /= n_batches
                writer.add_scalar('avg_loss', avg_loss, e + 1)

            if args.weight_histogram and (e + 1) % 10 == 0:
                for name, param in model.named_parameters():
                    writer.add_histogram('parameters/' + name, param.clone().cpu().data.numpy(), e + 1)

    print("Testing")
    with torch.no_grad():

        # Everything is in one batch, so this loop will only happen once
        for i, data in enumerate(testloader):

            locations, labels = data

            # pad locations with zeros to match label dimensionality
            locations = F.pad(locations, pad=(0, input_padding), mode='constant', value=0)

            # outputs, logp = model(locations)
            outputs = model(locations)

            output_class = outputs[:, :output_size]
            output_latent = outputs[:, output_size:]

            loss_class = criterion(output_class, labels)

            # loss_latent = z_dist.log_prob(output_latent) + output_latent.sum()
            # loss_latent = -(z_dist.sample((output_latent.shape[0],)).mean() - output_latent.mean())
            loss_latent = inverse_multiquadratic_v2(
                z_dist.sample((output_latent.shape[0],)),
                output_latent
            )
            # TODO: make this correct
            # loss_latent = (-z_dist.log_prob(output_latent).sum()).log()
            # loss_latent = (-z_dist.log_prob(output_latent) + logp).sum().log()
            # loss_latent = -(z_dist.log_prob(output_latent) + logp).mean()

            loss = loss_class + loss_latent

            print(loss.data.item())

        if args.logdir != '':
            # TODO: get a visualization of the performance
            writer.add_scalar('test_loss', loss.data.item())

    # Close tensorboard writer
    if args.logdir != '':
        writer.close()

        torch.save(model.state_dict(), osp.join(save_dir, 'model.pt'))

        params = vars(args)
        with open(osp.join(save_dir, "params.json"), "w") as f:
            json.dump(params, f)
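inverse_multiquadratic_v2 is imported from elsewhere in the project and is not shown here. The call sites above suggest a kernel-based distance between the latent outputs and samples from the prior; a rough sketch of an inverse-multiquadratic-kernel MMD between two batches of vectors (hypothetical name and implementation) is:

import torch

def imq_mmd_sketch(x, y, c=1.0):
    # Inverse multiquadratic kernel: k(a, b) = c / (c + ||a - b||^2).
    def k(a, b):
        return c / (c + torch.cdist(a, b) ** 2)
    # Biased estimate of MMD^2 between the two samples.
    return k(x, x).mean() + k(y, y).mean() - 2.0 * k(x, y).mean()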
Example #9
def animate_toy_examples():
    # Initialize dataset.
    ds = ToyDataset()
    n_players = ds.n_players
    player_traj_n = ds.player_traj_n
    seq_len = ds.seq_len

    home_dir = os.path.expanduser("~")
    os.makedirs(f"{home_dir}/results", exist_ok=True)

    tensors = ds[0]
    (traj_imgs, traj_img) = gen_imgs_from_sample(tensors, seq_len)
    traj_imgs[0].save(
        f"{home_dir}/results/train.gif",
        save_all=True,
        append_images=traj_imgs[1:],
        duration=400,
        loop=0,
    )
    traj_img.save(
        f"{home_dir}/results/train.png",
    )

    for JOB in ["20210408161424", "20210408160343"]:
        JOB_DIR = f"{EXPERIMENTS_DIR}/{JOB}"
        opts = yaml.safe_load(open(f"{JOB_DIR}/{JOB}.yaml"))

        # Initialize model.
        device = torch.device("cuda:0")
        model = init_model(opts, ds).to(device)
        model.load_state_dict(torch.load(f"{JOB_DIR}/best_params.pth"))
        model.eval()

        with torch.no_grad():
            tensors = ds[0]
            for step in range(seq_len):
                preds_start = n_players * step
                for player_idx in range(n_players):
                    pred_idx = preds_start + player_idx
                    preds = model(tensors)[pred_idx]
                    probs = torch.softmax(preds, dim=0)
                    samp_traj = torch.multinomial(probs, 1)

                    # Decode the flat sampled index into (row, col) on a grid
                    # with player_traj_n columns per row.
                    samp_row = samp_traj // player_traj_n
                    samp_col = samp_traj % player_traj_n

                    samp_x = samp_col.item() - 1
                    samp_y = samp_row.item() - 1

                    tensors["player_x_diffs"][step, player_idx] = samp_x
                    tensors["player_y_diffs"][step, player_idx] = samp_y
                    if step < seq_len - 1:
                        tensors["player_xs"][step + 1, player_idx] = (
                            tensors["player_xs"][step, player_idx] + samp_x
                        )
                        tensors["player_ys"][step + 1, player_idx] = (
                            tensors["player_ys"][step, player_idx] + samp_y
                        )

        (traj_imgs, traj_img) = gen_imgs_from_sample(tensors, seq_len)
        traj_imgs[0].save(
            f"{home_dir}/results/{JOB}.gif",
            save_all=True,
            append_images=traj_imgs[1:],
            duration=400,
            loop=0,
        )
        traj_img.save(f"{home_dir}/results/{JOB}.png")

    shutil.make_archive(f"{home_dir}/results", "zip", f"{home_dir}/results")
    shutil.rmtree(f"{home_dir}/results")