def show_tensorflow():
    rnd.manual_seed(0)
    layer = nn.Linear(n, 1, bias=False)

    new_data = [[10], [20], [30], [40], [50]]
    X = tf.placeholder('float', shape=[l, n])
    W = tf.Variable(layer.weight.detach().numpy())  # reuse the layer's initial PyTorch weights
    Y = tf.placeholder('float', shape=[l, 1])
    Y_pred = tf.matmul(X, W, transpose_b=True)
    loss = tf.reduce_sum(tf.square(Y - Y_pred))
    optimizer = tf.train.AdagradOptimizer(learning_rate=lr)
    minimizer = optimizer.minimize(loss)

    sess = tf.Session()

    init = tf.global_variables_initializer()

    sess.run(init)
    print('TensorFlow: prediction before training for [10, 20, 30, 40, 50] is',
          sess.run(Y_pred, {X: new_data}))

    for t in range(50):
        sess.run(minimizer, {X: X_, Y: Y_})

    print('TensorFlow: prediction after training for [10, 20, 30, 40, 50] is',
          sess.run(Y_pred, {X: new_data}))
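Note: show_tensorflow() and show_pytorch() below both rely on module-level setup that this excerpt omits. A minimal sketch of what it could look like; every name here (n, l, lr, lr_decay, weight_decay, X_, Y_, onnx_model_path, and the rnd alias) is an assumption inferred from usage, not taken from the original source:

import tensorflow as tf                 # TF 1.x API (tf.placeholder, tf.Session)
import torch
import torch.nn as nn
import torch.optim as optim
import torch.random as rnd

n = 1                                   # number of input features
l = 5                                   # number of training samples
lr, lr_decay, weight_decay = 0.1, 0.0, 0.0
onnx_model_path = 'linear.onnx'         # hypothetical export path

X_ = torch.tensor([[1.], [2.], [3.], [4.], [5.]])   # toy data: y = 2x
Y_ = 2 * X_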
Example #2
def q3():
    random.seed(0)
    trandom.manual_seed(0)
    d_p = distribution1(0, 512)

    ws_distances = []
    js_divergences = []

    for phi in [x/10. for x in range(-10,11)]:
        d_q = distribution1(phi, 512)
        train = [(next(d_p), next(d_q)) for i in range(500)]
        critic = q2(train)
        ws_distances.append((phi, critic(next(d_p), next(d_q))))
        print("Phi %.3f, WS %.3f" % (ws_distances[-1][0], ws_distances[-1][1]))
    
    for phi in [x/10. for x in range(-10,11)]:
        d_q = distribution1(phi, 512)
        train = [(next(d_p), next(d_q)) for i in range(300)]
        discriminator = q1(train)
        js_divergences.append((phi, discriminator(next(d_p), next(d_q))))
        print("Phi %.3f, JS %.3f" % (js_divergences[-1][0], js_divergences[-1][1]))
    
    print(ws_distances)
    f = open("ws.tab", "w")
    f.write("\n".join(["%.3f\t%.3f" % (x,y) for (x,y) in ws_distances]))
    f.close()
    print(js_divergences)
    f = open("js.tab", "w")
    f.write("\n".join(["%.3f\t%.3f" % (x,y) for (x,y) in js_divergences]))
    f.close()
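q3() assumes a distribution1(phi, batch_size) sample generator (plus the q1/q2 training helpers) that the excerpt does not show. A hypothetical sketch of the generator, consistent with how q3() consumes it via next(): an infinite stream of 2-D batches whose first coordinate is fixed at phi, a common setup for sweeping Wasserstein-distance and Jensen-Shannon-divergence estimates:

import numpy as np

def distribution1(phi, batch_size=512):
    # Infinite generator of (batch_size, 2) batches of points (phi, z),
    # with z ~ Uniform(0, 1).
    while True:
        z = np.random.uniform(0., 1., (batch_size, 1))
        yield np.hstack((np.full((batch_size, 1), phi), z))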
def show_pytorch():
    X = X_.clone()
    Y = Y_.clone()

    rnd.manual_seed(0)
    model = nn.Sequential(nn.Linear(n, 1, bias=False))

    loss_fn = nn.MSELoss(reduction='sum')

    solver = optim.Adagrad(model.parameters(),
                           lr=lr,
                           lr_decay=lr_decay,
                           weight_decay=weight_decay)

    new_data = torch.tensor([[10], [20], [30], [40], [50]],
                            dtype=torch.float32)
    print('PyTorch: prediction before training for [10, 20, 30, 40, 50] is',
          model(new_data))
    torch.onnx.export(model, X, onnx_model_path, verbose=True)

    for t in range(50):
        Y_pred = model(X)
        loss = loss_fn(Y_pred, Y)
        model.zero_grad()
        loss.backward()
        solver.step()
    print('PyTorch: prediction after training for [10, 20, 30, 40, 50] is',
          model(new_data))
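Since show_pytorch() exports the model with torch.onnx.export, the file can be sanity-checked with the onnx package (a sketch, assuming onnx is installed and onnx_model_path is the same path used above):

import onnx

model_proto = onnx.load(onnx_model_path)   # parse the exported graph
onnx.checker.check_model(model_proto)      # validate the graph structure
print(onnx.helper.printable_graph(model_proto.graph))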
    def setUp(self):
        super().setUp()
        manual_seed(0)

        train_x = rand(2, 10, 1)
        train_y = randn(2, 10, 3, 5)

        train_x = train_x.to(device=self.device)
        train_y = train_y.to(device=self.device)

        self.model = HigherOrderGP(train_x, train_y, first_dim_is_batch=True)

        # check that we can assign different kernels and likelihoods
        model_2 = HigherOrderGP(
            train_x,
            train_y,
            first_dim_is_batch=True,
            covar_modules=[RBFKernel(), RBFKernel(), RBFKernel()],
            likelihood=GaussianLikelihood(),
        )

        for m in [self.model, model_2]:
            mll = ExactMarginalLogLikelihood(m.likelihood, m)
            fit_gpytorch_torch(mll, options={"maxiter": 1, "disp": False})
    def setUp(self):
        super().setUp()
        manual_seed(0)

        train_x = rand(2, 10, 1)
        train_y = randn(2, 10, 3, 5)

        train_x = train_x.to(device=self.device)
        train_y = train_y.to(device=self.device)

        m1 = HigherOrderGP(train_x, train_y, first_dim_is_batch=True)
        m2 = HigherOrderGP(train_x[0], train_y[0])

        manual_seed(0)
        test_x = rand(2, 5, 1).to(device=self.device)

        posterior1 = m1.posterior(test_x)
        posterior2 = m2.posterior(test_x[0])
        posterior3 = m2.posterior(test_x)

        self.post_list = [
            [m1, test_x, posterior1],
            [m2, test_x[0], posterior2],
            [m2, test_x, posterior3],
        ]
Example #6
    def decorator(self, *args, **kwargs) -> T:
        # Reuse an explicit random_state if one was passed; otherwise create
        # (and cache) a RandomState instance on first use.
        if "random_state" in kwargs:
            self._random_instance = check_random_state(kwargs["random_state"])
        elif not hasattr(self, "_random_instance"):
            self._random_instance = check_random_state(randint(0, high=MAX_NUMPY_SEED_VALUE))

        # Fork the global torch RNG so the seeding below does not leak into
        # the caller's RNG stream, then run the wrapped method.
        with fork_rng():
            manual_seed(self._random_instance.randint(0, high=MAX_TORCH_SEED_VALUE))
            return decorated(self, *args, **kwargs)
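The snippet above is only the inner wrapper; the outer function that closes over decorated, plus the imports and seed bounds, are omitted. A self-contained sketch of how the whole decorator could fit together (the name seed_rng and both MAX_* bounds are assumptions, not from the source):

from functools import wraps
from typing import Callable, TypeVar

from numpy.random import randint
from sklearn.utils import check_random_state
from torch.random import fork_rng, manual_seed

T = TypeVar("T")
MAX_NUMPY_SEED_VALUE = 2**32 - 1   # assumed bound
MAX_TORCH_SEED_VALUE = 2**32 - 1   # assumed bound

def seed_rng(decorated: Callable[..., T]) -> Callable[..., T]:
    # Runs each call of the wrapped method under a forked, freshly seeded
    # torch RNG, reproducible via an optional random_state kwarg.
    @wraps(decorated)
    def decorator(self, *args, **kwargs) -> T:
        if "random_state" in kwargs:
            self._random_instance = check_random_state(kwargs["random_state"])
        elif not hasattr(self, "_random_instance"):
            self._random_instance = check_random_state(randint(0, high=MAX_NUMPY_SEED_VALUE))
        with fork_rng():
            manual_seed(self._random_instance.randint(0, high=MAX_TORCH_SEED_VALUE))
            return decorated(self, *args, **kwargs)
    return decorator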
    def test_fantasize(self):
        manual_seed(0)
        test_x = rand(2, 5, 1, device=self.device)
        sampler = IIDNormalSampler(num_samples=32).to(self.device)

        _ = self.model.posterior(test_x)
        fantasy_model = self.model.fantasize(test_x, sampler=sampler)
        self.assertIsInstance(fantasy_model, HigherOrderGP)
        self.assertEqual(fantasy_model.train_inputs[0].shape[:2], Size((32, 2)))
    def test_condition_on_observations(self):
        manual_seed(0)
        test_x = rand(2, 5, 1, device=self.device)
        test_y = randn(2, 5, 3, 5, device=self.device)

        # dummy call to ensure caches have been computed
        _ = self.model.posterior(test_x)
        conditioned_model = self.model.condition_on_observations(test_x, test_y)
        self.assertIsInstance(conditioned_model, HigherOrderGP)
Example #9
    def __init__(self, layer, ucapture, uminus, usearch, ubackoff, umin,
                 maxweight):
        super(ModSTDP, self).__init__()
        # Initialize your variables here, including any Bernoulli Random Variable distributions
        self.layer = layer
        self.ucapture = ucapture
        self.uminus = uminus
        self.usearch = usearch
        self.ubackoff = ubackoff
        self.umin = umin
        self.maxweight = maxweight
        trand.manual_seed(0)  # set seed for determinism
    def test_posterior(self):
        manual_seed(0)
        test_x = rand(2, 30, 1).to(device=self.device)

        # test the posterior works
        posterior = self.model.posterior(test_x)
        self.assertIsInstance(posterior, GPyTorchPosterior)

        # test the posterior works with observation noise
        posterior = self.model.posterior(test_x, observation_noise=True)
        self.assertIsInstance(posterior, GPyTorchPosterior)

        # test the posterior works with no variances; due to a quirk in MVN
        # registration the variance is small but not exactly zero
        with skip_posterior_variances():
            posterior = self.model.posterior(test_x)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertLessEqual(posterior.variance.max(), 1e-6)
    def test_initialize_latents(self):
        manual_seed(0)

        train_x = rand(10, 1, device=self.device)
        train_y = randn(10, 3, 5, device=self.device)

        for latent_dim_sizes in [[1, 1], [2, 3]]:
            for latent_init in ["gp", "default"]:
                self.model = HigherOrderGP(
                    train_x,
                    train_y,
                    num_latent_dims=latent_dim_sizes,
                    latent_init=latent_init,
                )
                self.assertEqual(
                    self.model.latent_parameters[0].shape,
                    Size((3, latent_dim_sizes[0])),
                )
                self.assertEqual(
                    self.model.latent_parameters[1].shape,
                    Size((5, latent_dim_sizes[1])),
                )
Example #12
    def setUp(self):
        super().setUp()
        manual_seed(0)

        train_x = rand(2, 10, 1, device=self.device)
        train_y = randn(2, 10, 3, 5, device=self.device)

        m1 = HigherOrderGP(train_x, train_y)
        m2 = HigherOrderGP(train_x[0], train_y[0])

        manual_seed(0)
        test_x = rand(2, 5, 1, device=self.device)

        posterior1 = m1.posterior(test_x)
        posterior2 = m2.posterior(test_x[0])
        posterior3 = m2.posterior(test_x)

        self.post_list = [
            [m1, test_x, posterior1],
            [m2, test_x[0], posterior2],
            [m2, test_x, posterior3],
        ]
import math
from datetime import datetime

import torch
import torch.random as tr
from types import SimpleNamespace
from socket import socket
from typing import Tuple

from csv_helper import CSVFile
from memory import ReplayMemory
from net import BasketballModel, SupervisedModel
from socket_server import SocketServer
from helpers import is_request, is_result, is_correct_message
from training_handler import TrainingHandler, Connection

tr.manual_seed(9)


class ModelServer(SocketServer):
    HOST = 'localhost'
    PORT = 5600

    def __init__(self, *args, **kwargs):
        self.model = BasketballModel()
        self.handler = TrainingHandler()
        self.status = 0
        self.last_connection_amount = 0
        self.running_time = datetime.now()
        self.memory = ReplayMemory(100000)
        self.csv = CSVFile()
        super(ModelServer, self).__init__(self.HOST, self.PORT)
from create_dataset import create_dataset
from cnn_categorization_base import cnn_categorization_base
from cnn_categorization_improved import cnn_categorization_improved
from train import train
from torch import random, save
from argparse import ArgumentParser
import matplotlib.pyplot as plt

# seed the random number generator. Remove the line below if you want to try different initializations
random.manual_seed(0)


def cnn_categorization(model_type="base",
                       data_path="image_categorization_dataset.pt",
                       contrast_normalization=False,
                       whiten=False):
    """
    Invokes the dataset creation, the model construction and training functions

    Arguments
    --------
    model_type: (string), the type of model to train. Use 'base' for the base model and 'improved for the improved model. Default: base
    data_path: (string), the path to the dataset. This argument will be passed to the dataset creation function
    contrast_normalization: (boolean), specifies whether or not to do contrast normalization
    whiten: (boolean), specifies whether or not to whiten the data.

    """
    # Do not change the output path,
    # but you can comment out exp_dir if you do not want to save the model checkpoints
    output_path = "{}_image_categorization_dataset.pt".format(model_type)
    exp_dir = "./{}_models".format(model_type)