Example #1
def load_embeddings(embedding_index, embed_dim, senvocab, aspvocab):

    sentence_embed = torch.zeros(len(senvocab), embed_dim)
    i = 0
    for word in senvocab.keys():
        if (word not in embedding_index):
            if (word != 'PAD'):
                sentence_embed[i, :] = uniform.Uniform(-0.25, 0.25).sample(
                    torch.Size([embed_dim]))
        else:
            sentence_embed[i, :] = embedding_index[word]
        i += 1

    aspect_embed = torch.zeros(len(aspvocab), embed_dim)
    i = 0
    for word in aspvocab.keys():
        if (word not in embedding_index):
            if (word != 'PAD'):
                aspect_embed[i, :] = uniform.Uniform(-0.25, 0.25).sample(
                    torch.Size([embed_dim]))
        else:
            aspect_embed[i, :] = embedding_index[word]
        i += 1

    return sentence_embed, aspect_embed
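A minimal usage sketch for the loader above; the vocabularies and the word-to-vector index here are hypothetical stand-ins for whatever the project's preprocessing produces:

import torch
from torch.distributions import uniform

embed_dim = 50
senvocab = {'PAD': 0, 'the': 1, 'food': 2}                      # hypothetical sentence vocabulary
aspvocab = {'PAD': 0, 'service': 1}                             # hypothetical aspect vocabulary
embedding_index = {'the': torch.randn(embed_dim),
                   'food': torch.randn(embed_dim)}              # pretrained vectors for known words

sentence_embed, aspect_embed = load_embeddings(embedding_index, embed_dim, senvocab, aspvocab)
print(sentence_embed.shape, aspect_embed.shape)                 # torch.Size([3, 50]) torch.Size([2, 50])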
Example #2
def GetCR(n, m, alpha, type_CR):
    '''

    :param n: The number of samples
    :param m: The number of initializations
    :param alpha: The accuracy parameter
    :param type_CR: string determining which confidence-region method to use; one of 'LogLike', 'Score', 'Wald'
    :return: boolean indicating whether the ground truth is inside the confidence region
    '''
    # Generate data
    data = get_data(int(n))

    #parameter to be optimized
    theta = torch.tensor(
        [[uniform.Uniform(0., .6).sample(),
          uniform.Uniform(0., .5).sample()]],
        requires_grad=True)

    theta, _, _ = theta_n_M_CG_FR(data=data,
                                  n_runs=m,
                                  func=LogLikelihood,
                                  max_iterations=2000,
                                  learningrate=0.01,
                                  print_info=False)

    if type_CR == 'LogLike':
        # Getting Quantities that underly the CIs
        theta_hat = theta.clone().data.numpy()
        '''Scores, Hessian = get_derivatives_torch(func=LogLikelihood,
                                                param=theta,
                                                data=data,
                                                print_dims=False)
        ci, length, shape = normal_CI(alpha, Scores, Hessian, theta_hat)'''
        ci_bool = LogLikeRatio_CR(data,
                                  alpha,
                                  theta_hat,
                                  theta_gt,
                                  func=LogLikelihood)

    elif type_CR == 'Score':
        # Getting Quantities that underly the CIs
        theta_hat = theta.clone().data.numpy()
        ci_bool = Score_CR(data, alpha, theta_gt, func=LogLikelihood)

    elif type_CR == 'Wald':
        # Getting Quantities that underly the CIs
        theta_hat = theta.clone().data.numpy()
        Scores, Hessian = get_derivatives_torch(func=LogLikelihood,
                                                param=theta,
                                                data=data,
                                                print_info=False)

        ci_bool = Wald_CR(data, alpha, theta_hat, theta_gt, Scores, Hessian)
    else:
        print('Unknown type of CI!')
        sys.exit()

    return ci_bool
Example #3
    def __init__(self, num_boxes, dim):
        super(Boxes, self).__init__()
        min_distribution = uniform.Uniform(1e-4, 1e-2)
        box_mins = min_distribution.sample((num_boxes, dim))

        delta_distribution = uniform.Uniform(-0.1, -0.01)
        box_deltas = delta_distribution.sample((num_boxes, dim))
        boxes = torch.stack([box_mins, box_deltas], dim=1)
        self.boxes = nn.Parameter(boxes)
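A quick instantiation sketch; Boxes is assumed to be an nn.Module subclass, as the super().__init__() call suggests:

import torch
import torch.nn as nn
from torch.distributions import uniform

boxes_module = Boxes(num_boxes=100, dim=16)
print(boxes_module.boxes.shape)            # torch.Size([100, 2, 16]): a min corner and a delta per box
print(boxes_module.boxes.requires_grad)    # True, since the stacked tensor is wrapped in nn.Parameter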
Example #4
def theta_n_M(data, n_runs, func, max_iterations=1000, learningrate=0.01, print_info=True):
    '''
        This function performs gradient ascent on the function func, which is parametrised by the argument param. The procedure is repeated with
        n_runs = M random initializations, and the GA limit with the highest likelihood value is returned, i.e. theta_n_M.

        Arguments:
            - func: a pytorch autograd compatible function; function defining the logprobs that build the log-likelihood function (e.g. \ref{func: LogLikelihood})
            - data: torch tensor of dim $k\times n $ (c.f. section \ref{sec: Data Generation});  these govern / parametrise func
            - max_iterations: scalar (int); (maximum) number of iterations to be performed during gradient ascent
            - learningrate: scalar; learning rate / step size of the algorithm
            - print_info: Boolean; whether info about GA runs is to be printed or not

        Outputs:
            - theta_hat: torch tensor of dim $1\times d$ (requires_grad=True); the estimator theta_n_M that is supposed to be the MLE
            - trajectory_dict: dict mapping each run id to its optimization trajectory (the list of param values visited)
        '''
    # Initializing Loss as minus infinity to make sure first run achieves higher likelihood
    max_likelihood = -1 * np.inf
    # trajectory_dict is a cache to save the gradient ascent trajectory of all gradient ascent runs
    trajectory_dict = {}

    # Running Gradient Ascent multiple (M=n_runs) times
    for run in range(n_runs):
        print(f'-------------------Run: {run}-------------\n\n')
        # Create/initialize the variable.  TODO: make initialization more flexible
        theta = torch.tensor([[uniform.Uniform(0., .6).sample(), uniform.Uniform(0., 5.).sample()]], requires_grad=True)

        # Run complete Gradient ascent
        theta, L, trajectory = gradient_ascent_torch(func=func,
                                                     param=theta,
                                                     data=data,
                                                     max_iterations=max_iterations,
                                                     learningrate=learningrate,
                                                     run_id=run,
                                                     print_info=print_info)

        # Save optimization trajectory
        trajectory_dict.update({run: trajectory})
        # Updating Quantities if new max is found

        # compare likelihood value to previous runs
        if L > max_likelihood:
            # This step can be very slow for large n; with the torch implementation there is no obvious way to speed it up
            print(f'New Maximum found! old:{max_likelihood} -> new:{L}')

            # Update highest likelihood and theta estimate
            max_likelihood = L
            theta_hat = theta.clone().data.numpy()

    # Calculating Derivatives at found theta_hat
    # get derivatives

    theta_hat = torch.tensor(theta_hat, requires_grad = True)

    return theta_hat, trajectory_dict
Example #5
    def __init__(self, dist: str, config: Mapping):
        """
        Generate a set of random rotation and translation error based on a specified distribution

        :param dist: 'uniform' or 'normal', distributions where the random samples are picked from
        :param config: type=dict, keys=['R', 'T'], which give the configuration of the distribution,
                       default values are given in the code
        """
        self.error = None

        if dist == 'uniform':
            self.dist = 'uniform'
            R_range = config.get(
                'R', np.deg2rad(2))  # rotation error upto 2 degrees by default
            T_range = config.get(
                'T', 0.2)  # translation error upto 20 cm by default
            LOG.warning('Rotation error range (degrees): [-%.2f, %.2f]' %
                        (np.rad2deg(R_range), np.rad2deg(R_range)))
            LOG.warning('Translation error range (meters): [-%.3f, %.3f]' %
                        (T_range, T_range))
            self.R_dist = uniform.Uniform(-R_range, R_range)
            self.T_dist = uniform.Uniform(-T_range, T_range)

            # statistics
            self.R_mean, self.T_mean = 0, 0  # (a+b)/2
            self.R_std = 2 * R_range / np.sqrt(12)  # (b-a)/sqrt(12)
            self.T_std = 2 * T_range / np.sqrt(12)
        elif dist == 'normal':
            self.dist = 'normal'
            R_params = config.get(
                'R',
                (np.deg2rad(2), np.deg2rad(0.1)
                 ))  # rotation error at 2 degrees as mean, 0.1 degree for std.
            T_params = config.get(
                'T',
                (0.2, 0.01))  # translation error at 20cm as mean, 1cm for std.
            LOG.warning('Rotation error mean (degrees): %.2f, std: %.2f' %
                        (np.rad2deg(R_params[0]), np.rad2deg(R_params[1])))
            LOG.warning('Translation error mean (meters): %.3f, std: %.3f' %
                        (T_params[0], T_params[1]))
            self.R_dist = normal.Normal(*R_params)
            self.T_dist = normal.Normal(*T_params)

            # statistics
            self.R_mean, self.R_std = R_params
            self.T_mean, self.T_std = T_params
        else:
            LOG.error('Unknown distribution given: %s' % dist)
        return
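The 'uniform' branch reduces to two symmetric Uniform distributions; a standalone sketch with hypothetical error ranges (1 degree of rotation, 5 cm of translation):

import numpy as np
from torch.distributions import uniform

config = {'R': np.deg2rad(1.0), 'T': 0.05}
R_range = config.get('R', np.deg2rad(2))
T_range = config.get('T', 0.2)
R_dist = uniform.Uniform(-R_range, R_range)     # rotation error in radians
T_dist = uniform.Uniform(-T_range, T_range)     # translation error in meters
print(float(R_dist.sample()), float(T_dist.sample()))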
Example #6
def NewtonMethod(func, data, lr, it, conv, print_info=False):
    theta = torch.tensor(
        [[uniform.Uniform(0., .6).sample(),
          uniform.Uniform(0., 5.).sample()]],
        requires_grad=True)

    with torch.no_grad():
        optim_trajectory = [theta.clone().data.numpy()]
    err = 10000
    for i in range(it):
        loglikelihoods = func(theta, data)

        loglikelihood_value = torch.mean(loglikelihoods)

        loglikelihood_value.backward()
        gradient = theta.grad

        #transpose for the multiplication
        grt = torch.transpose(gradient, 0, 1)

        # find inverse of hessian
        func_forHessian = lambda args: torch.mean(func(args, data))
        Hessian = torch.autograd.functional.hessian(func_forHessian,
                                                    theta).squeeze()
        print(Hessian)
        HessianInverse = torch.inverse(Hessian)

        # search direction, analogous to the gradient update in gradient descent
        sd = torch.mm(HessianInverse, grt)
        #transpose back to 1*dim
        sdt = torch.transpose(sd, 0, 1)

        with torch.no_grad():
            theta.add_(lr * sdt)
            theta.grad.zero_()
            optim_trajectory.append(theta.clone().data.numpy())

        if print_info:
            if i % 1 == 0:
                now = datetime.datetime.now()
                print(
                    f' Iteration: {i} \t| Log-Likelihood:{loglikelihood_value} \t|  theta: {theta}  \t|Error:{err}  |  Time needed: {datetime.datetime.now()-now}  '
                )

        err = np.linalg.norm(optim_trajectory[-1] - optim_trajectory[-2])
        if err < conv:
            break

    return theta, loglikelihood_value, optim_trajectory
Example #7
    def __init__(self, latent, noise='uniform', path='.', batch_size=512):
        '''
        Initializes the Transporter, including creating the model.

        latent: (np array) Latent space distribution to map to. Must be an
        array of one dimensional vectors.
        noise: (str) Noise distribution to map from. Must be either 'uniform',
        'normal', or 'gaussian'
        path: (str) Path to store any images/weights of the model
        batch_size: (int) Batch Size
        '''
        self.latent = torch.Tensor(latent)
        self.dim = len(latent[0])

        if noise.lower() == 'uniform':
            self.noise = uniform.Uniform(-1, 1)
        elif noise.lower() == 'normal' or noise.lower() == 'gaussian':
            self.noise = normal.Normal(0, 1)
        else:
            raise Exception("{} has not been implemented yet".format(noise))

        self.path = path
        self.batch_size = batch_size

        self.create_model()
Example #8
def GetCI(n, m, alpha, type_CI):

    start = time.time()
    data = get_data(int(n)).cuda()
    theta = torch.tensor([[uniform.Uniform(-2, 2).sample()]],
                         requires_grad=True).cuda()

    theta, _ = gradient_ascent_torch(func=LogLikelihood,
                                     param=theta,
                                     data=data,
                                     max_iterations=5000,
                                     learningrate=0.01)

    if type_CI == 'normal':
        theta_hat = theta.cpu().clone().data.detach().numpy()
        Scores, Hessian = get_derivatives_torch(func=LogLikelihood,
                                                param=theta,
                                                data=data)
        ci, length, shape = normal_CI(alpha, Scores.cpu(), Hessian.cpu(),
                                      theta_hat)
    else:
        raise ValueError(f'Unknown type of CI: {type_CI}')

    end = time.time()
    print(f'GetCI {end-start}')

    return ci, length, shape
Example #9
    def log_p(self, zs, θ):
        """
        Calculate conditional probabilities for a run of the simulator
        
        Arguments:
            zs:        List[torch.Tensor], latent variables
            θ:         torch.Tensor, parameters
        Returns:
            log_p: torch.Tensor, the total log-probability, i.e. the sum over i of log p(z_i|θ, zs[:i])
        """
        ps = [0 for _ in range(len(zs)-1)]

        # === PROB. DENSITY CALCULATION FOR ps[0] ===
        
        # torch Uniform distribution for initial positions of population
        uni_dist = torchUni.Uniform(0, self.city_size)

        ps[0] = torch.tensor(0.)  # log(1): the accumulator works in log space
        # accumulates ps[0] as the sum of log-densities of each person's independent initial location
        for i in range(self.pop):
            ps[0] = ps[0] + uni_dist.log_prob(zs[0][5*i])  # initial pos x-coord
            ps[0] = ps[0] + uni_dist.log_prob(zs[0][5*i+1])  # initial pos y-coord
        # then add the log-probability of person i being chosen as patient zero
        ps[0] = ps[0] + math.log(1.0/float(self.pop))
        for i in range(1, len(zs)-1):
            ps[i] = self.p_latent_step(zs[i], zs[i-1], θ)
        return sum(ps)
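For reference, each term contributed by the initial positions is a constant log-density; a quick check of what Uniform.log_prob returns inside its support:

import torch
from torch.distributions import uniform

city_size = 10.0                                   # hypothetical city size
uni_dist = uniform.Uniform(0, city_size)
print(uni_dist.log_prob(torch.tensor(3.0)))        # tensor(-2.3026) == -log(city_size)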
Example #10
    def __init__(self,
                 n_levels,
                 n_children,
                 n_dims,
                 low=-1000,
                 high=1000,
                 mean=0):
        if isinstance(low, Number):
            low = torch.ones(n_dims) * low
        if isinstance(high, Number):
            high = torch.ones(n_dims) * high

        self.n_levels = n_levels
        self.cluster_means = uniform.Uniform(low + mean, high + mean).sample(
            (n_children, ))
        self.concepts = []
        for i, cluster_mean in enumerate(self.cluster_means):
            pref = '\t' * (3 - n_levels)
            if n_levels > 0:
                print(pref + 'New cluster centered on {}'.format(cluster_mean))
                self.concepts.append(
                    HierarchicalConceptPool(n_levels - 1,
                                            n_children,
                                            n_dims,
                                            low=low / 10,
                                            high=high / 10,
                                            mean=cluster_mean))
            else:
                print(pref + 'New concept centered on {}'.format(cluster_mean))
                self.concepts.append(Concept(cluster_mean, np.eye(n_dims), i))
Example #11
 def __init__(self, n_dims, n_concepts, low=-1, high=1):
     low = torch.ones(n_dims) * low
     high = torch.ones(n_dims) * high
     means = uniform.Uniform(low, high).sample((n_concepts, ))
     self.concepts = [
         Concept(m, torch.eye(n_dims), i) for i, m in enumerate(means)
     ]
Example #12
    def __call__(self, X):

        distribution = uniform.Uniform(torch.Tensor([self.lb]),
                                       torch.Tensor([self.ub]))
        multiplier = distribution.sample(torch.Size([self.batch_size]))

        return multiplier[:, None, None, None].numpy() * X
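A standalone sketch of the same per-sample scaling with hypothetical bounds and batch size; scalar bounds are used here so the sampled multiplier comes out 1-D and broadcasts cleanly over the image dimensions:

import numpy as np
import torch
from torch.distributions import uniform

batch_size, lb, ub = 8, 0.8, 1.2
X = np.random.rand(batch_size, 3, 32, 32).astype(np.float32)

multiplier = uniform.Uniform(lb, ub).sample(torch.Size([batch_size]))   # shape (batch_size,)
X_scaled = multiplier[:, None, None, None].numpy() * X                  # one random factor per sample
print(X_scaled.shape)                                                    # (8, 3, 32, 32)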
Example #13
    def _build_tree(self, n_levels, n_children, n_dims, low, high, mean,
                    parent_node):
        if isinstance(low, Number):
            low = torch.ones(n_dims) * low
        if isinstance(high, Number):
            high = torch.ones(n_dims) * high

        cluster_means = uniform.Uniform(low + mean, high + mean).sample(
            (n_children, ))
        pref = '\t' * (self.n_levels - n_levels)
        for i, cluster_mean in enumerate(cluster_means):
            logger.debug(pref +
                         'New cluster centered on {}'.format(cluster_mean))
            node_name = '{}{}'.format(parent_node, i)
            self.tree.add_edge(parent_node, node_name)
            if n_levels > 0:
                self._build_tree(n_levels - 1,
                                 n_children,
                                 n_dims,
                                 low=low / 10,
                                 high=high / 10,
                                 mean=cluster_mean,
                                 parent_node=node_name)
                self.tree.add_node(node_name, cluster_mean=cluster_mean)
            else:
                concept = Concept(cluster_mean, np.eye(n_dims), node_name)
                self.tree.add_node(node_name, concept=concept)
Example #14
    def __init__(self, hidden_size, scoring_hsize=None):
        super(BaseCell, self).__init__()
        self.hidden_size = hidden_size
        self.binary_function_name_list = ['add', 'mul']
        self.binary_function_list = [lambda x,y: x+y, lambda x,y: x*y]
        self.unary_function_name_list = ['sigmoid', 'tanh', 'oneMinus', 'identity', 'relu']
        self.unary_function_list = [torch.sigmoid, torch.tanh, lambda x: 1-x, lambda x: x, torch.relu]
        self.m = 1  # Num of output vectors
        self.N = 9  # Num of generated nodes in one cell
        self.l = 4  # Num of parameter matrices (L_i, R_i, b_i)

        # Initialize the L and R weights from the uniform distribution and b to zero
        weight_distribution = uniform.Uniform(-1/np.sqrt(hidden_size), 1/np.sqrt(hidden_size))
        self.L_list = nn.ParameterList([nn.Parameter(weight_distribution.sample([hidden_size, hidden_size])) for _ in range(self.l)])
        self.R_list = nn.ParameterList([nn.Parameter(weight_distribution.sample([hidden_size, hidden_size])) for _ in range(self.l)])
        self.b_list = nn.ParameterList([nn.Parameter(torch.zeros(1, hidden_size)) for _ in range(self.l)])

        # Set L3, R3, b3 to the identity transformation #
        self.L_list[3] = nn.Parameter(torch.eye(hidden_size))
        self.R_list[3] = nn.Parameter(torch.eye(hidden_size))
        self.b_list[3] = nn.Parameter(torch.zeros(1, hidden_size))
        
        # TODO: more complicated scoring
        if scoring_hsize is not None:
            self.scoring = nn.Sequential(
                                nn.Linear(hidden_size, scoring_hsize),
                                nn.ReLU(),
                                nn.Linear(scoring_hsize, 1))
        else:
            self.scoring = nn.Linear(hidden_size, 1, bias=False)
        
        # softmax function
        self.softmax_func = torch.nn.Softmax(dim=2)
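The init range of ±1/sqrt(hidden_size) mirrors the bound PyTorch's own nn.Linear uses for its default weight initialization. A small instantiation sketch, assuming BaseCell ultimately inherits from nn.Module:

import numpy as np
import torch
import torch.nn as nn
from torch.distributions import uniform

cell = BaseCell(hidden_size=64, scoring_hsize=32)
print(cell.L_list[0].shape)                                # torch.Size([64, 64])
print(torch.equal(cell.L_list[3].data, torch.eye(64)))     # True: the fourth slot is pinned to the identity
print(bool(cell.L_list[0].abs().max() <= 1/np.sqrt(64)))   # True: weights stay inside the init range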
Example #15
def unique_random_uniform(samples_shape,
                          minval=0,
                          maxval=1,
                          dtype=tor.FloatTensor):
    """
    Generates a tensor of unique random values, resampling until samples_shape is filled with unique entries.

    >>> s = tor.manual_seed(1)
    >>> u = unique_random_uniform((5000,), maxval=25000000, dtype=tor.LongTensor)
    >>> print(u)
    tensor([    5696,     7039,    24671,  ..., 24995458, 24996430, 24998348])
    >>> len(u)
    5000
    """
    inds = Variable(tor.ones(samples_shape).type(dtype) * -1)
    reps = Variable(tor.LongTensor((0, )))

    while inds[-1] < 0:
        uni = uniform.Uniform(minval, maxval)
        reps_np = np.array(reps, dtype=np.int64)
        inds = tor.cat(
            (inds[:reps_np[-1]], uni.sample(
                (samples_shape[-1] - reps_np[-1], )).type_as(inds)),
            dim=-1)
        inds, reps = tor.unique(inds, return_inverse=True)
        inds, _ = tor.sort(inds, dim=-1)
        inds = pad_up_to(inds, samples_shape, -1)

    return inds
Example #16
    def __init__(self,n,p,sig,o=0.0,bias=True):
        super(CUDAvoir,self).__init__()

        self.n = torch.tensor(n)
        self.p = torch.tensor(p)
        self.sig = torch.tensor(sig)

        self.v = torch.zeros(self.n) ## Recurrent Layer State Vector
        self.w = torch.zeros(self.n,self.n) ## Recurrent Layer Weight Matrix

        self.ol = nn.Linear(self.n, 1, bias=False) ## Linear Output Layer
        self.o = torch.tensor([o]) ## Initialize Output Neuron
        self.fb = nn.Linear(1, self.n, bias=False) ## Linear Feedback Layer

        if bias: ## Recurrent Layer Bias
            self.b = torch.FloatTensor(n).uniform_(0,1)
        else:
            self.b = torch.zeros(self.n)
        
        ## Populate Recurrent Layer Weight Matrix
        norm = normal.Normal(loc=0,scale=self.sig)
        uni = uniform.Uniform(0,1) 
        for i in range(self.n):
            for j in range(self.n):
                uni_draw = uni.sample()
                if uni_draw < self.p:
                    self.w[i,j] = norm.sample()
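The nested loop runs n^2 Python iterations; a vectorized sketch of the same sparse Gaussian initialization (equivalent in distribution, shown here with hypothetical values of n, p and sig):

import torch
from torch.distributions import normal, uniform

n, p, sig = 500, 0.1, 1.0
mask = uniform.Uniform(0., 1.).sample((n, n)) < p                  # keep each connection with probability p
w = normal.Normal(0., sig).sample((n, n)) * mask.float()           # Gaussian weights on the kept entries
print(float(mask.float().mean()))                                  # roughly p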
Example #17
    def _build_tree(self, n_levels, n_children, n_dims, low, high, scale_fact, cov_scale, mean, parent_name):
        if isinstance(low, Number):
            low = torch.ones(n_dims) * low
        if isinstance(high, Number):
            high = torch.ones(n_dims) * high

        cluster_means = uniform.Uniform(low + mean, high + mean).sample((n_children[0],))
        pref = '\t' * (self.n_levels - n_levels)
        concepts = []
        for i, cluster_mean in enumerate(cluster_means):
            logger.debug(pref + 'New cluster centered on {}'.format(cluster_mean))
            concept_name = '{}{}'.format(parent_name, i)
            if n_levels > 1:
                lower_concepts = self._build_tree(n_levels - 1, n_children[1:], n_dims, low=low * scale_fact,
                                                  high=high * scale_fact, scale_fact=scale_fact, cov_scale=cov_scale,
                                                  mean=cluster_mean, parent_name=concept_name)
                concept = ComposedConcept(lower_concepts, cluster_mean=cluster_mean, id=concept_name)
                self.tree.add_node(concept)

                for c in lower_concepts:
                    self.tree.add_edge(concept, c)

                self.all_nodes.add(concept)

            else:
                concept = AtomicConcept(cluster_mean, torch.eye(n_dims) * cov_scale, concept_name)
                self.tree.add_node(concept)
                self.all_nodes.add(concept)
                self.leaf_nodes.add(concept)
            concepts.append(concept)
        return concepts
Example #18
def optimize(xL, xH, infer_args, program):

    beta = 10.0  # starting beta value
    ki = 0.95  # beta multiplier each iteration
    epsb = 0.05  # lower beta cutoff

    intmin = 0
    intmax = 100
    distribution = uniform.Uniform(torch.Tensor([intmin]),
                                   torch.Tensor([intmax]))
    inferred_paramL = torch.tensor(len(infer_args))
    inferred_paramH = torch.tensor(len(infer_args))

    # whether we have an input that satisfies program assertions
    satisfies = False
    while not satisfies:
        inferred_paramL = distribution.sample(inferred_paramL.size())
        inferred_paramH = distribution.sample(inferred_paramH.size())
        while not (inferred_paramL < inferred_paramH).all():
            inferred_paramL = distribution.sample(inferred_paramL.size())
            inferred_paramH = distribution.sample(inferred_paramH.size())
        inferred_paramL.requires_grad = True
        inferred_paramH.requires_grad = True

        #with torch.no_grad():
        #    inferred_paramL.fill_(-5.0)
        #    inferred_paramH.fill_(5.0)

        interval = None
        optimizer = optim.SGD([inferred_paramL, inferred_paramH], lr=0.01)
        while beta >= epsb:
            optim_state = OptimizerState(beta=beta,
                                         lambda_const=1 / 2,
                                         smooth=True)
            for i in range(500):

                def closure():
                    optimizer.zero_grad()
                    optim_state.loss = torch.tensor([0.0], requires_grad=True)
                    inferred_xL = torch.cat((xL, inferred_paramL), 0)
                    inferred_xH = torch.cat((xH, inferred_paramH), 0)
                    interval = AbsInterval(inferred_xL, inferred_xH, 1.0)
                    #print("in: %s" % (str(interval)))
                    y = program.propagate(interval, optim_state)
                    loss = optim_state.loss
                    loss.backward()
                    print("b: %f, Loss: %s,\n in: %s, \n out: %s" %
                          (beta, str(loss.item()), str(interval), str(y)))
                    return loss

                optimizer.step(closure)
                #with torch.no_grad():
                #    xL -= learning_rate * xL.grad
                #    xH -= learning_rate * xH.grad

            beta = beta * ki
        sat_state = OptimizerState(beta=beta, lambda_const=1 / 2, smooth=False)
        if program.satisfied_by(interval, sat_state):
            satisfies = True
    return inferred_paramL, inferred_paramH
Example #19
 def get_params_stoch(self, r, fc, out_mu, noise_trans):
     hidden_r = fc(r)
     batch_size = r.shape[0]
     epsilon = uniform.Uniform(low=0, high=1).sample(
         sample_shape=torch.Size([batch_size, self.hidden_dim]))
     epsilon_trans = epsilon + noise_trans(epsilon)
     hidden_noise = torch.cat((hidden_r, epsilon_trans), dim=1)
     mu = out_mu(hidden_noise)
     return mu
Example #20
 def __init__(self, n_actions, min_action=None, max_action=None):
     super(UniformPolicy, self).__init__()
     self.n_actions = n_actions
     min_action = torch.Tensor(
         min_action) if min_action is not None else -torch.ones(n_actions)
     max_action = torch.Tensor(
         max_action) if max_action is not None else torch.ones(n_actions)
     assert min_action.shape == max_action.shape and max_action.shape == torch.Size(
         [n_actions])
     self.distr = uniform.Uniform(min_action, max_action)
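A usage sketch, assuming the parent class takes no extra constructor arguments:

import torch
from torch.distributions import uniform

policy = UniformPolicy(n_actions=4, min_action=[-1., -1., 0., 0.], max_action=[1., 1., 2., 2.])
action = policy.distr.sample()        # shape (4,), each coordinate drawn from its own range
batch = policy.distr.sample((32,))    # shape (32, 4), a batch of random actions
print(action.shape, batch.shape)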
Example #21
    def __init__(self, device, x_dim, z_dim, attr_dim, **kwargs):
        '''
        Trainer class.
        Args:
            device: CPU/GPU
            x_dim: Dimension of image feature vector
            z_dim: Dimension of noise vector
            attr_dim: Dimension of attribute vector
            kwargs
        '''
        self.device = device

        self.x_dim = x_dim
        self.z_dim = z_dim
        self.attr_dim = attr_dim

        self.n_critic = kwargs.get('n_critic', 5)
        self.lmbda = kwargs.get('lmbda', 10.0)
        self.beta = kwargs.get('beta', 0.01)
        self.bs = kwargs.get('batch_size', 32)

        self.gzsl = kwargs.get('gzsl', False)
        self.n_train = kwargs.get('n_train')
        self.n_test = kwargs.get('n_test')
        if self.gzsl:
            self.n_test = self.n_train + self.n_test

        self.eps_dist = uniform.Uniform(0, 1)
        self.Z_dist = normal.Normal(0, 1)

        self.eps_shape = torch.Size([self.bs, 1])
        self.z_shape = torch.Size([self.bs, self.z_dim])

        self.net_G = Generator(self.z_dim, self.attr_dim).to(self.device)
        self.optim_G = optim.Adam(self.net_G.parameters(), lr=1e-4)

        self.net_D = Discriminator(self.x_dim, self.attr_dim).to(self.device)
        self.optim_D = optim.Adam(self.net_D.parameters(), lr=1e-4)

        # classifier for judging the output of generator
        self.classifier = MLPClassifier(self.x_dim, self.attr_dim,
                                        self.n_train).to(self.device)
        self.optim_cls = optim.Adam(self.classifier.parameters(), lr=1e-4)

        # Final classifier trained on augmented data for GZSL
        self.final_classifier = MLPClassifier(self.x_dim, self.attr_dim,
                                              self.n_test).to(self.device)
        self.optim_final_cls = optim.Adam(self.final_classifier.parameters(),
                                          lr=1e-4)

        self.criterion_cls = nn.CrossEntropyLoss()

        self.model_save_dir = "saved_models"
        if not os.path.exists(self.model_save_dir):
            os.mkdir(self.model_save_dir)
Example #22
 def set_noise_dist(self, limit):
     """Determines the shape and magnitude of the noise."""
     a = -limit
     b = limit
     if self.noise_distribution == "uniform":
         self.distribution = uniform.Uniform(torch.Tensor([a]), torch.Tensor([b]))
     elif self.noise_distribution == "normal":
         self.distribution = normal.Normal(torch.Tensor([0]), torch.Tensor([b]))
     else:
         print("Unknown precision type")
         exit()
Example #23
    def sample(self, num_samples):
        """
        Parameters
        ----------
        num_samples: tuple. (num_samples,)

        """
        taus_uniform = uniform.Uniform(0., 1.).sample(num_samples)
        wang_tau = self.normal.cdf(value=self.normal.icdf(value=taus_uniform) +
                                   self.eta)
        return wang_tau
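This computes the Wang distortion g_eta(tau) = Phi(Phi^-1(tau) + eta), with self.normal presumably a standard Normal; a direct sketch of the computation with a hypothetical eta:

import torch
from torch.distributions import uniform, normal

eta = -0.75
std_normal = normal.Normal(0., 1.)
taus_uniform = uniform.Uniform(0., 1.).sample((1000,))
wang_tau = std_normal.cdf(std_normal.icdf(taus_uniform) + eta)
print(float(wang_tau.min()), float(wang_tau.max()))   # distorted quantile levels stay within [0, 1]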
Example #24
def load_embedding_matrix(embedding, words, embedding_size):
    """Add new words in the embedding matrix and return it"""
    embedding_matrix = torch.zeros(len(words), embedding_size)
    for i, word in enumerate(words):
        # Note: PAD embedded as sequence of zeros
        if word not in embedding:
            if word != 'PAD':
                embedding_matrix[i] = uniform.Uniform(-0.25, 0.25).sample(
                    torch.Size([embedding_size]))
        else:
            embedding_matrix[i] = embedding[word]
    return embedding_matrix
Example #25
 def set_noise_dist(self):
     """Determines the shape and magnitude of the noise."""
     a = -self.search_radius
     b = self.search_radius
     if self.noise_distribution == "uniform":
         self.distribution = uniform.Uniform(torch.Tensor([a]),
                                             torch.Tensor([b]))
     elif self.noise_distribution == "normal":
         self.distribution = normal.Normal(torch.Tensor([0.]),
                                           torch.Tensor([b]))
     else:
         print("Unknown distribution type")
         exit()
Example #26
 def set_noise_dist(self):
     """Determines the shape and magnitude of the noise."""
     a = self.sr_min
     b = self.sr_max
     c = (b-a)/2.
     assert a != b  # Sanity check
     if self.noise_shape == "uniform":
         self.distribution = uniform.Uniform(torch.Tensor([a]), torch.Tensor([b]))
     elif self.noise_shape == "normal":
         self.distribution = normal.Normal(torch.Tensor([c]), torch.Tensor([b]))
     else:
         print("Unknown distribution type")
         exit()
Example #27
    def sample(self, num_samples):
        """
        Parameters
        ----------
        num_samples: tuple. (num_samples,)

        """
        taus_uniform = uniform.Uniform(0., 1.).sample(num_samples)
        tau_eta = taus_uniform**self.eta
        one_tau_eta = (1 - taus_uniform)**self.eta
        cpw_tau = tau_eta / ((tau_eta + one_tau_eta)**(1. / self.eta))

        return cpw_tau
Example #28
    def sample(self, num_samples):
        """
        Parameters
        ----------
        num_samples: tuple. (num_samples,)

        """
        taus_uniform = uniform.Uniform(0., 1.).sample(num_samples)

        if self.eta > 0:
            return taus_uniform**self.exponent
        else:
            return 1 - (1 - taus_uniform)**self.exponent
Example #29
def load_embedmatrix(embed_index, vocab, embeddim):

    embedding_matrix = torch.zeros(len(vocab), embeddim)
    i = 0
    for word in vocab.keys():
        if (word not in embed_index):
            if (word != '<PAD>'):
                embedding_matrix[i, :] = uniform.Uniform(-0.25, 0.25).sample(
                    torch.Size([embeddim]))
        else:
            embedding_matrix[i, :] = embed_index[word]
        i += 1

    return embedding_matrix
Example #30
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 padding_mode='zeros',
                 mean_fac=1,
                 var_fac=.5):

        super(DoGConv, self).__init__()
        torch.autograd.set_detect_anomaly(True)
        # My initialization of the parameters is really hacky and not really useful, but it's only used once,
        # so I did not optimize it
        # First we sample the parameters for the Difference of Gaussian Filters
        uni = uniform.Uniform(torch.tensor([-1.0]), torch.tensor([1.0]))

        # For the variance we use the Cholesky decomposition in order to enforce pos. def.
        print('Initializing')
        self.means = nn.Parameter(mean_fac * uni.sample(
            (out_channels, in_channels, 2, 1, 2))[..., 0])
        var_chol = var_fac * uni.sample(
            (out_channels, in_channels, 2, 2, 2))[..., 0]
        var_chol[var_chol == 0] += 1E-1
        self.var_chol = nn.Parameter(torch.tril(var_chol))
        self.ratios = nn.Parameter(((1 + 1E-8 + (1 - 2E-8) * uni.sample((
            out_channels,
            in_channels,
        ))) / 2)[..., 0])
        self.last_filters = None
        if bias:
            self.bias = nn.Parameter(uni.sample((out_channels, ))[..., 0])
        else:
            self.bias = None
        self.kernel_size = kernel_size
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.padding_mode = padding_mode

        resol = np.linspace(-1, 1, self.kernel_size)
        pos = torch.tensor(np.stack(np.meshgrid(resol, resol), axis=2),
                           dtype=torch.float32)
        self.register_buffer('flat_pos',
                             pos.reshape((1, self.kernel_size**2, 2)))