Example #1
    def forward(self, X1, X2, **kwargs):
        alpha, mean_lam, gamma, delta = self._get_params(X1, **kwargs)
        cfg1, res1, kappa1, kr_pref1, _ = self._compute_terms(
            X1, alpha, mean_lam, gamma, delta)
        if X2 is not X1:
            cfg2, res2, kappa2, kr_pref2, _ = self._compute_terms(
                X2, alpha, mean_lam, gamma, delta)
        else:
            cfg2, res2, kappa2, kr_pref2 = cfg1, res1, kappa1, kr_pref1
        res2 = anp.reshape(res2, (1, -1))
        kappa2 = anp.reshape(kappa2, (1, -1))
        kr_pref2 = anp.reshape(kr_pref2, (1, -1))
        kappa12 = self._compute_kappa(
            anp.add(res1, res2), alpha, mean_lam)
        kmat_res = anp.subtract(kappa12, anp.multiply(kappa1, kappa2))
        kmat_res = anp.multiply(kr_pref1, anp.multiply(
            kr_pref2, kmat_res))

        kmat_x = self.kernel_x(cfg1, cfg2)
        if self.encoding_delta is None:
            if delta > 0.0:
                tmpmat = anp.add(kappa1, anp.subtract(
                    kappa2, kappa12 * delta))
                tmpmat = tmpmat * (-delta) + 1.0
            else:
                tmpmat = 1.0
        else:
            tmpmat = anp.add(kappa1, anp.subtract(
                kappa2, anp.multiply(kappa12, delta)))
            tmpmat = anp.multiply(tmpmat, -delta) + 1.0

        return kmat_x * tmpmat + kmat_res
Example #2
def forward(params, inputs=None, hps=None):
    hidden_activation = np.array([
        np.exp(-np.matmul(
            np.subtract(inputs, params['input']['hidden']['bias'][:, h])**2,
            params['input']['hidden']['weights'][:, h],
        )) for h in range(params['input']['hidden']['weights'].shape[1])
    ]).T

    channel_activations = hps['channel_activation'](np.add(
        np.matmul(
            hidden_activation,
            params['hidden']['categories']['weights'],
        ),
        params['hidden']['categories']['bias'],
    ))

    ## reconstructive error
    output_activation = np.sum(
        np.square(np.subtract(inputs, channel_activations)),
        axis=2).T

    output_activation = 1 - hps['output_activation'](
        output_activation / output_activation.sum(axis=1, keepdims=True))
    return [hidden_activation, channel_activations, output_activation]
Example #3
def data_preparation(data, train_test_ratio, type_data_prep):
    # data preparation using a regular train/test split
    if type_data_prep:
        train, test = split_data(data, train_test_ratio)
        #min = np.min(train, axis=0, keepdims=True)  # take column min values
        #max = np.max(train, axis=0, keepdims=True)
        #train = (train - min) / (max - min)
        X_train = train[:, 1:]
        y_train = train[:, 0]
        X_test = test[:, 1:]
        y_test = test[:, 0]
        # shift labels down by 8
        y_train = np.subtract(y_train, 8)
        y_test = np.subtract(y_test, 8)
        # data scaling
        X_train = X_train / 50000
        X_test = X_test / 50000
        return X_train, y_train, X_test, y_test
    # data preparation using cross-validation
    else:
        X_train, y_train, X_test, y_test = split_data_crossvalid(data)
        # shift labels down by 8
        y_train = np.subtract(y_train, 8)
        y_test = np.subtract(y_test, 8)
        # data scaling
        X_train = X_train / 50000
        X_test = X_test / 50000
        return X_train, y_train, X_test, y_test
Example #4
def f3(x):
     a=np.array([1.0,0.0])
     b=np.array([0.0,-1.0])
     B=np.array([[3.0,-1.0],[-1.0,3.0]])
     I=np.array([[1.0,0.0],[0.0,1.0]])
     y1=np.exp(-(np.dot(np.subtract(x,a),np.transpose(np.subtract(x,a)))))
     y2=np.exp(-(np.dot(np.dot(np.subtract(x,b),B),np.transpose(np.subtract(x,b)))))
     y3=(np.log(np.linalg.det(np.add(0.01*I,np.dot(np.transpose(x),x)))))/10.0
     #y3=np.log((x[0]*x[0]+0.01)*(x[1]*x[1]+0.01)-x[0]*x[0]*x[1]*x[1])/10.0
     return 1.0-(y1+y2-y3)
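A minimal usage sketch (not part of the original source) that simply evaluates f3 at the two Gaussian centres a and b and at the origin; it only assumes numpy is imported as np and f3 is defined as above.

import numpy as np

for point in (np.array([1.0, 0.0]), np.array([0.0, -1.0]), np.array([0.0, 0.0])):
    print(point, f3(point))  # objective values at a few representative points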
Example #5
 def func(var):
     # (var[0]-args[:,0])**3
     temp = np.sin(np.power(np.subtract(var[0], args[:, 0]), 3))
     ele1 = np.sum(temp)
     # sin(var[1]**3 - var[2]**2) * args[:,1]
     temp = np.sin(np.subtract(np.power(var[1], 3), np.power(var[2], 2)))
     ele2 = np.sum(np.dot(temp, args[:, 1]))
     # log(args[:,1]*args[:,2]*var[0]*var[1]*var[2])
     temp = np.multiply(args[:, 1], args[:, 2])
     ele3 = np.sum(
         np.log(1 + np.abs(np.dot(var[0] * var[1] * var[2], temp))))
     # print("\n\n", ele1, "\n", ele2, "\n", ele3)
     # print(res, type(res))
     return ele1 + ele2 + ele3
Example #6
    def loss_func(self, params, iter):
        '''
        Function to implement the loss: softmax loss, MSE, or binary
        cross-entropy, depending on the final activation
        > y_hat: predicted outputs from forward_prop
        > y: ground-truth targets
        '''

        n = self.y.shape[1]

        y_hat = self.forward_prop(self.X, params)
        # print(self.y.shape)
        if self.activations[-1] == 'softmax':
            # Softmax Loss
            loss = np.mean(-np.sum(np.log(y_hat) * self.y))

        elif self.activations[-1] == 'linear':
            # MSE loss
            loss = np.square(np.subtract(self.y, y_hat)).mean()

        else:
            # Cross Entropy Loss
            loss = -1 / n * (np.sum(self.y * np.log(y_hat) +
                                    (1 - self.y) * np.log(1 - y_hat)))

        if self.verbose:
            if iter % 100 == 0:
                print("> loss after iteration {}: {}".format(
                    iter, loss._value))
                # print(loss._value)

        self.loss_list.append(loss._value)
        return loss
Example #7
    def squared_error(self, performances: np.ndarray, features: np.ndarray,
                      labels: np.ndarray, weights: np.ndarray,
                      sample_weights: np.ndarray):
        """Compute squared error for regression

        Arguments:
            performances {np.ndarray} -- [description]
            features {np.ndarray} -- [description]
            weights {np.ndarray} -- [description]
            sample_weights {np.ndarray} -- weights of the individual samples

        Returns:
            [type] -- [description]
        """
        loss = 0
        # add one column for bias
        feature_values = np.hstack((features, np.ones((features.shape[0], 1))))
        utilities = None
        if self.use_exp_for_regression:
            utilities = np.exp(np.dot(weights, feature_values.T))
        else:
            utilities = np.dot(weights, feature_values.T)
        inverse_utilities = utilities
        if self.use_reciprocal_for_regression:
            inverse_utilities = np.reciprocal(utilities)
        indices = labels.T - 1
        loss += np.mean(sample_weights[:, np.newaxis] * np.square(
            np.subtract(performances,
                        inverse_utilities.T[np.arange(len(labels)),
                                            indices].T)))
        return loss
Example #8
def loss(params, inputs=None, targets=None, hps=None):
    return np.sum(
        np.square(
            np.subtract(
                forward(params, inputs=inputs, hps=hps)[-1],
                targets,
            )))
Example #9
        def mean_square_error(params, X, Y):

            predicted_y = forward(params, X)

            return numpy.sum(
                numpy.sum(numpy.square(numpy.subtract(predicted_y, Y)), axis=1),
                axis=0)
Example #10
 def cost(X):
     U = X[0]
     cst = 0
     for n in range(N):
         cst = cst + huber(U[n, :])
     Mat = np.matmul(np.matmul(X[0], np.diag(X[1])), X[2])
     fidelity = LA.norm(np.subtract(np.matmul(A, Mat), YT))
     return cst + lambd * fidelity**2
Example #11
 def _compute_terms(self, X, alpha, mean_lam, gamma, delta, ret_mean=False):
     dim = self.kernel_x.dimension
     cfg = X[:, :dim]
     res = X[:, dim:]
     kappa = self._compute_kappa(res, alpha, mean_lam)
     kr_pref = anp.reshape(gamma, (1, 1))
     
     if ret_mean or (self.encoding_delta is not None) or delta > 0.0:
         mean = self.mean_x(cfg)
     else:
         mean = None
     if self.encoding_delta is not None:
         kr_pref = anp.subtract(kr_pref, anp.multiply(delta, mean))
     elif delta > 0.0:
         kr_pref = anp.subtract(kr_pref, mean * delta)
         
     return cfg, res, kappa, kr_pref, mean
Example #12
def loss(params, X=None, Y=None, init_cell_state=None, init_hidden_state=None):
    output = forward_seq(params,
                         X=X,
                         init_cell_state=init_cell_state,
                         init_hidden_state=init_hidden_state)

    return np.sum(
        np.square(np.subtract(output, Y))
    )  # + np.sum(np.abs([np.sum(params[layer]['w']) for layer in params])) * .1 # <-- weight size regularization?
Example #13
def loss(params, inputs=None, targets=None, hps=None):
    ## Cross-Entropy (i think); usually explodes
    # o = forward(params, inputs = inputs, hps = hps)[-1]
    # c = o * targets + (1 - o) * (1 - targets)
    # return -np.sum(np.log(c))
    ## SSE
    return np.sum(
        np.square(
            np.subtract(forward(params, inputs=inputs, hps=hps)[-1], targets)))
Example #14
def loss(params, inputs = None, targets = None, channels = None, labels_indexed = None, hps = None):
    return np.sum(
        np.square(
            np.subtract(
                forward(params, inputs = inputs, hps = hps)[-1],
                targets
            )
        )
    )
Example #15
    def _compute_terms(self, X, alpha, mean_lam, gamma, delta, ret_mean=False):
        dim = self.kernel_x.dimension
        X_shape = getval(X.shape)
        cfg = anp.take(X, range(0, dim), axis=1)
        res = anp.take(X, range(dim, X_shape[1]), axis=1)
        kappa = self._compute_kappa(res, alpha, mean_lam)
        kr_pref = anp.reshape(gamma, (1, 1))

        if ret_mean or (self.encoding_delta is not None) or delta > 0.0:
            mean = self.mean_x(cfg)
        else:
            mean = None
        if self.encoding_delta is not None:
            kr_pref = anp.subtract(kr_pref, anp.multiply(delta, mean))
        elif delta > 0.0:
            kr_pref = anp.subtract(kr_pref, mean * delta)

        return cfg, res, kappa, kr_pref, mean
Example #16
	def eval_log_properly(self, x):
		det = np.linalg.det(self.Sigma)
		const = (self.size/2.0)*np.log(2*np.pi)
		const = -0.5*np.log(det) - const
		prec = np.linalg.inv(self.Sigma)
		t = np.subtract(x, self.Mu)
		v = np.dot(np.transpose(t), prec)
		v = -0.5*np.dot(v, t)
		return const + v
Example #17
def response(params, inputs=None, targets=None, channels=None, hps=None):
    if targets is None: targets = inputs
    return np.argmin(np.sum(np.square(
        np.subtract(
            targets,
            forward(params, inputs=inputs, channels=channels, hps=hps)[-1])),
                            axis=2,
                            keepdims=True),
                     axis=0)[:, 0]
Example #18
def forward(params, inputs = None, hps = None):
    hidden1_activations = np.array([
        hps['hidden1_activation'](
            np.add(
                np.matmul(
                    inputs,
                    params['input']['hidden1']['weights'][c,:,:],
                ),
                params['input']['hidden1']['bias'][c,:,:],
            )
        ) 
    for c in range(params['input']['hidden1']['weights'].shape[0])
    ])

    hidden2_activations = np.array([
        hps['hidden2_activation'](
            np.add(
                np.matmul(
                    hidden1_activations[c,:,:],
                    params['hidden1']['hidden2']['weights'][c,:,:],
                ),
                params['hidden1']['hidden2']['bias'][c,:,:],
            )
        ) 
    for c in range(params['hidden1']['hidden2']['weights'].shape[0])
    ])

    channel_activations = np.array([
        hps['channel_activation'](
            np.add(
                np.matmul(
                    hidden2_activations[c,:,:],
                    params['hidden2']['output']['weights'][c,:,:],
                ),
                params['hidden2']['output']['bias'][c,:,:],
            )
        ) 
    for c in range(params['hidden2']['output']['weights'].shape[0])
    ])

    ## reconstructive error
    output_activation = np.sum(
        np.square(
            np.subtract(
                inputs,
                channel_activations,
            )
        ),
        axis = 2
    ).T

    output_activation = 1 - hps['classifier_activation'](
        output_activation / output_activation.sum(axis=1, keepdims = True)
    )

    return [hidden1_activations, hidden2_activations, channel_activations, output_activation]
Example #19
def gaussian_area(x, mean, sigma):
    """
    :param x: lower/higher bound
    :param mean: gaussian param mean
    :param sigma: gaussian param sigma
    :return: area under curve from x -> inf or x-> -inf
    """
    double_prob = agnp.abs(sp.erf((x - mean) / (sigma * agnp.sqrt(2))))
    p_zero_to_bound = agnp.divide(double_prob, 2)
    return agnp.subtract(0.5, p_zero_to_bound)
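A hedged spot check, assuming agnp is autograd.numpy (plain numpy also works here) and sp provides erf, e.g. scipy.special: for a standard normal the tail area beyond one sigma should come out near 0.1587.

import autograd.numpy as agnp
from scipy import special as sp

print(gaussian_area(1.0, mean=0.0, sigma=1.0))   # ~0.1587, upper-tail area beyond +1 sigma
print(gaussian_area(-1.0, mean=0.0, sigma=1.0))  # same value; the abs() makes it symmetric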
Example #20
	def eval(self,x):
		#x and mu must have same dimensions
		det = np.linalg.det(self.Sigma)**(-0.5)
		const = (2*np.pi)**(-self.size/2.0)
		const = det*const
		prec = np.linalg.inv(self.Sigma)
		t = np.subtract(x, self.Mu)
		v = np.dot(np.transpose(t), prec)
		v = np.exp(-0.5*np.dot(v, t))
		return const*v
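eval here and eval_log_properly from Example #16 are methods, and their host class is not shown; the sketch below fakes a minimal container (purely an assumption) and uses scipy.stats.multivariate_normal as an independent reference for the values the two methods should produce.

import numpy as np
from scipy import stats

class Gaussian:
    def __init__(self, Mu, Sigma):
        self.Mu = Mu
        self.Sigma = Sigma
        self.size = len(Mu)
    # eval (this example) and eval_log_properly (Example #16) would be pasted here

g = Gaussian(np.zeros(2), np.array([[1.0, 0.3], [0.3, 2.0]]))
x = np.array([0.5, -1.0])
ref = stats.multivariate_normal(mean=g.Mu, cov=g.Sigma)
print(ref.pdf(x), ref.logpdf(x))  # g.eval(x) and g.eval_log_properly(x) should match these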
Example #21
    def diagonal(self, X):
        alpha, mean_lam, gamma, delta = self._get_params(X)
        cfg, res, kappa, kr_pref, _ = self._compute_terms(
            X, alpha, mean_lam, gamma, delta)
        kappa2 = self._compute_kappa(res * 2, alpha, mean_lam)
        kdiag_res = anp.subtract(kappa2, anp.square(kappa))
        kdiag_res = anp.reshape(anp.multiply(kdiag_res, anp.square(kr_pref)),
                                (-1, ))
        kdiag_x = self.kernel_x.diagonal(cfg)
        if self.encoding_delta is None:
            if delta > 0.0:
                tmpvec = anp.subtract(kappa * 2, kappa2 * delta)
                tmpvec = anp.reshape(tmpvec * (-delta) + 1.0, (-1, ))
            else:
                tmpvec = 1.0
        else:
            tmpvec = anp.subtract(kappa * 2, anp.multiply(kappa2, delta))
            tmpvec = anp.reshape(anp.multiply(tmpvec, -delta) + 1.0, (-1, ))

        return kdiag_x * tmpvec + kdiag_res
Example #22
def loss(params, inputs = None, exemplars = None, targets = None, hps = None):
    output_activation = forward(params, inputs = inputs, exemplars = exemplars, hps = hps)[-1]
    targets = (output_activation * targets).clip(1, np.inf) * targets # <-- humble teacher principle (performs max(1,t) func on correct category labels, and min(-1,t) on incorrect channels)

    return .5 * np.sum(
        np.square(
            np.subtract(
                output_activation,
                targets
            )
        )
    )
Example #23
def grad_des_f2(start_x,pace):
     #pace = 0.1
     x=start_x
     #x=np.array([[1],[-1]])
     each_x1_array=np.array([x[0]])
     each_x2_array=np.array([x[1]]) #define an array for every x used
     res_f2_array=np.array([f2(x)]) #an array for every result
     iteration = 50
     while (iteration > 1):
          grad_x=grad_f2(x)
          x=np.subtract(x,pace*grad_x)  # step in the negative gradient direction
          each_x1_array = np.append(each_x1_array,x[0])#add new x to x-array
          each_x2_array = np.append(each_x2_array,x[1])
          res_f2_array = np.append(res_f2_array,f2(x))#add new result to array
          iteration = iteration - 1
     ####################################
     #x=each_x_array[0]
     #y=each_x_array[1]
     x=np.arange(-20,20,0.01)
     y=np.arange(-20,20,0.01)
     X,Y=np.meshgrid(x,y)
     #Z=np.zeros(shape=(300,300))
     #for i in range(300):
     #     for j in range(300):
     #          Z[i][j]=f2([X[i][j],Y[i][j]])
     #print res_f2_array,each_x1_array,each_x2_array
     Z=f2([X,Y])
     ###################################
     labels = np.array(range(50))
     plt.contourf(X,Y,Z,20,alpha=0.75, cmap=plt.cm.hot)
     C=plt.contour(X,Y,Z,20,colors='black',linewidths=.5)
     plt.clabel(C,inline = True,fontsize = 10)
     plt.scatter(each_x1_array, each_x2_array,c='black', s = 10,marker='s',linewidths=1, alpha = 0.5)
     for label, x, y in zip(labels, each_x1_array, each_x2_array):
         plt.annotate(
             label,
             xy=(x, y), xytext=(-20, 20),
             textcoords='offset points', ha='right', va='bottom',
             bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
             arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
     plt.xlim(-20,20)
     plt.ylim(-20,20)
     plt.show()
     #print res_f2_array
     #fig = plt.figure()
     #ax = fig.add_subplot(111, projection="3d")
     #ax.plot_surface(X, Y, Z, cmap="autumn_r", lw=0.5, rstride=1, cstride=1, alpha=0.5)
     #ax.contour(X, Y, Z, 10, lw=3, cmap="autumn_r", linestyles="solid", offset=-1)
     #ax.contour(X, Y, Z, 10, lw=3, colors="k", linestyles="solid")
     #plt.show()
Example #24
def cherry_warp(wts, inputs, labels, parameters):

    # hidden layer input
    biased_inputs = np.subtract(inputs, wts[0])**2

    # hidden layer activations
    hidden_activations = np.exp(
        -np.absolute(np.einsum('hif,fh -> ih', biased_inputs, wts[2])))

    # output layer activations
    output_activations = softmax(hidden_activations, wts, labels, parameters)

    return output_activations, hidden_activations
Example #25
def loss(params, inputs=None, targets=None, hps=None, pz=None):

    hid, out = forward(params, inputs=inputs, hps=hps)
    return np.add(

        ## reconstructive error
        np.sum(np.square(np.subtract(
            out,
            targets,
        ))),

        ## KL divergence between hidden layer and random gaussian distribution
        -np.sum(hid * np.log(hid / pz))) / inputs.shape[0]
Example #26
def update_bias(bias, grad_bias, rate):
    '''
    given the bias gradients from nll_gradients(), update the bias values
    by the amount rate

    param bias: list, old bias values
    param grad_bias: list, bias gradients
    param rate: float, learning rate
    '''
    output = []
    for i in range(len(bias)):
        output.append(np.subtract(bias[i], rate*grad_bias[i]))
    return output
Example #27
def update_weights(weights, grad_weights, rate):
    '''
    given the weight gradients from nll_gradients(), update the weights
    by the amount rate

    param weights: list, old weight values
    param grad_weights: list, weight gradients
    param rate: float, learning rate
    '''
    output = []
    for i in range(len(weights)):
        output.append(np.subtract(weights[i], rate*grad_weights[i]))
    return output
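A hypothetical training loop wiring the two helpers together. nll_gradients() is only named in the docstrings above, so a zero-gradient stub stands in for it here; the shapes and learning rate are made up.

import numpy as np

def nll_gradients_stub(weights, bias):
    # stand-in gradient function: returns zero gradients of matching shapes
    return [np.zeros_like(w) for w in weights], [np.zeros_like(b) for b in bias]

weights = [np.random.randn(4, 3), np.random.randn(3, 1)]
bias = [np.random.randn(3), np.random.randn(1)]
rate = 0.01
for step in range(100):
    grad_weights, grad_bias = nll_gradients_stub(weights, bias)
    weights = update_weights(weights, grad_weights, rate)
    bias = update_bias(bias, grad_bias, rate)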
Example #28
def forward(params, inputs=None, hps=None):
    hidden_activation = np.array([
        np.exp(-np.matmul(
            (np.subtract(inputs, params['input']['hidden']['bias'][:, h]) *
             np.array([params['attn'], 1 - params['attn']]))**2,
            params['input']['hidden']['weights'][:, h],
        )) for h in range(params['input']['hidden']['weights'].shape[1])
    ]).T

    output_activation = hps['output_activation'](
        hidden_activation @ params['hidden']['output']['weights'])

    return [hidden_activation, output_activation]
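An illustrative call with made-up shapes: 2 input features, 3 exemplar/RBF hidden units, 2 outputs. The dict layout mirrors what forward() indexes into; the parameter values and the identity output activation are assumptions.

import numpy as np

rng = np.random.default_rng(0)
params = {
    'attn': 0.5,                                            # attention weight on the first feature
    'input': {'hidden': {'bias': rng.normal(size=(2, 3)),   # hidden-unit centres, one column per unit
                         'weights': np.ones((2, 3))}},      # per-dimension widths
    'hidden': {'output': {'weights': rng.normal(size=(3, 2))}},
}
hps = {'output_activation': lambda z: z}                    # identity, for illustration
inputs = rng.normal(size=(4, 2))

hidden, out = forward(params, inputs=inputs, hps=hps)
print(hidden.shape, out.shape)                              # (4, 3) (4, 2)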
Example #29
def loss(params,
         inputs=None,
         inference_targets=None,
         classification_targets=None,
         channels_indexed=None,
         labels_indexed=None,
         hps=None):
    if labels_indexed is None:
        labels_indexed = np.zeros([1, inputs.shape[0]], dtype=int)

    hidden_activation, channel_activations, classifier_activation = forward(
        params, inputs=inputs, channels_indexed=channels_indexed, hps=hps)
    channel_activation = channel_activations[labels_indexed,
                                             range(inputs.shape[0]), :]

    return np.add(
        (1 - hps['dratio']) *
        np.sum(np.square(np.subtract(
            channel_activation,
            inference_targets,
        ))), hps['dratio'] * np.sum(
            np.square(
                np.subtract(classifier_activation, classification_targets))))
Example #30
def forward_pass(W1, W2, W3, b1, b2, b3, x):
    """
    forward-pass for a fully connected neural network with 2 hidden layers of M neurons
    Inputs:
        W1 : (M, 784) weights of first (hidden) layer
        W2 : (M, M) weights of second (hidden) layer
        W3 : (10, M) weights of third (output) layer
        b1 : (M, 1) biases of first (hidden) layer
        b2 : (M, 1) biases of second (hidden) layer
        b3 : (10, 1) biases of third (output) layer
        x : (N, 784) training inputs
    Outputs:
        Fhat : (N, 10) output of the neural network at training inputs
    """
    H1 = np.maximum(0,
                    np.dot(x, W1.T) +
                    b1.T)  # layer 1 neurons with ReLU activation, shape (N, M)
    H2 = np.maximum(0,
                    np.dot(H1, W2.T) +
                    b2.T)  # layer 2 neurons with ReLU activation, shape (N, M)
    Fhat = np.dot(
        H2, W3.T
    ) + b3.T  # layer 3 (output) neurons with linear activation, shape (N, 10)

    # Implement a stable log-softmax activation function at the output layer

    # Compute max of each row
    a = np.ones(np.shape(Fhat)) * np.expand_dims(
        np.amax(Fhat, axis=1),
        axis=1)  # a is the row-wise max of Fhat, broadcast to the same shape as Fhat
    log_sum_exp = np.ones(np.shape(Fhat)) * np.expand_dims(
        np.log(np.sum(np.exp(np.subtract(Fhat, a)),
                      axis=1)), axis=1)  # Compute using logSumExp trick
    # Element-wise subtraction
    Fhat = np.subtract(np.subtract(Fhat, a), log_sum_exp)

    return Fhat
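A quick shape check with made-up sizes (M = 32 hidden units, N = 5 inputs); the random weights are only there to exercise forward_pass, and each row of exp(Fhat) should sum to 1 because the output is a log-softmax.

import numpy as np

M, N = 32, 5
rng = np.random.default_rng(0)
W1, b1 = rng.normal(size=(M, 784)), rng.normal(size=(M, 1))
W2, b2 = rng.normal(size=(M, M)), rng.normal(size=(M, 1))
W3, b3 = rng.normal(size=(10, M)), rng.normal(size=(10, 1))
x = rng.normal(size=(N, 784))

Fhat = forward_pass(W1, W2, W3, b1, b2, b3, x)
print(Fhat.shape)                # (5, 10)
print(np.exp(Fhat).sum(axis=1))  # ~1.0 per row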
Example #31
def log_likelihood_detector_frame_SNR(Data,
                                      frequencies,
                                      noise,
                                      SNR,
                                      t_c,
                                      phi_c,
                                      chirpm,
                                      symmratio,
                                      spin1,
                                      spin2,
                                      alpha_squared,
                                      bppe,
                                      NSflag,
                                      detector,
                                      cosmology=cosmology.Planck15):
    mass1 = utilities.calculate_mass1(chirpm, symmratio)
    mass2 = utilities.calculate_mass2(chirpm, symmratio)
    DL = 100 * mpc
    model = dcsimr_detector_frame(mass1=mass1,
                                  mass2=mass2,
                                  spin1=spin1,
                                  spin2=spin2,
                                  collision_time=t_c,
                                  collision_phase=phi_c,
                                  Luminosity_Distance=DL,
                                  phase_mod=alpha_squared,
                                  cosmo_model=cosmology,
                                  NSflag=NSflag)
    #model = imrdf(mass1=mass1,mass2=mass2, spin1=spin1,spin2=spin2, collision_time=t_c,collision_phase=phi_c,
    #                Luminosity_Distance=DL, cosmo_model=cosmology,NSflag=NSflag)
    frequencies = np.asarray(frequencies)
    model.fix_snr_series(snr_target=SNR,
                         detector=detector,
                         frequencies=frequencies)

    amp, phase, hreal = model.calculate_waveform_vector(frequencies)
    #h_complex = np.multiply(amp,np.add(np.cos(phase),-1j*np.sin(phase)))
    h_complex = amp * np.exp(-1j * phase)
    #noise_temp,noise_func, freq = model.populate_noise(detector=detector,int_scheme='quad')
    resid = np.subtract(Data, h_complex)
    #integrand_numerator = np.multiply(np.conjugate(Data), h_complex) + np.multiply(Data,np.conjugate( h_complex))
    integrand_numerator = np.multiply(resid, np.conjugate(resid))

    #noise_root =noise_func(frequencies)
    #noise = np.multiply(noise_root, noise_root)
    integrand = np.divide(integrand_numerator, noise)
    integral = np.real(simps(integrand, frequencies))
    #integral = np.real(np.sum(integrand))
    return -2 * integral
Example #32
def loss(params, inputs = None, targets = None, channels_indexed = None, labels_indexed = None, hps = None):
    if labels_indexed is None:
        labels_indexed = np.zeros([1,inputs.shape[0]], dtype=int)

    channel_activations = forward(params, inputs = inputs, channels_indexed = channels_indexed, hps = hps)[-1]
    channel_activation = channel_activations[labels_indexed, range(inputs.shape[0]),:]

    return np.sum(
        np.square(
            np.subtract(
                channel_activation,
                targets,
            )
        )
    )
Example #33
def eval_log_prec(Mu, prec, x):
	t = np.subtract(x, Mu)
	v = np.dot(np.transpose(t), prec)
	v = -0.5*np.dot(v, t)
	return v
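Usage sketch for eval_log_prec, which returns only the quadratic form -0.5*(x - Mu)^T prec (x - Mu); the numbers below are made up. Adding the constant computed in Example #16 (-0.5*log det Sigma - (size/2)*log 2*pi) would recover the full log-density.

import numpy as np

Mu = np.array([1.0, -2.0])
Sigma = np.array([[2.0, 0.5], [0.5, 1.0]])
prec = np.linalg.inv(Sigma)
x = np.array([0.0, 0.0])

print(eval_log_prec(Mu, prec, x))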