Code Example #1
File: RWM.py Project: niko992/StochasticProcess
def RWM4(s2, P, f):
    M = int(np.ceil(P / 2))
    C = Cmat(P)
    IP = np.diag(np.ones(P))
    Is2 = IP * s2
    zero = np.zeros((P,))
    q = np.zeros((cts.N,))

    # MAP point of the posterior, found with BFGS
    res = sp.optimize.minimize(fct.minus_log_posterior,
                               np.random.multivariate_normal(zero, C),
                               args=(M,), method='BFGS')
    csi_map = res['x']

    Csi_old = np.random.multivariate_normal(zero, C)
    q[0] = f(Csi_old)

    def G(x):
        return fct.G(x, M)

    # closed-form inverse and square root of C, assuming Cmat(P) = diag(1/(k+1)**2)
    C_inv = np.diag([(k + 1) ** 2 for k in range(P)])
    sqrt_C = np.diag([1 / (k + 1) for k in range(P)])

    # Gauss-Newton approximation of the log-likelihood Hessian at the MAP point
    gradG = nd.Jacobian(G)
    gamma = cts.sigma ** 2 * np.dot(gradG(csi_map).T, gradG(csi_map))
    C_gamma = np.linalg.inv(C_inv + gamma)

    H_gamma = np.dot(C, np.dot(gamma, C))
    A_gamma = np.dot(sqrt_C,
                     np.dot(sp.linalg.sqrtm(IP - Is2 + np.linalg.inv(IP + H_gamma)), sqrt_C))
    for i in range(1, cts.N):
        eps = np.random.multivariate_normal(zero, s2 * C_gamma)
        Z = np.dot(A_gamma, Csi_old) + eps
        # Metropolis accept/reject
        U = np.random.uniform(0, 1)
        if U < min(fct.f(Z, M) / fct.f(Csi_old, M), 1):
            Csi_old = Z
        q[i] = f(Csi_old)
    return q
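The RWM variants in this file (see also Code Examples #9 and #15 below) depend on a project-specific Cmat/fct/cts interface that is not shown. The stand-ins below are assumptions, not the project's real code, chosen only so the samplers can run end to end; RWM4 additionally needs numdifftools imported as nd.

# Hypothetical stand-ins for the Cmat / fct / cts dependencies -- assumed
# shapes and constants, purely for illustration.
import numpy as np
import scipy as sp
from scipy import optimize, linalg  # exposes sp.optimize and sp.linalg

def Cmat(P):
    # assumed prior covariance: diagonal with decaying eigenvalues 1/(k+1)^2
    return np.diag([1.0 / (k + 1) ** 2 for k in range(P)])

class cts:
    N = 1000      # chain length (assumed)
    sigma = 0.1   # observation noise scale (assumed)

class fct:
    @staticmethod
    def G(x, M):
        # toy forward map: sum of the first M coordinates
        return np.atleast_1d(np.sum(x[:int(M)]))

    @staticmethod
    def minus_log_posterior(x, M):
        prior = 0.5 * np.sum(np.arange(1, len(x) + 1) ** 2 * x ** 2)
        misfit = 0.5 * np.sum(fct.G(x, M) ** 2) / cts.sigma ** 2
        return prior + misfit

    @staticmethod
    def f(x, M):
        # unnormalised posterior density
        return np.exp(-fct.minus_log_posterior(x, M))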
Code Example #2
    def update_params(
        self, x, errors, q_errors, batch_size, x_batch, y_batch, epoch_num=None, n_batches=None, curr_batch=None
    ):

        grad_w = [[] for _ in range(self.n_layers - 1)]
        grad_b = [[] for _ in range(self.n_layers - 1)]
        grad_w_q = [[] for _ in range(self.n_layers - 1)]
        grad_b_q = [[] for _ in range(self.n_layers - 1)]

        # gradients for the generative weights (eq. 2.19 with weight decay)
        for l in range(self.n_layers - 1):
            grad_w[l] = (
                self.vars[-1] * (1 / batch_size) * errors[l + 1] @ F.f(x[l], self.act_fn).T
                - self.d_rate * self.W[l]
            )
            grad_b[l] = self.vars[-1] * (1 / batch_size) * torch.sum(errors[l + 1], axis=1)

        # reverse the activities so the layer order matches the amortised (q) network
        x = x[::-1]
        for l in range(self.n_layers - 1):
            grad_w_q[l] = (
                self.vars[-1] * (1 / batch_size) * q_errors[l + 1] @ F.f(x[l], self.act_fn).T
                - self.d_rate * self.W_q[l]
            )
            grad_b_q[l] = self.vars[-1] * (1 / batch_size) * torch.sum(q_errors[l + 1], axis=1)

        self._apply_gradients(
            grad_w,
            grad_b,
            grad_w_q,
            grad_b_q,
            epoch_num=epoch_num,
            n_batches=n_batches,
            curr_batch=curr_batch,
        )
Code Example #3
File: q_network.py Project: alec-tschantz/predcoding
    def test_epoch(self, x_batches, y_batches, itr_max=None):
        accs = []
        n_batches = len(x_batches)
        avg_itr = 0
        for x_batch, y_batch in zip(x_batches, y_batches):
            x_batch = set_tensor(x_batch, self.device)
            y_batch = set_tensor(y_batch, self.device)
            batch_size = x_batch.size(1)

            x = [[] for _ in range(self.n_layers)]
            q = [[] for _ in range(self.n_layers)]

            if self.amortised:
                q[0] = x_batch
                for l in range(1, self.n_layers):
                    b_q = self.b_q[l - 1].repeat(1, batch_size)
                    q[l] = self.W_q[l - 1] @ F.f(q[l - 1], self.act_fn) + b_q
                x = q[::-1]
                x[self.n_layers - 1] = x_batch
            else:
                x[0] = torch.empty_like(y_batch).normal_(mean=0.0, std=0.1)
                for l in range(1, self.n_layers):
                    b = self.b[l - 1].repeat(1, batch_size)
                    x[l] = self.W[l - 1] @ F.f(x[l - 1], self.act_fn) + b
                x[self.n_layers - 1] = x_batch

            x, errors, its = self.infer_v2(x, batch_size, x_batch, itr_max=itr_max)
            pred_y = x[0]
            acc = mnist_utils.mnist_accuracy(pred_y, y_batch)
            accs.append(acc)
            avg_itr += its
        return accs, avg_itr / n_batches
Code Example #4
    def train_epoch(self, imgs, labels, log_error=False):
        img_batches, label_batches, batch_sizes = self._get_batches(
            imgs, labels, self.batch_size)
        n_batches = len(img_batches)
        print(f"training on {n_batches} batches of size {self.batch_size}")

        for batch in range(n_batches):
            batch_size = batch_sizes[batch]
            x = [[] for _ in range(self.n_layers)]
            x[0] = img_batches[batch]
            for l in range(1, self.n_layers):
                x[l] = self.W[l - 1] @ F.f(x[l - 1], self.act_fn) + np.tile(
                    self.b[l - 1], (1, batch_size))
            x[self.n_layers - 1] = label_batches[batch]

            x, errors, _ = self.infer(x, batch_size)

            grad_w = [[] for _ in range(self.n_layers - 1)]
            grad_b = [[] for _ in range(self.n_layers - 1)]

            # eq. 2.19 (with weight decay)
            for l in range(self.n_layers - 1):
                grad_w[l] = (self.vars[-1] * (1 / batch_size) *
                             errors[l + 1] @ F.f(x[l], self.act_fn).T -
                             self.d_rate * self.W[l])
                grad_b[l] = self.vars[-1] * (1 / batch_size) * np.sum(
                    errors[l + 1], axis=1)

            for l in range(self.n_layers - 1):
                self.W[l] = self.W[l] + self.l_rate * grad_w[l]
                self.b[l] = self.b[l] + self.l_rate * np.expand_dims(grad_b[l],
                                                                     axis=1)

            if batch % 50 == 0 and log_error:
                avg_errs = [np.mean(error) for error in errors]
                print(f"batch {batch}/{n_batches}: avg errors {avg_errs}")
Code Example #5
def u_next_lax_wendroff(u_last, u_halfstep, delta_t, delta_x, j, time,
                        position):
    # fill in the half-step value at j (u_halfstep[j-1] is assumed to have
    # been computed by the previous call)
    u_halfstep[j] = u_next_half_step(u_last, delta_t, delta_x, j, time,
                                     position)
    # Lax-Wendroff update: flux difference plus geometric and source terms
    return (u_last[j]
            - delta_t / delta_x * (func.f(u_halfstep[j]) - func.f(u_halfstep[j - 1]))
            + delta_t * func.g(u_halfstep, delta_x, j)
            + delta_t / 2 * (func.s(time, position, u_halfstep, j)
                             + func.s(time, position, u_halfstep, j - 1)))
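For context, a minimal self-contained driver in the same two-step spirit, applied to the inviscid Burgers equation u_t + (u^2/2)_x = 0. The flux and the omission of the geometric (func.g) and source (func.s) terms are assumptions made for illustration, not the original func module.

# Richtmyer two-step Lax-Wendroff for Burgers' equation (illustrative sketch)
import numpy as np

def flux(u):
    return 0.5 * u ** 2  # Burgers flux f(u) = u^2 / 2

def lax_wendroff_step(u, dt, dx):
    # half-step values at the cell interfaces j + 1/2
    u_half = 0.5 * (u[:-1] + u[1:]) - dt / (2 * dx) * (flux(u[1:]) - flux(u[:-1]))
    # full-step update on the interior points
    u_new = u.copy()
    u_new[1:-1] = u[1:-1] - dt / dx * (flux(u_half[1:]) - flux(u_half[:-1]))
    return u_new

x = np.linspace(0.0, 1.0, 201)
u = np.sin(2 * np.pi * x)  # smooth initial condition
dx = x[1] - x[0]
dt = 0.4 * dx              # CFL-limited step for |u| <= 1
for _ in range(100):
    u = lax_wendroff_step(u, dt, dx)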
Code Example #6
File: q_network.py Project: alec-tschantz/predcoding
    def infer_v2(self, x, batch_size, x_batch, itr_max=None):
        """ this version infers top layer, rather than keeping it fixed """
        itr_max = self.itr_max if itr_max is None else itr_max
        errors = [[] for _ in range(self.n_layers)]
        f_x_arr = [[] for _ in range(self.n_layers)]
        f_x_deriv_arr = [[] for _ in range(self.n_layers)]
        f_0 = 0
        its = 0
        beta = self.beta

        x[self.n_layers - 1] = x_batch

        for l in range(1, self.n_layers):
            f_x = F.f(x[l - 1], self.act_fn)
            f_x_deriv = F.f_deriv(x[l - 1], self.act_fn)
            f_x_arr[l - 1] = f_x
            f_x_deriv_arr[l - 1] = f_x_deriv

            # eq. 2.17
            b = self.b[l - 1].repeat(1, batch_size)
            errors[l] = (x[l] - self.W[l - 1] @ f_x - b) / self.vars[l]
            f_0 = f_0 - self.vars[l] * torch.sum(torch.mul(errors[l], errors[l]), dim=0)

        for itr in range(itr_max):
            # TODO: update the top layer as well, rather than clamping it
            g = torch.mul(self.W[0].T @ errors[1], f_x_deriv_arr[0])
            x[0] = x[0] + beta * g

            # update node activity
            for l in range(1, self.n_layers - 1):
                # eq. 2.18
                g = torch.mul(self.W[l].T @ errors[l + 1], f_x_deriv_arr[l])
                x[l] = x[l] + beta * (-errors[l] + g)

            # update errors
            f = 0
            for l in range(1, self.n_layers):
                f_x = F.f(x[l - 1], self.act_fn)
                f_x_deriv = F.f_deriv(x[l - 1], self.act_fn)
                f_x_arr[l - 1] = f_x
                f_x_deriv_arr[l - 1] = f_x_deriv

                # eq. 2.17
                errors[l] = (x[l] - self.W[l - 1] @ f_x - self.b[l - 1]) / self.vars[l]
                f = f - self.vars[l] * torch.sum(torch.mul(errors[l], errors[l]), dim=0)

            # backtrack if the objective decreased anywhere; stop once the
            # mean improvement falls below the convergence threshold
            diff = f - f_0
            threshold = self.condition * self.beta / self.vars[self.n_layers - 1]
            if torch.any(diff < 0):
                beta = beta / self.div
            elif torch.mean(diff) < threshold:
                break

            f_0 = f
            its = itr

        return x, errors, its
Code Example #7
    def infer(self, x, batch_size, itr_max=None, test=False):
        itr_max = self.itr_max if itr_max is None else itr_max
        errors = [[] for _ in range(self.n_layers)]
        f_x_arr = [[] for _ in range(self.n_layers)]
        f_x_deriv_arr = [[] for _ in range(self.n_layers)]
        f_0 = 0
        its = 0
        beta = self.beta

        for l in range(1, self.n_layers):
            f_x = F.f(x[l - 1], self.act_fn)
            f_x_deriv = F.f_deriv(x[l - 1], self.act_fn)
            f_x_arr[l - 1] = f_x
            f_x_deriv_arr[l - 1] = f_x_deriv

            # eq. 2.17
            b = self.b[l - 1].repeat(1, batch_size)
            errors[l] = (x[l] - self.W[l - 1] @ f_x - b) / self.vars[l]
            f_0 = f_0 - self.vars[l] * torch.sum(torch.mul(errors[l], errors[l]), dim=0)

        for itr in range(itr_max):
            # at test time, the top (prediction) layer is inferred as well
            if test:
                g = torch.mul(self.W[0].T @ errors[1], f_x_deriv_arr[0])
                x[0] = x[0] + beta * g

            # update node activity
            for l in range(1, self.n_layers - 1):
                # eq. 2.18
                g = torch.mul(self.W[l].T @ errors[l + 1], f_x_deriv_arr[l])
                x[l] = x[l] + beta * (-errors[l] + g)

            # update errors
            f = 0
            for l in range(1, self.n_layers):
                f_x = F.f(x[l - 1], self.act_fn)
                f_x_deriv = F.f_deriv(x[l - 1], self.act_fn)
                f_x_arr[l - 1] = f_x
                f_x_deriv_arr[l - 1] = f_x_deriv

                # eq. 2.17
                errors[l] = (x[l] - self.W[l - 1] @ f_x - self.b[l - 1]) / self.vars[l]
                f = f - self.vars[l] * torch.sum(torch.mul(errors[l], errors[l]), dim=0)

            # backtrack if the objective decreased anywhere; stop once the
            # mean improvement falls below the convergence threshold
            diff = f - f_0
            threshold = self.condition * self.beta / self.vars[self.n_layers - 1]
            if torch.any(diff < 0):
                beta = beta / self.div
            elif torch.mean(diff) < threshold:
                print(f"broke @ {itr} its")
                break

            f_0 = f
            its = itr

        return x, errors, its
Code Example #8
File: q_network.py Project: alec-tschantz/predcoding
    def train_epoch(self, x_batches, y_batches, epoch_num=None):
        """ x_batch are images, y_batch are labels
        TODO 0 is highest and lowest layer, fix this 
        """
        init_err = 0
        end_err = 0
        avg_itr = 0
        n_batches = len(x_batches)

        for batch_id, (x_batch, y_batch) in enumerate(zip(x_batches, y_batches)):
            if batch_id % 500 == 0 and batch_id > 0:
                print(f"batch {batch_id}")

            x_batch = set_tensor(x_batch, self.device)
            y_batch = set_tensor(y_batch, self.device)
            batch_size = x_batch.size(1)

            x = [[] for _ in range(self.n_layers)]
            q = [[] for _ in range(self.n_layers)]

            if self.amortised:
                q[0] = x_batch
                for l in range(1, self.n_layers):
                    b_q = self.b_q[l - 1].repeat(1, batch_size)
                    q[l] = self.W_q[l - 1] @ F.f(q[l - 1], self.act_fn) + b_q

                x = q[::-1]
                x[0] = y_batch
                x[self.n_layers - 1] = x_batch

            else:
                x[0] = y_batch
                for l in range(1, self.n_layers):
                    b = self.b[l - 1].repeat(1, batch_size)
                    x[l] = self.W[l - 1] @ F.f(x[l - 1], self.act_fn) + b
                x[self.n_layers - 1] = x_batch

            init_err += self.get_errors(x, batch_size)
            x, errors, its = self.infer(x, batch_size)
            self.update_params(
                x,
                q,
                errors,
                batch_size,
                x_batch,
                y_batch,
                epoch_num=epoch_num,
                n_batches=n_batches,
                curr_batch=batch_id,
            )
            end_err += self.get_errors(x, batch_size)
            avg_itr += its

        return end_err / n_batches, init_err / n_batches, avg_itr / n_batches
Code Example #9
File: RWM.py Project: niko992/StochasticProcess
def RWM2(s2, P, f):
    M = int(np.ceil(P / 2))
    C = Cmat(P)
    zero = np.zeros((P,))
    Csi_old = np.random.multivariate_normal(zero, C)
    q = np.zeros((cts.N,))
    q[0] = f(Csi_old)
    for i in range(1, cts.N):
        # pCN-style proposal: contract the current state, then add noise
        eps = np.random.multivariate_normal(zero, s2 * C)
        Z = np.sqrt(1 - s2) * Csi_old + eps
        # Metropolis accept/reject
        U = np.random.uniform(0, 1)
        if U < min(fct.f(Z, M) / fct.f(Csi_old, M), 1):
            Csi_old = Z
        q[i] = f(Csi_old)
    return q
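With the illustrative Cmat/fct/cts stand-ins sketched after Code Example #1, the sampler can be exercised directly; the step size, dimension, and observable below are arbitrary choices.

np.random.seed(0)
q = RWM2(0.1, 10, lambda xi: xi[0])  # trace of the first coordinate
print(q.mean(), q.std())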
Code Example #10
File: q_network.py Project: alec-tschantz/predcoding
    def get_errors(self, x, batch_size):
        total_err = 0
        for l in range(1, self.n_layers - 1):
            b = self.b[l - 1].repeat(1, batch_size)
            err = (x[l] - self.W[l - 1] @ F.f(x[l - 1], self.act_fn) - b) / self.vars[l]
            total_err += torch.sum(torch.mul(err, err), dim=0)
        return torch.sum(total_err)
Code Example #11
File: newtonzero.py Project: jjabbour/MCSC-6030G
def find_zero(x0):
    '''Find a zero of f(x) = x - cos(x) using Newton's method.
    Requires an initial guess x0.
    Returns:
        the estimate, if Newton's method converged,
        999999 if it did not converge in maxiter iterations.
    '''
    x = x0  # initial guess
    # Keep track of the range of x values used in iteration,
    # for plotting purposes.  Initialize to initial point:
    xmin = x
    xmax = x
    # open file for writing iterates
    outfile = open('newtonpts.txt', 'w')

    # Newton iteration to find a zero of f(x) = x-cos(x).

    converged = False
    for j in range(2 * maxiter + 1):
        # evaluate function and its derivative:
        fx = fun.f(x)
        fxprime = fun.fprime(x)

        # compute the Newton increment:
        delta = fx / fxprime

        # print values to watch convergence:
        print("j=%3d, x=%25.15g, f(x)=%16.6g" % (j, x, fx))
        # save x and fx for plotting purposes:
        outfile.write('%16.6e %16.6e\n' % (x, fx))
        # update x:
        x -= delta

        # update min and max seen so far:
        xmin = min(x, xmin)
        xmax = max(x, xmax)

        # check for convergence:
        if (abs(delta) < TOL):
            converged = True
            break

    outfile.close()
    print('Number of iterations: %d' % j)

    # Increase the range of x a bit for better plots:
    xtra = 0.2 * (xmax - xmin)
    xmin -= xtra
    xmax += xtra

    # Print out values of x over expanded interval for plotting:
    fvals(xmin, xmax)
    # If iteration didn't converge, set a special value:
    if converged:
        return x
    else:
        return 999999.e0
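The snippet assumes a companion fun module and the module-level constants TOL, maxiter, and nvals. A minimal illustrative version, consistent with the f(x) = x - cos(x) target named in the comments (the values are assumptions):

# Hypothetical stand-ins for the module-level dependencies of find_zero/fvals.
import numpy as np

TOL = 1e-12    # convergence tolerance (assumed)
maxiter = 20   # iteration budget (assumed)
nvals = 100    # number of plotting points (assumed)

class fun:
    @staticmethod
    def f(x):
        return x - np.cos(x)

    @staticmethod
    def fprime(x):
        return 1 + np.sin(x)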
Code Example #12
File: secant.py Project: eliasblanc/MOVI_HT1
def secant(x0, x1, eps):
    # secant iteration on f'(x): converges to a stationary point of f
    iterations = 0
    tmp = 0
    while abs(x1 - x0) > eps:
        iterations += 1
        tmp = x1
        x1 = x1 - (x1 - x0) * df(x1) / (df(x1) - df(x0))
        x0 = tmp
    return x1, f(x1), iterations
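The helpers f and df are defined elsewhere in the project; an illustrative pairing (a quadratic and its derivative, both assumptions here) shows the intended use, with the iteration landing on the stationary point of f:

def f(x):
    return (x - 2.0) ** 2 + 1.0  # assumed objective

def df(x):
    return 2.0 * (x - 2.0)       # its derivative

x_min, f_min, n = secant(0.0, 1.0, 1e-8)
print(x_min, f_min, n)  # -> 2.0, 1.0, 2 (exact in two steps for a quadratic)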
Code Example #13
File: newtonzero.py Project: jjabbour/MCSC-6030G
def fvals(xmin, xmax):
    '''Write values of f(x) over the interval xmin to xmax to fvals.txt,
    for plotting purposes.
    nvals, the number of values to print, is a module parameter.
    '''
    import numpy as np
    x = np.linspace(xmin, xmax, nvals)
    fx = fun.f(x)
    A = np.vstack([x, fx]).T
    np.savetxt('fvals.txt', A)
Code Example #14
File: network.py Project: alec-tschantz/predcoding
    def generate_data(self, x_batch):
        x_batch = set_tensor(x_batch, self.device)
        batch_size = x_batch.size(1)

        x = [[] for _ in range(self.n_layers)]
        x[0] = x_batch
        for l in range(1, self.n_layers):
            b = self.b[l - 1].repeat(1, batch_size)
            x[l] = self.W[l - 1] @ F.f(x[l - 1], self.act_fn) + b
        pred_y = x[-1]
        return pred_y
Code Example #15
File: RWM.py Project: niko992/StochasticProcess
def RWM3(s2, P, f):
    M = int(np.ceil(P / 2))
    C = Cmat(P)
    zero = np.zeros((P,))
    q = np.zeros((cts.N,))
    alpha = 0.000001
    ID = np.diag(np.ones(P))
    # MAP point of the posterior, found with BFGS
    res = sp.optimize.minimize(
        fct.minus_log_posterior, np.random.multivariate_normal(zero, C), args=(M,), method='BFGS')
    csi_map = res['x']
    Csi_old = np.random.multivariate_normal(zero, C)
    q[0] = f(Csi_old)
    # proposal covariance: BFGS inverse-Hessian estimate, regularised by alpha*I
    H = res['hess_inv'] + alpha * ID
    for i in range(1, cts.N):
        # independence proposal centred at the MAP point
        eps = np.random.multivariate_normal(zero, H)
        Z = csi_map + eps
        U = np.random.uniform(0, 1)
        if U < min(fct.f(Z, M) / fct.f(Csi_old, M), 1):
            Csi_old = Z
        q[i] = f(Csi_old)
    return q
Code Example #16
File: bisection.py Project: eliasblanc/MOVI_HT1
def bisection(a, b, eps):
    # bisection on f'(x): converges to a stationary point of f bracketed by [a, b]
    x = (a + b) / 2
    iterations = 0
    if df(a) * df(b) < 0:
        while (b - a) / 2 > eps:
            iterations += 1
            dfa0 = df(x)
            if dfa0 * df(b) < 0:
                a = x
                x = (a + b) / 2
            elif df(a) * dfa0 < 0:
                b = x
                x = (a + b) / 2
            else:
                # df(x) == 0: landed exactly on the stationary point
                return x, f(x), iterations
        return x, f(x), iterations
    elif df(a) * df(b) > 0:
        print(f"No sign change of df on [{a}, {b}]; no root bracketed")
        return
    else:
        return x, f(x), iterations
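Illustrative use with the same assumed f/df pair as in the secant example above; the sign check on df brackets the minimiser of f inside [a, b].

x_min, f_min, n = bisection(0.0, 5.0, 1e-8)
print(x_min, f_min, n)  # -> ~2.0, ~1.0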
Code Example #17
def golden_ratio(a, b, eps):
    # golden-section search for a minimum of f on [a, b];
    # tau is the golden ratio (1 + sqrt(5)) / 2
    x_1 = b - (b - a) / tau
    x_2 = a + (b - a) / tau
    fx1 = f(x_1)
    fx2 = f(x_2)
    iterations = 0
    while (b - a) / 2 > eps:
        iterations += 1
        if fx1 > fx2:
            # minimum lies in [x_1, b]; reuse x_2 as the new x_1
            a = x_1
            x_1 = x_2
            x_2 = b - (x_1 - a)
            fx1 = fx2
            fx2 = f(x_2)
        else:
            # minimum lies in [a, x_2]; reuse x_1 as the new x_2
            b = x_2
            x_2 = x_1
            x_1 = a + (b - x_2)
            fx2 = fx1
            fx1 = f(x_1)
    return (a + b) / 2, f((a + b) / 2), iterations
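In the original module tau and f are globals; assuming tau is the golden ratio and reusing the quadratic f from the secant example gives:

tau = (1 + 5 ** 0.5) / 2  # golden ratio (assumed value of the module global)

x_min, f_min, n = golden_ratio(0.0, 5.0, 1e-8)
print(x_min, f_min, n)  # -> ~2.0, ~1.0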
Code Example #18
File: network.py Project: alec-tschantz/predcoding
    def update_params(self, x, errors, batch_size, epoch_num=None, n_batches=None, curr_batch=None):
        grad_w = [[] for _ in range(self.n_layers - 1)]
        grad_b = [[] for _ in range(self.n_layers - 1)]

        for l in range(self.n_layers - 1):
            # eq. 2.19 (with weight decay)
            grad_w[l] = (
                self.vars[-1] * (1 / batch_size) * errors[l + 1] @ F.f(x[l], self.act_fn).T
                - self.d_rate * self.W[l]
            )
            grad_b[l] = self.vars[-1] * (1 / batch_size) * torch.sum(errors[l + 1], axis=1)

        self._apply_gradients(grad_w, grad_b, epoch_num=epoch_num, n_batches=n_batches, curr_batch=curr_batch)
Code Example #19
File: q_network.py Project: alec-tschantz/predcoding
    def test_amortised_epoch(self, x_batches, y_batches):
        accs = []
        for x_batch, y_batch in zip(x_batches, y_batches):
            x_batch = set_tensor(x_batch, self.device)
            y_batch = set_tensor(y_batch, self.device)
            batch_size = x_batch.size(1)

            q = [[] for _ in range(self.n_layers)]
            q[0] = x_batch
            for l in range(1, self.n_layers):
                b_q = self.b_q[l - 1].repeat(1, batch_size)
                q[l] = self.W_q[l - 1] @ F.f(q[l - 1], self.act_fn) + b_q
            pred_y = q[-1]
            acc = mnist_utils.mnist_accuracy(pred_y, y_batch)
            accs.append(acc)
        return accs
Code Example #20
File: network.py Project: alec-tschantz/predcoding
    def test_epoch(self, x_batches, y_batches):
        accs = []
        for x_batch, y_batch in zip(x_batches, y_batches):
            x_batch = set_tensor(x_batch, self.device)
            y_batch = set_tensor(y_batch, self.device)
            batch_size = x_batch.size(1)

            x = [[] for _ in range(self.n_layers)]
            x[0] = x_batch
            for l in range(1, self.n_layers):
                b = self.b[l - 1].repeat(1, batch_size)
                x[l] = self.W[l - 1] @ F.f(x[l - 1], self.act_fn) + b
            pred_y = x[-1]

            acc = mnist_utils.mnist_accuracy(pred_y, y_batch)
            accs.append(acc)
        return accs
Code Example #21
File: q_network.py Project: alec-tschantz/predcoding
    def update_params(
        self, x, q, errors, batch_size, x_batch, y_batch, epoch_num=None, n_batches=None, curr_batch=None
    ):

        grad_w = [[] for _ in range(self.n_layers - 1)]
        grad_b = [[] for _ in range(self.n_layers - 1)]
        grad_w_q = [[] for _ in range(self.n_layers - 1)]
        grad_b_q = [[] for _ in range(self.n_layers - 1)]

        for l in range(self.n_layers - 1):
            # eq. 2.19 (with weight decay)
            grad_w[l] = (
                self.vars[-1] * (1 / batch_size) * errors[l + 1] @ F.f(x[l], self.act_fn).T
                - self.d_rate * self.W[l]
            )
            grad_b[l] = self.vars[-1] * (1 / batch_size) * torch.sum(errors[l + 1], axis=1)

        if self.amortised:
            # NB: this branch hard-codes a four-layer network (indices 0-3)
            q = q[::-1]

            q_errs = [[] for _ in range(self.n_layers - 1)]
            q_errs[0] = x[2] - q[2]
            fn_deriv = F.f_deriv(torch.matmul(x_batch.T, self.W_q[0].T), self.act_fn)
            grad_w_q[0] = torch.matmul(x[3], q_errs[0].T * fn_deriv)
            grad_b_q[0] = self.vars[-1] * (1 / batch_size) * torch.sum(q_errs[0], axis=1)

            q_errs[1] = x[1] - q[1]
            fn_deriv = F.f_deriv(torch.matmul(x[2].T, self.W_q[1].T), self.act_fn)
            grad_w_q[1] = torch.matmul(x[2], q_errs[1].T * fn_deriv)
            grad_b_q[1] = self.vars[-1] * (1 / batch_size) * torch.sum(q_errs[1], axis=1)

            # q_errs[2] = x[0] - q[0]
            q_errs[2] = y_batch - q[0]
            fn_deriv = F.f_deriv(torch.matmul(x[1].T, self.W_q[2].T), self.act_fn)
            grad_w_q[2] = torch.matmul(x[1], q_errs[2].T * fn_deriv)
            grad_b_q[2] = self.vars[-1] * (1 / batch_size) * torch.sum(q_errs[2], axis=1)

        self._apply_gradients(
            grad_w,
            grad_b,
            grad_w_q,
            grad_b_q,
            epoch_num=epoch_num,
            n_batches=n_batches,
            curr_batch=curr_batch,
        )
Code Example #22
    def train_epoch(self, img_batches, label_batches, epoch_num=None):
        init_err = 0
        end_err = 0
        avg_itr = 0
        n_batches = len(img_batches)

        for batch_id, (img_batch, label_batch) in enumerate(zip(img_batches, label_batches)):
            img_batch = set_tensor(img_batch, self.device)
            label_batch = set_tensor(label_batch, self.device)
            batch_size = img_batch.size(1)

            # activations / mu
            x = [[] for _ in range(self.n_layers)]

            # amortised forward
            x[0] = img_batch
            for l in range(1, self.n_layers):
                b_q = self.b_q[l - 1].repeat(1, batch_size)
                x[l] = self.W_q[l - 1] @ F.f(x[l - 1], self.act_fn) + b_q

            # reverse order
            x = x[::-1]
            x[0] = label_batch
            x[self.n_layers - 1] = img_batch
            init_err += self.get_errors(x, batch_size)

            # inference
            x, errors, q_errors, its = self.hybrid_infer(x, batch_size)
            
            end_err += self.get_errors(x, batch_size)
            avg_itr += its

            self.update_params(
                x,
                errors,
                q_errors,
                batch_size,
                img_batch,
                label_batch,
                epoch_num=epoch_num,
                n_batches=n_batches,
                curr_batch=batch_id,
            )

        return end_err / n_batches, init_err / n_batches, avg_itr / n_batches
Code Example #23
    def test(self, imgs, labels):
        img_batches, label_batches, batch_sizes = self._get_batches(
            imgs, labels, self.batch_size)
        n_batches = len(img_batches)
        print(f"testing on {n_batches} batches of size {self.batch_size}")

        accs = []
        for batch in range(n_batches):
            batch_size = batch_sizes[batch]
            x = [[] for _ in range(self.n_layers)]
            x[0] = img_batches[batch]
            for l in range(1, self.n_layers):
                x[l] = self.W[l - 1] @ F.f(x[l - 1], self.act_fn) + np.tile(
                    self.b[l - 1], (1, batch_size))
            acc = mnist_utils.mnist_accuracy(x[-1], label_batches[batch])
            accs.append(acc)

        print(f"average accuracy {np.mean(np.array(accs))}")
Code Example #24
File: network.py Project: alec-tschantz/predcoding
    def train_epoch(self, x_batches, y_batches, epoch_num=None):
        n_batches = len(x_batches)
        for batch_id, (x_batch, y_batch) in enumerate(zip(x_batches, y_batches)):

            if batch_id % 500 == 0 and batch_id > 0:
                print(f"batch {batch_id}")

            x_batch = set_tensor(x_batch, self.device)
            y_batch = set_tensor(y_batch, self.device)
            batch_size = x_batch.size(1)

            x = [[] for _ in range(self.n_layers)]
            x[0] = x_batch
            for l in range(1, self.n_layers):
                b = self.b[l - 1].repeat(1, batch_size)
                x[l] = self.W[l - 1] @ F.f(x[l - 1], self.act_fn) + b
            x[self.n_layers - 1] = y_batch

            x, errors, _ = self.infer(x, batch_size)
            self.update_params(
                x, errors, batch_size, epoch_num=epoch_num, n_batches=n_batches, curr_batch=batch_id
            )
Code Example #25
    def test_epoch(self, img_batches, label_batches, itr_max=None):
        accs = []
        n_batches = len(img_batches)
        avg_itr = 0
        for img_batch, label_batch in zip(img_batches, label_batches):
            img_batch = set_tensor(img_batch, self.device)
            label_batch = set_tensor(label_batch, self.device)
            batch_size = img_batch.size(1)

            x = [[] for _ in range(self.n_layers)]
            x[0] = img_batch
            for l in range(1, self.n_layers):
                b_q = self.b_q[l - 1].repeat(1, batch_size)
                x[l] = self.W_q[l - 1] @ F.f(x[l - 1], self.act_fn) + b_q
            
            x = x[::-1]
            x[self.n_layers - 1] = img_batch

            x, errors, q_errors, its = self.hybrid_infer(x, batch_size, itr_max=itr_max, test=True)
            pred_labels = x[0]
            acc = mnist_utils.mnist_accuracy(pred_labels, label_batch)
            accs.append(acc)
            avg_itr += its
        return accs, avg_itr / n_batches
Code Example #26
def test_f():
    # smoke test: f must at least be callable with no arguments
    f()
Code Example #27
def test_f_of_neg1_equals_0():
    assert f(-1) == 0
Code Example #28
def test_f_of_41_equals_42():
    assert f(41) == 42
Code Example #29
def test_f_of_6_equals_7():
    assert f(6) == 7
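Taken together, these three tests pin down an increment function: f(-1) == 0, f(41) == 42, and f(6) == 7 are all consistent with f(x) = x + 1. A minimal implementation that satisfies them (the project's actual f is not shown):

def f(x):
    return x + 1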
Code Example #30
def evaluate(f, planner, estimator, dynam=False):
    # prepare data
    X, z = [], []
    N = 5
    # grid
    for x in [i / N for i in range(1, N)]:
        for y in [i / N for i in range(1, N)]:
            X.append((x, y))
            z.append(f(x, y))
    # # alternative to grid: random points
    # for x in [i/N for i in range(1,N)]:
    #     for y in [i/N for i in range(1,N)]:
    #         x = random.random()
    #         y = random.random()
    #         X.append((x,y))
    #         z.append(f(x,y))
    #         print("{}\t&{}\t&{}\\\\".format(x,y,z[-1]))

    # test preliminary forecasting part
    mesh = []
    for i in range(101):
        for j in range(101):
            x, y = i / 100., j / 100.
            mesh.append((x, y))

    # fit the estimator on X, z and get predicted values at the mesh points
    z0 = estimator(X, z, mesh)

    # compute the preliminary error over the mesh
    prelim = 0
    for i in range(len(mesh)):
        (x, y) = mesh[i]
        prelim += abs(f(x, y) - float(z0[i]))

    # test planning part

    # compute the probing route
    route = planner(X, z, f, dynam)

    # compute the route's duration
    tsp_et = 0  # elapsed time
    (xt, yt) = (inst.x0, inst.y0)
    for (x, y) in route:
        tsp_et += dist(xt, yt, x, y) / inst.s + inst.t
        xt, yt = x, y
        X.append((x, y))
        z.append(f(x, y))
        # print("probing at ({:8.5g},{:8.5g}) --> \t{:8.5g}".format(x, y, z[-1]))
        print("{:8.5g}\t{:8.5g}\t{:8.5g}".format(x, y, z[-1]))

    # # # plot posteriori GP
    # from sklearn.gaussian_process import GaussianProcessRegressor
    # from sklearn.gaussian_process.kernels import RBF, ConstantKernel, WhiteKernel, Matern, DotProduct, RationalQuadratic
    # kernel = RationalQuadratic(length_scale_bounds=(0.08, 100)) + WhiteKernel(noise_level_bounds=(1e-5, 1e-2))
    # from functions import plot
    # GPR = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=10)
    # GPR.fit(X, z)
    # def GP(x_,y_):
    #     return GPR.predict([(x_,y_)])[0]
    # plot(GP,100)
    # # # end of plot

    # add the time to return to the port
    tsp_et += dist(xt, yt, inst.x0, inst.y0) / inst.s

    if tsp_et > inst.T + EPS:
        print("tour length infeasible:", tsp_et)
        # return INF    # route takes longer than time limit

    # test forecasting part
    mesh = []
    for i in range(101):
        for j in range(101):
            x, y = i / 100., j / 100.
            mesh.append((x, y))
    z = estimator(X, z, mesh)
    final = 0
    for i in range(len(mesh)):
        (x, y) = mesh[i]
        final += abs(f(x, y) - float(z[i]))

    return prelim, tsp_et, final
Code Example #31
File: 53.py Project: mikegw/project_euler
def choose(n,r):
    # factorials divide exactly, so use integer division to avoid float error
    return f(n)//(f(r)*f(n-r))
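Here f is a factorial (Code Example #33 from the same project imports functions.factorial as f). A self-contained equivalent using the standard library; on Python 3.8+ math.comb(n, r) does the same thing directly.

from math import factorial as f

def choose(n, r):
    return f(n) // (f(r) * f(n - r))  # exact integer binomial coefficient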
Code Example #32
File: 34.py Project: mikegw/project_euler
def factsum(n):
    # sum of the factorials of the digits of n
    return sum(f(int(d)) for d in str(n))
Code Example #33
File: 34.py Project: mikegw/project_euler
from functions import factorial as f

# 8 * 9! = 2903040 has only seven digits, so an eight-digit number can never
# equal the sum of the factorials of its digits; this bounds the search
print(8 * f(9))

def factsum(n):
    # sum of the factorials of the digits of n
    return sum(f(int(d)) for d in str(n))

ans = 0
for i in range(3, 300000):  # both curious numbers (145 and 40585) lie below this
    if factsum(i) == i:
        print(i)
        ans += i
print(ans)