Example #1
import numpy as sci  # the original snippet used "import scipy as sci"; modern SciPy no longer re-exports these NumPy names

def fixed_point(t, M, I, a2):
    """Fixed-point equation t = xi * gamma^[l](t) from Botev, Grotowski &
    Kroese (2010), "Kernel density estimation via diffusion", used to pick
    the bandwidth of a Gaussian KDE. M is the sample count, I the squared
    DCT mode indices and a2 the squared DCT coefficients."""
    l = 7
    I = sci.float128(I)
    M = sci.float128(M)
    a2 = sci.float128(a2)
    # functional of the l-th density derivative at the pilot point t
    f = 2*sci.pi**(2*l)*sci.sum(I**l*a2*sci.exp(-I*sci.pi**2*t))
    for s in range(l, 1, -1):  # Python 3: true division in the exponents below is essential
        K0 = sci.prod(range(1, 2*s, 2))/sci.sqrt(2*sci.pi)  # (2s-1)!! / sqrt(2*pi)
        const = (1 + (1/2)**(s + 1/2))/3
        time = (2*const*K0/M/f)**(2/(3 + 2*s))
        f = 2*sci.pi**(2*s)*sci.sum(I**s*a2*sci.exp(-I*sci.pi**2*time))
    return t - (2*M*sci.sqrt(sci.pi)*f)**(-2/5)
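In the bandwidth-selection context this function comes from, its root is the optimal squared bandwidth on a rescaled grid. A minimal usage sketch following Botev's method; the data, grid size, DCT normalisation and bracket values below are illustrative assumptions, not part of the source:

import numpy as np
from scipy.fft import dct
from scipy.optimize import brentq

data = np.random.randn(1000)
n_grid = 2**10
lo, hi = data.min() - 0.1, data.max() + 0.1
hist, _ = np.histogram(data, bins=n_grid, range=(lo, hi))
M = len(data)
a = dct(hist / M)                           # DCT of the normalised histogram
I = np.arange(1, n_grid, dtype=float)**2    # squared mode indices
a2 = (a[1:] / 2)**2

# Widen the bracket until brentq finds a sign change, as reference
# implementations of this method do.
t_star = None
for guess in (0.01, 0.05, 0.1, 0.5, 1.0):
    try:
        t_star = brentq(fixed_point, 0, guess, args=(M, I, a2))
        break
    except ValueError:
        pass

bandwidth = np.sqrt(t_star) * (hi - lo)     # map back to the data scale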
Example #2
    def fit(self, X, y):
        idx = np.random.permutation(len(y))

        if self.validation:
            # hold out 1% of the data for validation; the slice index must be an int
            traintestsplit = int(len(y) * .01)
            validx = idx[-traintestsplit:]
            trainidx = idx[:-traintestsplit]
            Xval = X[validx, :].copy()
            Yval = y[validx].copy()
            X = X[trainidx, :]
            y = y[trainidx]

        self.X = X
        self.y = y

        if self.verbose:
            if self.validation:
                print("Training DSEKL on %d samples and validating on %d" % (len(trainidx), traintestsplit))
            else:
                print("Training DSEKL on %d samples" % len(idx))
            print("using %i workers" % self.workers)

        self.classes_ = sp.unique(y)
        assert(all(self.classes_==[-1.,1.]))

        w = sp.float128(sp.randn(len(y)))
        G = sp.ones(len(y))
        if self.validation:
            self.valErrors = []

        self.trainErrors = []
        self.w = w.copy()
        oldw = w.copy()
        it = 0

        # Memory-map data, targets and weights so worker processes can share
        # them without pickling the full arrays.
        data_name = os.path.join(home_dir_server, 'data')
        dump(X, data_name)
        self.X = load(data_name, mmap_mode='r')
        target_name = os.path.join(home_dir_server, 'target')
        dump(y, target_name)
        self.y = load(target_name, mmap_mode='r')
        w_name = os.path.join(home_dir_server, 'weights')
        # note: this replaces the random initialisation of w with a zero-filled memmap
        w = np.memmap(w_name, dtype=sp.float128, shape=(len(y),), mode='w+')

        delta_w = 5
        while(it < int(self.n_its / self.workers) and delta_w > 1.):
            it += 1

            if self.verbose and it != 1:
                print("iteration %i of %i" % (it, int(self.n_its / self.workers)))
                if self.validation:
                    val_error = (sp.sign(self.predict(Xval)) != Yval).mean()
                    self.valErrors.append(val_error)
                    print("Validation-Error: %0.2f, discount: %0.10f" % (val_error, self.eta))

                print(datetime.datetime.now())
                print("%i iterations, discount: %0.10f, change in w: %0.2f" %
                      (it, self.eta, sp.linalg.norm(oldw - w)))


            oldw = w.copy()
            seeds = np.random.randint(0, high=4294967295, size=self.workers)
            gradients = Parallel(n_jobs=-1)(
                delayed(svm_gradient)(self.X, self.y, w.copy(),
                                      self.n_pred_samples, self.n_expand_samples,
                                      C=self.C, sigma=self.gamma, seed=seeds[i])
                for i in range(self.workers))

            tmpw = sp.zeros(len(y))
            for g in gradients:  # g = (gradient values, their indices)
                if self.damp:
                    G[g[1]] += g[0]**2  # AdaGrad-style accumulator
                tmpw[g[1]] += g[0]

            for i in tmpw.nonzero()[0]:
                if self.damp:
                    w[i] -= self.eta * tmpw[i] / sp.sqrt(G[i])
                else:
                    w[i] -= self.eta * tmpw[i] / float(len(gradients))

            self.eta = self.eta * self.eta_start
            self.w = w.copy()
            delta_w = sp.linalg.norm(oldw - w)
        self.w = w.copy()
        return self
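For context, a hypothetical usage sketch; the DSEKL constructor signature below is an assumption inferred from the attributes this fit() reads (n_its, workers, eta_start, C, gamma, damp, validation, verbose, n_pred_samples, n_expand_samples), not a documented API:

import numpy as np

X = np.random.randn(2000, 10)
y = np.sign(np.random.randn(2000))  # fit() asserts that labels are {-1., 1.}

clf = DSEKL(n_its=100, workers=4, eta_start=0.99, C=1.0, gamma=1.0,
            n_pred_samples=100, n_expand_samples=100,
            damp=True, validation=True, verbose=True)
clf.fit(X, y)
y_hat = np.sign(clf.predict(X))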
Example #3
    def fit(self, X, y):
        idx = np.random.permutation(len(y))

        if self.validation:
            # hold out 0.2% of the data for validation; the slice index must be an int
            traintestsplit = int(len(y) * 0.002)
            validx = idx[-traintestsplit:]
            trainidx = idx[:-traintestsplit]
            Xval = X[validx, :].copy()
            Yval = y[validx].copy()
            X = X[trainidx, :]
            y = y[trainidx]

        # Divide the data into batches: fractional number of batches over all samples
        num_batch_pred = X.shape[0] / float(self.n_pred_samples)
        num_batch_exp = X.shape[0] / float(self.n_expand_samples)
        self.X_pred_ids = []
        self.y_pred_ids = []
        for i in range(0, int(num_batch_pred)):
            self.X_pred_ids.append(list(range(i * self.n_pred_samples, (i + 1) * self.n_pred_samples)))
            self.y_pred_ids.append(list(range(i * self.n_pred_samples, (i + 1) * self.n_pred_samples)))
        if (num_batch_pred - int(num_batch_pred)) > 0:
            # tail batch with the remaining samples
            self.X_pred_ids.append(list(range(int(num_batch_pred) * self.n_pred_samples, X.shape[0])))
            self.y_pred_ids.append(list(range(int(num_batch_pred) * self.n_pred_samples, y.shape[0])))

        self.X_exp_ids = []
        self.y_exp_ids = []
        for i in range(0, int(num_batch_exp)):
            self.X_exp_ids.append(list(range(i * self.n_expand_samples, (i + 1) * self.n_expand_samples)))
            self.y_exp_ids.append(list(range(i * self.n_expand_samples, (i + 1) * self.n_expand_samples)))
        if (num_batch_exp - int(num_batch_exp)) > 0:
            # the tail batch must use n_expand_samples here, not n_pred_samples
            self.X_exp_ids.append(list(range(int(num_batch_exp) * self.n_expand_samples, X.shape[0])))
            self.y_exp_ids.append(list(range(int(num_batch_exp) * self.n_expand_samples, y.shape[0])))
        self.X = X
        self.y = y

        if self.verbose:
            if self.validation:
                print("Training DSEKL on %d samples and validating on %d" % (len(trainidx), traintestsplit))
            else:
                print("Training DSEKL on %d samples" % len(idx))

            print("\nhyperparameters:"
                  "\nn_expand_samples: ", self.n_expand_samples,
                  "\nn_pred_samples: ", self.n_pred_samples,
                  "\neta: ", self.eta_start,
                  "\nC: ", self.C,
                  "\ngamma: ", self.gamma,
                  "\nworker: ", self.workers,
                  "\ndamp: ", self.damp,
                  "\nvalidation: ", self.validation,
                  "\nbenefit_through_distribution_num_workers:", self.benefit_through_distribution_no_workers,
                  "\n")

        self.classes_ = sp.unique(y)
        assert(all(self.classes_==[-1.,1.]))


        w = sp.float128(sp.randn(len(y)))
        G = sp.ones(len(y))
        if self.validation:
            self.valErrors = []

        self.trainErrors = []
        self.w = w.copy()
        oldw = w.copy()
        it = 0
        delta_w = 50
        if self.benefit_through_distribution_no_workers != -1:
            # timing mode: measure how long one distributed batch takes
            t_start = datetime.datetime.now()
            print("iterations start:", t_start)
        while it < self.n_its and delta_w > 1.:
            it += 1

            if self.verbose and it != 1:
                print("iteration %i of %i" % (it, self.n_its))
                if self.validation:
                    val_error = (sp.sign(self.predict(Xval)) != Yval).mean()
                    self.valErrors.append(val_error)
                    print("Validation-Error: %0.2f, discount: %0.10f" % (val_error, self.eta))

                print(datetime.datetime.now())
                print("%i iterations, discount: %0.10f, change in w: %0.2f" %
                      (it, self.eta, sp.linalg.norm(oldw - w)))


            oldw = w.copy()


            # Shuffle the order of samples within each prediction batch,
            # then the order of the batches themselves.
            for i in range(0, len(self.X_pred_ids)):
                self.X_pred_ids[i] = shuffle(self.X_pred_ids[i])
            self.X_pred_ids = shuffle(self.X_pred_ids)

            # print "X_pred_ids:\n",self.X_pred_ids
            for i in range(0,len(self.X_pred_ids)):


                X_pred_id = self.X_pred_ids[i]
                if self.verbose:
                    print "training on batch:",i, " of ",len(self.X_pred_ids), datetime.datetime.now()
                if self.benefit_through_distribution_no_workers == -1:
                    gradients = Parallel(n_jobs=self.workers,max_nbytes=None,verbose=0) (delayed(svm_gradient_batch_fast)(self.X[X_pred_id,:], self.X[X_exp_id],self.y,X_pred_id, X_exp_id,w.copy(), C=self.C, sigma=self.gamma) for X_exp_id in self.X_exp_ids)
                else:
                    #gradients = Parallel(n_jobs=self.benefit_through_distribution_no_workers,batch_size=self.benefit_through_distribution_no_workers,pre_dispatch=self.benefit_through_distribution_no_workers,max_nbytes=None,verbose=1) (delayed(svm_gradient_batch_fast)(self.X[X_pred_id,:], self.X[X_exp_id],self.y,X_pred_id, X_exp_id,w.copy(), C=self.C, sigma=self.gamma) for X_exp_id in self.X_exp_ids)
                    gradients = Parallel(n_jobs=self.benefit_through_distribution_no_workers,
                                         #pre_dispatch=self.benefit_through_distribution_no_workers,
                                         #batch_size=self.benefit_through_distribution_no_workers,
                                         max_nbytes=None,
                                         verbose=1) (delayed(svm_gradient_batch_fast)(self.X[X_pred_id,:], self.X[X_exp_id],self.y,X_pred_id, X_exp_id,w.copy(), C=self.C, sigma=self.gamma) for X_exp_id in self.X_exp_ids)
                print "iterations took in total:", datetime.datetime.now() - t_start
                if (self.benefit_through_distribution_no_workers != -1):
                    break
                tmpw = sp.zeros(len(y))
                for g in gradients:  # g = (gradient values, their indices)
                    if self.damp:
                        G[g[1]] += g[0]**2  # AdaGrad-style accumulator
                    tmpw[g[1]] += g[0]

                # use j so the outer batch index i is not clobbered
                for j in tmpw.nonzero()[0]:
                    if self.damp:
                        w[j] -= self.eta * tmpw[j] / sp.sqrt(G[j])
                    else:
                        w[j] -= self.eta * tmpw[j] / float(len(gradients))

            self.eta = 1. / float(it)  # 1/t learning-rate decay
            self.w = w
            delta_w = sp.linalg.norm(oldw - w)

        self.w = w.copy()
        return self
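The batch-id construction in this example is plain fixed-size chunking of consecutive indices with a ragged tail batch; a minimal equivalent helper, shown only as an illustration (make_batches is not part of the source):

import numpy as np

def make_batches(n_samples, batch_size):
    """Consecutive index batches of batch_size, plus a smaller tail batch
    for any remainder."""
    cuts = np.arange(batch_size, n_samples, batch_size)
    return [list(chunk) for chunk in np.split(np.arange(n_samples), cuts)]

# e.g. make_batches(10, 4) -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]];
# self.X_pred_ids could then be built as make_batches(X.shape[0], self.n_pred_samples)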
Example #4
def main():
    # Assumes "from numpy import array", "import scipy as sp", "import copy",
    # plus the surrounding module's TriangleElement, quadrature, LLG residual
    # and time_integrate helpers.
    # create element and quadrature
    ele = TriangleElement(array([1, 0]), array([0, 0.5]), array([0, 0]))
    ele.quad = local_nodal_quadrature_factory(ele)

    # Set initial conditions:

    # Varying in space, no applied field
    initialise_solution(ele, m_initial)
    happ = array([0, 0, 0])
    damping = sp.float128(0.5)
    dt = sp.float128(0.1)
    tmax = sp.float128(1.0)

    # # Constant in space, reversal under applied field
    # initialise_solution(ele, m_initial_z)
    # happ = array([0, 0, -1.1])
    # damping = 0.5
    # dt = 0.1
    # tmax = 1.0

    # Define the residual function in terms of m and dmdt.
    def residual(time, m, dmdt, dprint=False, check_global=False):

        # Calculate residuals for each test function
        rs = []
        for i_node in ele.nodei():

            # Integrand as a function only of local coord
            def integrand(s):
                x_s = ele.interpolate(time, s, lambda n: n.x)
                m_s = ele.interpolate_data(time, s, m)
                dmdx_s = ele.interpolate_data_dx(time, s, m)
                dmdt_s = ele.interpolate_data(time, s, dmdt)
                test_s = ele.test(s, i_node)
                dtestdx_s = ele.dtestdx(s, i_node)

                # Hook for debug printing:
                if dprint:
                    pass

                if check_global:
                    assert all(x_s == s)

                return llg_residual_integrand(time, x_s, m_s, dmdt_s, dmdx_s,
                                              test_s, dtestdx_s, happ, damping) * ele.J(s)

            rs.append(ele.quad.integrate(integrand))

        return array(rs)

    def print_residual(time):
        m = array([(n.m + n.mprev) / 2 for n in ele.nodes])
        dmdt = array([(n.m - n.mprev) / dt for n in ele.nodes])
        print(residual(time, m, dmdt, True))

    def my_actions_after_timestep(t, new_m):
        """Actions after time step: store new values and print.
        """
        # Update nodal values
        for new_m_node, n in zip(new_m, ele.nodes):
            n.mprev = array(copy.deepcopy(n.m))
            n.m = array(copy.deepcopy(new_m_node))

        # Print
        output_solution(t, ele)

    # Doc initial solution
    output_solution(0.0, ele)
    print_residual(0.0)

    # Get initial condition
    m0 = array([n.m for n in ele.nodes])

    # Integrate
    ts, ms = time_integrate(residual, m0, dt, tmax, actions_after_time_step=my_actions_after_timestep)

    return 0
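print_residual shows the discretisation used here: an implicit midpoint rule, with m averaged across the step and dmdt a finite difference. A minimal sketch of a compatible time_integrate, assuming that scheme and that residual returns one value per unknown; this is an illustration, not the author's implementation:

import numpy as np
from scipy.optimize import fsolve

def time_integrate(residual, m0, dt, tmax, actions_after_time_step=None):
    """Implicit midpoint stepping: solve residual(t, m_mid, dmdt) == 0 for
    the new nodal values at each step."""
    ts, ms = [0.0], [np.asarray(m0, dtype=float)]
    while ts[-1] < tmax:
        t_new = ts[-1] + dt
        m_prev = ms[-1]

        def step_residual(m_flat):
            m_new = m_flat.reshape(m_prev.shape)
            m_mid = (m_new + m_prev) / 2        # midpoint value
            dmdt = (m_new - m_prev) / dt        # midpoint time derivative
            return np.ravel(residual(t_new - dt / 2, m_mid, dmdt))

        m_new = fsolve(step_residual, np.ravel(m_prev)).reshape(m_prev.shape)
        ts.append(t_new)
        ms.append(m_new)
        if actions_after_time_step is not None:
            actions_after_time_step(t_new, m_new)
    return ts, ms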