Example #1
 def fit(self, epochs=1, batch_size=1, gamma=0.9, **args):
     X = args['X_train']
     y = args['y_train']
     verbose = args.get('verbose')
     epochs = int(epochs)
     loss_val = cp.zeros(epochs)
     par_gpu = deepcopy(self.start)
     momentum = {
         var: cp.zeros_like(self.start[var])
         for var in self.start.keys()
     }
     for i in tqdm(range(epochs)):
         for X_batch, y_batch in self.iterate_minibatches(X, y, batch_size):
             n_batch = cp.float(y_batch.shape[0])
             grad_p = self.model.grad(par_gpu,
                                      X_train=X_batch,
                                      y_train=y_batch)
             for var in par_gpu.keys():
                 momentum[var] = gamma * momentum[var] - self.step_size * grad_p[var]
                 par_gpu[var] += momentum[var]
         # log-likelihood evaluated on the epoch's final minibatch
         loss_val[i] = self.model.log_likelihood(par_gpu,
                                                 X_train=X_batch,
                                                 y_train=y_batch)
         if verbose and i % max(epochs // 10, 1) == 0:
             print('loss: {0:.8f}'.format(cp.asnumpy(loss_val[i])))
     return par_gpu, cp.asnumpy(loss_val)
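The inner loop above is classical momentum: v ← γ·v − η·∇L, then θ ← θ + v. A minimal self-contained sketch of that update on a toy quadratic (the quadratic and all names below are illustrative, not part of the example):

import cupy as cp

def momentum_step(theta, v, grad, step_size=0.1, gamma=0.9):
    # One classical-momentum update, matching the loop body above.
    v = gamma * v - step_size * grad
    return theta + v, v

# Toy objective f(theta) = 0.5 * ||theta||^2, so grad f = theta.
theta = cp.array([5.0, -3.0])
v = cp.zeros_like(theta)
for _ in range(200):
    theta, v = momentum_step(theta, v, grad=theta)
print(theta)  # -> close to [0, 0]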
Example #2
 def fit_dropout(self, epochs=1, batch_size=1, p=0.5, gamma=0.9, **args):
     X = args['X_train']
     y = args['y_train']
     verbose = args.get('verbose')
     loss_val = cp.zeros(cp.int(epochs))
     par_gpu = deepcopy(self.start)
     momentum = {var: cp.zeros_like(par_gpu[var]) for var in par_gpu.keys()}
     for i in range(int(epochs)):
         for batch in self.iterate_minibatches(X, y, batch_size):
             X_batch, y_batch = batch
             Z = cp.random.binomial(1, p, size=X_batch.shape)
             X_batch_dropout = cp.multiply(X_batch, Z)
             grad_p = self.model.grad(par_gpu,
                                      X_train=X_batch_dropout,
                                      y_train=y_batch)
             for var in par_gpu.keys():
                 momentum[var] = gamma * momentum[var] - self.step_size * grad_p[var]
                 par_gpu[var] += momentum[var]
         loss_val[i] = self.model.negative_log_posterior(par_gpu,
                                                         X_train=X_batch,
                                                         y_train=y_batch)
         if verbose and i % max(int(epochs) // 10, 1) == 0:
             print('loss: {0:.4f}'.format(cp.asnumpy(loss_val[i])))
     return par_gpu, loss_val
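fit_dropout applies dropout to the inputs with a Bernoulli keep-mask. A quick standalone illustration of the masking step (the values below are made up); note that the example multiplies by the raw mask, whereas "inverted dropout" would additionally rescale by 1/p to keep the expected activation unchanged:

import cupy as cp

cp.random.seed(0)
X_batch = cp.ones((4, 6))
p = 0.5  # keep probability
Z = cp.random.binomial(1, p, size=X_batch.shape)  # 1 = keep, 0 = drop
X_batch_dropout = cp.multiply(X_batch, Z)
print(X_batch_dropout)  # roughly half the entries zeroed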
Example #3
    def fit(self, X, y=None):
        """Fit training data and  construct histogram.
        The type of histogram is 'regular', and right-open
        Note: If n_bins=None, the number of breaks is being computed as in:
        L. Birge, Y. Rozenholc, How many bins should be put in a regular
        histogram? 2006.
        X (cupy.ndarray) : NxD training sample. 
             
        """
        nrows, n_components = X.shape
        if not self.n_bins:
            # Birge-Rozenholc default: floor(n / ln n) bins
            self.n_bins = int(nrows / cp.log(nrows))
        n_nonzero_components = cp.sqrt(n_components)
        n_zero_components = n_components - cp.int(n_nonzero_components)

        self.projections = cp.random.randn(self.n_random_cuts, n_components)
        self.histograms = cp.zeros([self.n_random_cuts, self.n_bins])
        self.limits = cp.zeros((self.n_random_cuts, self.n_bins + 1))
        for i in range(self.n_random_cuts):
            rands = cp.random.permutation(n_components)[:n_zero_components]
            self.projections[i, rands] = 0.
            projected_data = self.projections[i, :].dot(X.T)
            self.histograms[i, :], self.limits[i, :] = cp.histogram(
                projected_data, bins=self.n_bins, density=False)
            self.histograms[i, :] += 1e-12
            self.histograms[i, :] /= cp.sum(self.histograms[i, :])
        return self
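The default bin count above reduces to floor(n / ln n). A quick check (the sample size is arbitrary):

import cupy as cp

nrows = 10_000
print(int(nrows / cp.log(nrows)))  # 1085 bins for 10,000 rows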
Example #4
    def fit(self, train_data):
        """
        Fit training data and construct histograms.

        :param train_data: NxD training sample
        :type train_data: cupy.ndarray

        Examples
        --------
        >>> from clx.analytics.loda import Loda
        >>> import cupy as cp
        >>> x = cp.random.randn(100,5) # 5-D multivariate synthetic dataset
        >>> loda_ad = Loda(n_bins=None, n_random_cuts=100)
        >>> loda_ad.fit(x)
        """
        nrows, n_components = train_data.shape
        if not self._n_bins:
            # Birge-Rozenholc default: floor(n / ln n) bins
            self._n_bins = int(nrows / cp.log(nrows))
        n_nonzero_components = cp.sqrt(n_components)
        n_zero_components = n_components - cp.int(n_nonzero_components)

        self._projections = cp.random.randn(self._n_random_cuts, n_components)
        self._histograms = cp.zeros([self._n_random_cuts, self._n_bins])
        self._limits = cp.zeros((self._n_random_cuts, self._n_bins + 1))
        for i in range(self._n_random_cuts):
            rands = cp.random.permutation(n_components)[:n_zero_components]
            self._projections[i, rands] = 0.
            projected_data = self._projections[i, :].dot(train_data.T)
            self._histograms[i, :], self._limits[i, :] = cp.histogram(
                projected_data, bins=self._n_bins, density=False)
            self._histograms[i, :] += 1e-12
            self._histograms[i, :] /= cp.sum(self._histograms[i, :])
Example #5
def valid_positions(R, vertices, depth, K, mask, lower, grid_size):
    # Launch the raw CUDA kernel with one thread per (mask element, vertex) pair.
    n_threads = 512
    n_blocks = (mask.size * len(vertices)) // n_threads + 1
    valid_positions_device(
        (n_blocks, ), (n_threads, ),
        (cp.asarray(K.flatten()), cp.asarray(R.flatten()),
         cp.asarray(vertices.flatten()), cp.asarray(depth.flatten()),
         cp.array(depth.shape, cp.int), cp.float32(grid_size),
         cp.asarray(mask.flatten(), cp.int),
         cp.array(mask.shape, cp.int), cp.asarray(lower),
         cp.int(len(vertices))))
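valid_positions_device is presumably a cp.RawKernel (its definition is not shown); the ((grid,), (block,), args) call above is CuPy's raw-kernel launch convention. A minimal self-contained example of that convention (the kernel below is illustrative only):

import cupy as cp

add_one = cp.RawKernel(r'''
extern "C" __global__
void add_one(float* x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] += 1.0f;
}
''', 'add_one')

x = cp.zeros(1000, dtype=cp.float32)
threads = 512
blocks = (x.size + threads - 1) // threads  # ceil-division grid sizing
add_one((blocks,), (threads,), (x, cp.int32(x.size)))
print(float(x.sum()))  # 1000.0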
Example #6
def PDcalculate(x, y, data2):
    # Distances from the query point (x, y) to every cluster centre.
    lon1 = cp.asarray(x)
    lat1 = cp.asarray(y)
    lon2 = cp.asarray(data2["CLUSTERLONGITUDE"].values)
    lat2 = cp.asarray(data2["CLUSTERLATITUDE"].values)
    shortdistance = geodistance_cp(lon1, lat1, lon2, lat2)

    indexmin = cp.int(cp.argmin(shortdistance))
    targetcID = data2.at[indexmin, "CLUSTERID"]
    mindistance = cp.int(cp.min(shortdistance))
    return mindistance, targetcID
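geodistance_cp is not shown. A plausible haversine sketch matching the call signature above (the name, the meter unit, and the implementation itself are assumptions):

import cupy as cp

def geodistance_cp(lon1, lat1, lon2, lat2, radius_m=6371000.0):
    # Great-circle (haversine) distance; broadcasts a single query point
    # against arrays of cluster coordinates.
    lon1, lat1, lon2, lat2 = map(cp.radians, (lon1, lat1, lon2, lat2))
    a = (cp.sin((lat2 - lat1) / 2) ** 2
         + cp.cos(lat1) * cp.cos(lat2) * cp.sin((lon2 - lon1) / 2) ** 2)
    return 2 * radius_m * cp.arcsin(cp.sqrt(a))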
Example #7
def concat_hists(weights1, bins1, weights2, bins2, bin_size, rd):
    """Merge two histograms with a common bin width onto a shared bin grid."""
    min1, max1 = cp.around(bins1[0], rd), cp.around(bins1[-1], rd)
    min2, max2 = cp.around(bins2[0], rd), cp.around(bins2[-1], rd)
    mini, maxi = min(min1, min2), max(max1, max2)
    new_bins = cp.arange(
        mini, maxi + bin_size * 0.9,
        bin_size)  # * 0.9 to avoid unexpected random inclusion of last element

    def extend(weights, lo, hi):
        # Zero-pad one weight vector out to the shared grid [mini, maxi].
        pad_left = cp.int(cp.around((lo - mini) / bin_size))
        pad_right = cp.int(cp.around((maxi - hi) / bin_size))
        if pad_left == 0 and pad_right == 0:
            return weights
        return cp.pad(weights, (pad_left, pad_right), 'constant',
                      constant_values=0)

    new_ext = extend(weights1, min1, max1) + extend(weights2, min2, max2)
    return new_ext, new_bins
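A short usage check with two overlapping histograms (the data values are arbitrary):

import cupy as cp

w1, b1 = cp.histogram(cp.array([0.1, 0.4, 0.9, 1.2]),
                      bins=cp.arange(0.0, 2.0, 0.5))   # edges 0.0 .. 1.5
w2, b2 = cp.histogram(cp.array([1.1, 1.6, 2.3]),
                      bins=cp.arange(1.0, 3.0, 0.5))   # edges 1.0 .. 2.5
merged, new_bins = concat_hists(w1, b1, w2, b2, bin_size=0.5, rd=2)
print(new_bins)  # shared grid 0.0 .. 2.5
print(merged)    # per-bin counts summed on that grid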
Example #8
def expm(A, delta=1e-10):
    # Scaling: divide A by 2**j so its infinity-norm drops below 1.
    j = max(0, cp.int(1 + cp.log2(cp.linalg.norm(A, cp.inf))))
    A = A / (2**j)
    # Pade order q chosen so the approximation error stays below delta.
    q = u_nb.expm_eps_less_than(delta)
    n = A.shape[0]
    I = cp.eye(n)
    D = I  # denominator polynomial D_q(A)
    N = I  # numerator polynomial N_q(A)
    X = I  # running power of A
    c = 1
    sign = 1
    for k in range(1, q + 1):
        c = c * (q - k + 1) / ((2 * q - k + 1) * k)
        X = A @ X
        N = N + c * X
        sign = -sign
        D = D + sign * c * X

    # Squaring: exp(A) = (exp(A / 2**j))**(2**j).
    F = cp.linalg.solve(D, N)
    for _ in range(j):
        F = F @ F

    return F
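A quick sanity check of expm against SciPy's host-side implementation (this assumes the module's u_nb helper is importable, since expm calls it internally):

import numpy as np
import cupy as cp
from scipy.linalg import expm as scipy_expm  # CPU reference

A_host = np.random.default_rng(0).standard_normal((4, 4))
F = expm(cp.asarray(A_host), delta=1e-10)
print(np.max(np.abs(cp.asnumpy(F) - scipy_expm(A_host))))  # ~1e-12 expected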
Example #9
def _process_data(samples_history, u_samples_history, n, n_ext, t_start, t_end,
                  target_image, corrupted_image, burn_percentage, isSinogram,
                  sinogram, theta, fbp, SimulationResult_dir, result_file,
                  cmap=plt.cm.seismic_r):
    burn_start_index = np.int(0.01*burn_percentage*u_samples_history.shape[0])
    
    #initial conditions
    samples_init = samples_history[0,:]

    #change
    u_samples_history = u_samples_history[burn_start_index:,:]
    samples_history = samples_history[burn_start_index:,:]
    N = u_samples_history.shape[0]
    
    #initial condition
    vF_init = util.symmetrize(cp.asarray(samples_init)).reshape(2*n-1,2*n-1,order=imc.ORDER)
    

    vF_mean = util.symmetrize(cp.asarray(np.mean(samples_history,axis=0)))
    vF_stdev = util.symmetrize(cp.asarray(np.std(samples_history,axis=0)))
    vF_abs_stdev = util.symmetrize(cp.asarray(np.std(np.abs(samples_history),axis=0)))
   
    
    fourier = imc.FourierAnalysis_2D(n,n_ext,t_start,t_end)
    sL2 = util.sigmasLancosTwo(cp.int(n))
    
    vForiginal = util.symmetrize_2D(fourier.rfft2(cp.array(target_image)))
    reconstructed_image_original = fourier.irfft2(vForiginal[:,n-1:])
    reconstructed_image_init = fourier.irfft2(vF_init[:,n-1:])
    
    samples_history_cp = cp.asarray(samples_history)
    v_image_count=0
    v_image_M = cp.zeros_like(reconstructed_image_original)
    v_image_M2 = cp.zeros_like(reconstructed_image_original)
    v_image_aggregate = (v_image_count,v_image_M,v_image_M2)
    for i in range(N):
        vF = util.symmetrize(samples_history_cp[i,:]).reshape(2*n-1,2*n-1,order=imc.ORDER)
        v_temp = fourier.irfft2(vF[:,n-1:])
        v_image_aggregate = util.updateWelford(v_image_aggregate,v_temp)
        
    
    
    v_image_mean,v_image_var,v_image_s_var = util.finalizeWelford(v_image_aggregate)
    
    
    mask = cp.zeros_like(reconstructed_image_original)
    r = (mask.shape[0]+1)//2
    for i in range(mask.shape[0]):
        for j in range(mask.shape[1]):
            x = 2*(i - r)/mask.shape[0]
            y = 2*(j - r)/mask.shape[1]
            if (x**2+y**2 < 1):
                mask[i,j]=1.
    
    u_samples_history_cp = cp.asarray(u_samples_history)
    u_image = cp.zeros_like(v_image_mean)
    
    u_image_count=0
    u_image_M = cp.zeros_like(u_image)
    u_image_M2 = cp.zeros_like(u_image)
    u_image_aggregate = (u_image_count,u_image_M,u_image_M2)
    ell_image_count=0
    ell_image_M = cp.zeros_like(u_image)
    ell_image_M2 = cp.zeros_like(u_image)
    ell_image_aggregate = (ell_image_count,ell_image_M,ell_image_M2)
    for i in range(N):
        uF = util.symmetrize(u_samples_history_cp[i,:]).reshape(2*n-1,2*n-1,order=imc.ORDER)
        u_temp = fourier.irfft2(uF[:,n-1:])
        u_image_aggregate = util.updateWelford(u_image_aggregate,u_temp)
        ell_temp = cp.exp(u_temp)
        ell_image_aggregate = util.updateWelford(ell_image_aggregate, ell_temp)
    u_image_mean,u_image_var,u_image_s_var = util.finalizeWelford(u_image_aggregate)
    ell_image_mean,ell_image_var,ell_image_s_var = util.finalizeWelford(ell_image_aggregate)

    
    ri_fourier = cp.asnumpy(reconstructed_image_original)
    
    if isSinogram:
        ri_compare = fbp
    else:
        ri_compare = ri_fourier
   
    is_masked = True
    if is_masked:
        reconstructed_image_var = mask * v_image_s_var
        reconstructed_image_mean = mask * v_image_mean
        reconstructed_image_init = mask * reconstructed_image_init
        u_image_mean = mask * u_image_mean
        u_image_s_var = mask * u_image_s_var
        ell_image_mean = mask * ell_image_mean
        ell_image_s_var = mask * ell_image_s_var
    else:
        reconstructed_image_mean = v_image_mean
        # keep both branches defined so the unmasked path also works
        reconstructed_image_var = v_image_s_var
    
    
    ri_init = cp.asnumpy(reconstructed_image_init)
    
    vForiginal_n = cp.asnumpy(vForiginal)
    vF_init_n = cp.asnumpy(vF_init)
    ri_fourier_n = cp.asnumpy(ri_fourier)
    vF_mean_n = cp.asnumpy(vF_mean.reshape(2*n-1,2*n-1,order=imc.ORDER))
    vF_stdev_n = cp.asnumpy(vF_stdev.reshape(2*n-1,2*n-1,order=imc.ORDER))
    vF_abs_stdev_n = cp.asnumpy(vF_abs_stdev.reshape(2*n-1,2*n-1,order=imc.ORDER))
    ri_mean_n = cp.asnumpy(reconstructed_image_mean)
    ri_var_n = cp.asnumpy(reconstructed_image_var)
    ri_std_n = np.sqrt(ri_var_n)

    u_mean_n = cp.asnumpy(u_image_mean)
    u_var_n = cp.asnumpy(u_image_s_var)
    ell_mean_n = cp.asnumpy(ell_image_mean)
    ell_var_n = cp.asnumpy(ell_image_s_var)
    
    
    #Plotting one by one
    #initial condition
    fig = plt.figure()
    plt.subplot(1,2,1)
    im = plt.imshow(np.absolute(vF_init_n),cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Fourier - absolute')
    plt.subplot(1,2,2)
    im = plt.imshow(np.angle(vF_init_n),cmap=cmap,vmin=-np.pi,vmax=np.pi)
    fig.colorbar(im)
    plt.title('Fourier - angle')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'vF_init')+image_extension, bbox_inches='tight')
    plt.close()

    #vF Original 
    fig = plt.figure()
    plt.subplot(1,2,1)
    im = plt.imshow(np.absolute(vForiginal_n),cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Fourier - absolute')
    plt.subplot(1,2,2)
    im = plt.imshow(np.angle(vForiginal_n),cmap=cmap,vmin=-np.pi,vmax=np.pi)
    fig.colorbar(im)
    plt.title('Fourier - angle')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'vForiginal')+image_extension, bbox_inches='tight')
    plt.close()

    #vF mean
    fig = plt.figure()
    plt.subplot(1,2,1)
    im = plt.imshow(np.absolute(vF_mean_n),cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Fourier - absolute')
    plt.subplot(1,2,2)
    im = plt.imshow(np.angle(vF_mean_n),cmap=cmap,vmin=-np.pi,vmax=np.pi)
    fig.colorbar(im)
    plt.title('Fourier - phase')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'vF_mean')+image_extension, bbox_inches='tight')
    plt.close()

    #Absolute error of vF_mean - vForiginal
    fig = plt.figure()
    im = plt.imshow(np.abs(vF_mean_n-vForiginal_n),cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Fourier abs error: vF_mean vs vForiginal')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'abs_err_vF_mean')+image_extension, bbox_inches='tight')
    plt.close()

    #Absolute error of vF_init - vForiginal
    fig = plt.figure()
    im = plt.imshow(np.abs(vF_init_n-vForiginal_n),cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Fourier abs error: vF_init vs vForiginal')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'abs_err_vF_init')+image_extension, bbox_inches='tight')
    plt.close()

    #Absolute error of vF_init - vF_mean
    fig = plt.figure()
    im = plt.imshow(np.abs(vF_init_n-vF_mean_n),cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Fourier abs error: vF_init vs vF_mean')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'abs_err_vF_init_vF_mean')+image_extension, bbox_inches='tight')
    plt.close()

    fig = plt.figure()
    im = plt.imshow(ri_mean_n,cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Reconstructed Image mean')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'ri_mean_n')+image_extension, bbox_inches='tight')
    plt.close()

    fig = plt.figure()
    im = plt.imshow(ri_fourier,cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Reconstructed Image through Fourier')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'ri_or_n')+image_extension, bbox_inches='tight')
    plt.close()

    fig = plt.figure()
    im = plt.imshow(ri_init,cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Reconstructed Image from initial condition')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'ri_init')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow(ri_var_n,cmap=cmap)
    fig.colorbar(im)
    plt.title('Reconstructed Image variance')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'ri_var_n')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow(target_image,cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Target Image')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'target_image')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow(ri_compare,cmap=cmap,vmin=-1,vmax=1)
    if isSinogram:        
        plt.title('Filtered Back Projection -FBP')
    else:
        plt.title('Reconstructed Image From vFOriginal')
    fig.colorbar(im)
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'ri_compare')+image_extension, bbox_inches='tight')
    plt.close()

    fig = plt.figure()
    im = plt.imshow((target_image-ri_mean_n),cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Error SPDE')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'err_RI_TI')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow((target_image-ri_compare),cmap=cmap)#,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Error comparison image')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'err_RIO_TI')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow((ri_compare-target_image),cmap=cmap,vmin=-1,vmax=1)
    fig.colorbar(im)
    plt.title('Error FBP')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'err_RI_CMP')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow(u_mean_n,cmap=cmap)
    fig.colorbar(im)
    plt.title('Mean $u$')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'u_mean_n')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow(u_var_n,cmap=cmap)
    plt.title('Var $u$')
    fig.colorbar(im)
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'u_var_n')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow(ell_mean_n,cmap=cmap)
    fig.colorbar(im)
    plt.title(r'Mean $\ell$')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'ell_mean_n')+image_extension, bbox_inches='tight')
    plt.close()
    
    fig = plt.figure()
    im = plt.imshow(ell_var_n,cmap=cmap)
    fig.colorbar(im)
    plt.title(r'Var $\ell$')
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'ell_var_n')+image_extension, bbox_inches='tight')
    plt.close()
    
    
    fig = plt.figure()
    if isSinogram:
        im = plt.imshow(sinogram,cmap=cmap)
        plt.title('Sinogram')
    else:
        im = plt.imshow(corrupted_image,cmap=cmap)
        plt.title('corrupted_image --- CI')
    fig.colorbar(im)
    plt.tight_layout()
    plt.savefig(str(SimulationResult_dir/'measurement')+image_extension, bbox_inches='tight')
    plt.close()

    #plot several slices
    N_slices = 16
    t_index = np.arange(target_image.shape[1])
    for i in range(N_slices):
        fig = plt.figure()
        slice_index = target_image.shape[0]*i//N_slices
        plt.plot(t_index,target_image[slice_index,:],'-k',linewidth=0.5,markersize=1)
        plt.plot(t_index,ri_fourier_n[slice_index,:],'-r',linewidth=0.5,markersize=1)
        plt.plot(t_index,ri_mean_n[slice_index,:],'-b',linewidth=0.5,markersize=1)
        
        plt.fill_between(t_index,ri_mean_n[slice_index,:]-2*ri_std_n[slice_index,:],
                        ri_mean_n[slice_index,:]+2*ri_std_n[slice_index,:], 
                        color='b', alpha=0.1)
        plt.plot(t_index,ri_compare[slice_index,:],':k',linewidth=0.5,markersize=1)
        plt.savefig(str(SimulationResult_dir/'1D_Slice_{}'.format(slice_index-(target_image.shape[0]//2)))+image_extension, bbox_inches='tight')
        plt.close()

    
    f_index = np.arange(n)
    for i in range(N_slices):
        fig = plt.figure()
        slice_index = vForiginal_n.shape[0]*i//N_slices
        plt.plot(f_index,np.abs(vForiginal_n[slice_index,n-1:]),'-r',linewidth=0.5,markersize=1)
        plt.plot(f_index,np.abs(vF_init_n[slice_index,n-1:]),':k',linewidth=0.5,markersize=1)
        plt.plot(f_index,np.abs(vF_mean_n[slice_index,n-1:]),'-b',linewidth=0.5,markersize=1)
        
        plt.fill_between(f_index,np.abs(vF_mean_n[slice_index,n-1:])-2*vF_abs_stdev_n[slice_index,n-1:],
                        np.abs(vF_mean_n[slice_index,n-1:])+2*vF_abs_stdev_n[slice_index,n-1:], 
                        color='b', alpha=0.1)
        plt.savefig(str(SimulationResult_dir/'1D_F_Slice_{}'.format(slice_index-n))+image_extension, bbox_inches='tight')
        plt.close()
    error = (target_image-ri_mean_n)
    error_CMP = (target_image-ri_compare)
    
    L2_error = np.linalg.norm(error)
    MSE = np.sum(error*error)/error.size
    PSNR = 10*np.log10(np.max(ri_mean_n)**2/MSE)
    SNR = np.mean(ri_mean_n)/np.sqrt(MSE*(error.size/(error.size-1)))
    
    L2_error_CMP = np.linalg.norm(error_CMP)
    MSE_CMP = np.sum(error_CMP*error_CMP)/error_CMP.size
    PSNR_CMP = 10*np.log10(np.max(ri_compare)**2/MSE_CMP)
    SNR_CMP = np.mean(ri_compare)/np.sqrt(MSE_CMP*(error_CMP.size/(error_CMP.size-1)))
    metric = {'L2_error': L2_error,
              'MSE': MSE,
              'PSNR': PSNR,
              'SNR': SNR,
              'L2_error_CMP': L2_error_CMP,
              'MSE_CMP': MSE_CMP,
              'PSNR_CMP': PSNR_CMP,
              'SNR_CMP': SNR_CMP}
    with h5py.File(result_file, mode='a') as file:
        for key, value in metric.items():
            if key in file.keys():
                del file[key]
            file.create_dataset(key, data=value)
        
    print('Shallow-SPDE : L2-error {}, MSE {}, SNR {}, PSNR {}'.format(L2_error, MSE, SNR, PSNR))
    print('FBP : L2-error {}, MSE {}, SNR {}, PSNR {}'.format(L2_error_CMP,MSE_CMP,SNR_CMP,PSNR_CMP))
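The pixel-wise means and variances above come from util.updateWelford / util.finalizeWelford, which are not shown. A minimal sketch inferred from the (count, M, M2) aggregates used above (the real helpers may differ in detail):

import cupy as cp

def updateWelford(aggregate, new_value):
    # One step of Welford's online mean/variance, elementwise over arrays.
    count, mean, M2 = aggregate
    count += 1
    delta = new_value - mean
    mean = mean + delta / count
    M2 = M2 + delta * (new_value - mean)
    return count, mean, M2

def finalizeWelford(aggregate):
    count, mean, M2 = aggregate
    return mean, M2 / count, M2 / (count - 1)  # mean, population var, sample var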
Example #10
 trainY_val = dataY_tmp[train_idx_val, :]
 testX_val = dataX[test_idx_val, :]
 testY_val = dataY_tmp[test_idx_val, :]
 [model_tmp, train_acc_temp, test_acc_temp, training_time_temp, testing_time_temp] = MRVFL(trainX_val, trainY_val, testX_val, testY_val, option)
 del model_tmp
 cp._default_memory_pool.free_all_blocks()
 while (test_acc_temp > tMAX_acc).any():
     if (test_acc_temp > MAX_acc).any():
         tMAX_acc = sMAX_acc
         sMAX_acc = MAX_acc
         MAX_acc = test_acc_temp.max()
         option_best.acc_test = test_acc_temp.max()
         option_best.acc_train = train_acc_temp.max()
         option_best.C = option.C
         option_best.N = option.N
         option_best.L = cp.int(test_acc_temp.argmax()+1)
         option_best.scale = option.scale
         option_best.nCV = i
         option_best.ratio = r
         option_best.drop = d
         test_acc_temp[test_acc_temp.argmax()] = 0
         print('Temp Best Option:{}'.format(option_best.__dict__))
     elif (test_acc_temp > sMAX_acc).any():
         tMAX_acc = sMAX_acc
         sMAX_acc = test_acc_temp.max()
         option_sbest.acc_test = test_acc_temp.max()
         option_sbest.acc_train = train_acc_temp.max()
         option_sbest.C = option.C
         option_sbest.N = option.N
         option_sbest.L = cp.int(test_acc_temp.argmax()+1)
         option_sbest.scale = option.scale
Example #11
def post_analysis(input_dir):
    relative_path = pathlib.Path(
        "//data.triton.aalto.fi/work/emzirm1/SimulationResult/")
    SimulationResult_dir = relative_path / input_dir
    file = h5py.File(str(SimulationResult_dir / 'result.hdf5'), mode='r')

    samples_history = file['Layers 1/samples_history'][()]
    u_samples_history = file['Layers 0/samples_history'][()]
    burn_start_index = np.int(0.3 * u_samples_history.shape[0])
    u_samples_history = u_samples_history[burn_start_index:, :]
    samples_history = samples_history[burn_start_index:, :]
    N = u_samples_history.shape[0]

    mean_field = util.symmetrize(cp.asarray(np.mean(samples_history, axis=0)))
    n = file['fourier/basis_number'][()]
    n_ext = file['fourier/extended_basis_number'][()]
    t_start = file['t_start'][()]
    t_end = file['t_end'][()]
    target_image = file['measurement/target_image'][()]
    corrupted_image = file['measurement/corrupted_image'][()]

    isSinogram = 'sinogram' in file['measurement'].keys()

    if isSinogram:
        sinogram = file['measurement/sinogram'][()]
        theta = file['measurement/theta'][()]
        fbp = iradon(sinogram, theta, circle=True)

    fourier = imc.FourierAnalysis_2D(n, n_ext, t_start, t_end)
    sL2 = util.sigmasLancosTwo(cp.int(n))

    vF = mean_field.reshape(2 * n - 1, 2 * n - 1, order=imc.ORDER).T

    vForiginal = sL2 * util.symmetrize_2D(
        fourier.fourierTransformHalf(cp.array(target_image)))

    vFn = cp.asnumpy(vF)

    reconstructed_image = fourier.inverseFourierLimited(vF[:, n - 1:])

    if isSinogram:
        reconstructed_image = cp.rot90(cp.fft.fftshift(reconstructed_image),
                                       -1)

    reconstructed_image_original = fourier.inverseFourierLimited(
        vForiginal[:, n - 1:])
    scaling_factor = (cp.max(reconstructed_image_original) -
                      cp.min(reconstructed_image_original)) / (
                          cp.max(reconstructed_image) -
                          cp.min(reconstructed_image))

    u_samples_history_cp = cp.asarray(u_samples_history)
    u_image = cp.zeros_like(reconstructed_image)
    for i in range(N):
        uF = util.symmetrize(u_samples_history_cp[i, :]).reshape(
            2 * n - 1, 2 * n - 1, order=imc.ORDER).T
        u_image += fourier.inverseFourierLimited(uF[:, n - 1:]) / N

    if isSinogram:
        u_image = cp.rot90(cp.fft.fftshift(u_image), -1)

    ri_n = cp.asnumpy(reconstructed_image)
    if isSinogram:
        ri_or_n = fbp
    else:
        ri_or_n = cp.asnumpy(reconstructed_image_original)

    ri_n_scaled = ri_n * cp.asnumpy(scaling_factor)
    u_n = cp.asnumpy(u_image / (np.max(ri_n) - np.min(ri_n)))
    ell_n = np.exp(u_n)
    fig, ax = plt.subplots(ncols=3, nrows=3, figsize=(15, 15))
    ax[0, 0].imshow(ri_n_scaled, cmap=plt.cm.Greys_r)
    ax[0, 0].set_title('Reconstructed Image From vF---  RI')
    ax[0, 1].imshow(target_image, cmap=plt.cm.Greys_r)
    ax[0, 1].set_title('Target Image ---  TI')
    if isSinogram:
        ax[0, 2].imshow(fbp, cmap=plt.cm.Greys_r)
        ax[0, 2].set_title('FBP --- RIO')
    else:
        ax[0, 2].imshow(ri_or_n, cmap=plt.cm.Greys_r)
        ax[0, 2].set_title('Reconstructed Image From vFOriginal --- RIO')

    ax[1, 0].imshow(np.abs(target_image - ri_n_scaled), cmap=plt.cm.Greys_r)
    ax[1, 0].set_title('Absolute error--- RI-TI')
    ax[1, 1].imshow(np.abs(ri_or_n - target_image), cmap=plt.cm.Greys_r)
    ax[1, 1].set_title('Absolute error--- RIO-TI')
    ax[1, 2].imshow(np.abs(ri_n_scaled - ri_or_n), cmap=plt.cm.Greys_r)
    ax[1, 2].set_title('Absolute error--- RI-RIO')
    ax[2, 0].imshow(u_n, cmap=plt.cm.Greys_r)
    ax[2, 0].set_title('Field u--- u')
    ax[2, 1].imshow(ell_n, cmap=plt.cm.Greys_r)
    ax[2, 1].set_title('Length Scale of v--- ell')

    if isSinogram:
        im = ax[2, 2].imshow(sinogram, cmap=plt.cm.Greys_r)
        ax[2, 2].set_title('Measurement (Sinogram) --- CI')
    else:
        im = ax[2, 2].imshow(corrupted_image, cmap=plt.cm.Greys_r)
        ax[2, 2].set_title('Measurement (corrupted_image) --- CI')

    fig.colorbar(im, ax=ax[:, :], shrink=0.8)
    fig.savefig(str(SimulationResult_dir / 'Result.pdf'), bbox_inches='tight')
    for ax_i in ax.flatten():
        extent = ax_i.get_window_extent().transformed(
            fig.dpi_scale_trans.inverted())
        fig.savefig(str(SimulationResult_dir / ax_i.title.get_text()) + '.pdf',
                    bbox_inches=extent.expanded(1.2, 1.2))

    return file
Example #12
def find_binary_threshold(v, vol_frac):
    # Value separating the largest `vol_frac` fraction of v's entries.
    frac_ind = np.int(np.ceil(v.size * (1 - vol_frac)))
    return np.partition(v.ravel(), frac_ind)[frac_ind]
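A quick usage check (synthetic data; note that np.int is the legacy alias, so this needs NumPy < 1.24 as written):

import numpy as np

v = np.linspace(0.0, 1.0, 101)           # synthetic field
t = find_binary_threshold(v, vol_frac=0.3)
print(t)                                  # ~0.71
print(np.mean(v >= t))                    # ~0.3 of entries lie above the threshold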
Example #13
            if theta_img[i,j] <= theta_max:
                
                count = 0
                ray = cp.zeros((stop + 1 - start, 3), dtype=cp.float64)  # float, so the NaN sentinels below are representable

                for k in range(start, stop + 1):

                    if count > 0:
                        ray[k-start,:] = cp.array([cp.nan, cp.nan, cp.nan])
                    else:
                        theta_slice = thetav[k,:,:]
                        theta_slice[theta_slice > theta_max] = cp.nan
                        phi_slice = phiv[k,:,:]
                        dist = cp.sqrt((theta_slice - theta_img[i,j])**2 + (phi_slice - phi_img[i,j])**2)
                        mini, minj = cp.unravel_index(cp.nanargmin(dist), theta_slice.shape)
                        mini = cp.int(mini)
                        minj = cp.int(minj)
                        # prevent smearing
                        if (mini in boundary) or (minj in boundary):
                            count += 1

                        ray[k-start,0] = k
                        ray[k-start,1] = mini
                        ray[k-start,2] = minj

                not_nans = ~cp.isnan(ray[:,0])
                how_many = cp.sum(not_nans)
                kx = ray[:,0]
                kx = kx[not_nans]
                ix = ray[:,1]
                ix = ix[not_nans]
                jx = ray[:,2]
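ray carries NaN sentinels for skipped slices, which only works with a float dtype (the original cp.int dtype would silently corrupt the NaNs); indices read back from it then need an integer cast. A tiny illustration:

import cupy as cp

ray = cp.zeros(4, dtype=cp.float64)  # float dtype keeps NaN intact
ray[1] = cp.nan
valid = ~cp.isnan(ray)
print(ray[valid].astype(cp.int64))   # cast back to int before using as indices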
Example #14
def MRVFLtrain(trainX, trainY, option):
    fs_mode = 'INF'
    rand_seed = np.random.RandomState(2)

    [n_sample, n_dims] = trainX.shape
    N = option.N
    L = option.L
    C = option.C
    s = option.scale
    mode = option.mode
    ratio = option.ratio
    drop = option.drop

    TrainingAccuracy = cp.zeros(L)

    if mode == 'merged':
        drop_amount = cp.int(cp.floor(drop * N))
        selected_amount = cp.int(cp.floor(ratio * N))
        bi = []

    A = []
    beta = []
    weights = []
    biases = []
    mu = []
    sigma = []
    sfi = []
    fs = []

    A_input = trainX

    time_start = time.time()

    for i in range(L):

        if i == 0:
            w = s * 2 * cp.asarray(rand_seed.rand(n_dims, N)) - 1

        elif mode == 'merged':
            ######################### SETTING
            # w = s * 2 * cp.asarray(rand_seed.rand(n_dims - drop_amount + N, N)) - 1
            w = s * 2 * cp.asarray(
                rand_seed.rand(n_dims + selected_amount - drop_amount + N,
                               N)) - 1
            # w = s * 2 * cp.asarray(rand_seed.rand(n_dims + selected_amount*i - drop_amount + N, N)) - 1

        b = s * cp.asarray(rand_seed.rand(1, N))
        weights.append(w)
        biases.append(b)

        A_ = cp.matmul(A_input, w)  # A_ has shape (n_sample, N) at every layer
        # layer normalization
        A_mean = cp.mean(A_, axis=0)
        A_std = cp.std(A_, axis=0)
        A_ = (A_ - A_mean) / A_std
        mu.append(A_mean)
        sigma.append(A_std)

        A_ = A_ + cp.repeat(b, n_sample, 0)
        A_ = selu(A_)
        if i == 0:
            A_tmp = cp.concatenate(
                [trainX, A_, cp.ones((n_sample, 1))], axis=1)
        else:
            A_tmp = cp.concatenate(
                [trainX, sf, A_, cp.ones((n_sample, 1))], axis=1)
        beta_ = l2_weights(A_tmp, trainY, C, n_sample)

        if fs_mode == 'LASSO':
            significance = cp.linalg.norm(beta_, ord=1, axis=1)
            ranked_index = cp.argsort(significance[n_dims:-1])
        elif fs_mode == 'RIDGE':
            significance = cp.linalg.norm(beta_, ord=2, axis=1)
            ranked_index = cp.argsort(significance[n_dims:-1])
        elif fs_mode == 'MI':
            at = cp.asnumpy(A_tmp[:, n_dims:-1])
            ty = cp.asnumpy(cp.asarray([cp.argmax(i) for i in trainY]))
            mis = mi(at, ty)
            ranked_index = cp.argsort(mis)
        elif fs_mode == 'INF':
            at = cp.asnumpy(A_tmp[:, n_dims:-1])
            rank, score = inf_fs(at)
            ranked_index = rank

        A.append(A_tmp)
        beta.append(beta_)

        selected_index = ranked_index[:selected_amount]  # chosen features, reused in the next layers

        sfi.append(selected_index)
        left_amount = N - drop_amount
        left_index = ranked_index[:left_amount]
        A_except_trainX = A_tmp[:, n_dims:-1]
        A_selected = A_except_trainX[:, selected_index]
        fs.append(A_selected)
        A_ = A_except_trainX[:, left_index]

        ################### SETTING
        sf = A_selected
        # sf = cp.concatenate(fs, axis=1)

        ################### SETTING
        A_input = cp.concatenate([trainX, sf, A_], axis=1)
        # A_input = cp.concatenate([trainX,  A_], axis=1)

        bi.append(left_index)

        pred_result = cp.zeros((n_sample, i + 1))
        for j in range(i + 1):
            Ai = A[j]
            beta_temp = beta[j]
            predict_score = cp.matmul(Ai, beta_temp)
            predict_index = cp.argmax(predict_score, axis=1).ravel()
            # indx=indx.reshape(n_sample,1)
            pred_result[:, j] = predict_index
        TrainingAccuracy_temp = majorityVoting(trainY, pred_result)
        TrainingAccuracy[i] = TrainingAccuracy_temp

    time_end = time.time()
    Training_time = time_end - time_start

    model = mod(L, weights, biases, beta, mu, sigma, sfi, bi)

    return model, TrainingAccuracy, Training_time
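l2_weights is not shown; from its call sites it is presumably the ridge-regularized least-squares solve common in RVFL training. A plausible sketch (the closed form and the 1/C scaling are assumptions; a real implementation would also need a pseudo-inverse fallback for C = 0, which appears in the search grid below):

import cupy as cp

def l2_weights(A, Y, C, n_sample):
    # Ridge solution beta = (A^T A + I/C)^-1 A^T Y, using the dual form
    # when there are fewer samples than features.
    n_feature = A.shape[1]
    if n_sample >= n_feature:
        return cp.linalg.solve(A.T @ A + cp.eye(n_feature) / C, A.T @ Y)
    return A.T @ cp.linalg.solve(A @ A.T + cp.eye(n_sample) / C, Y)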
Example #15
def main(dataset, device_number):
    root_path = '/home/hu/eRVFL/UCIdata'
    # data_name = 'cardiotocography-10clases'
    data_name = dataset
    # n_device = 6
    n_device = device_number
    print('Dataset Name:{}\nDevice Number:{}'.format(data_name, n_device))
    logging.debug('Dataset Name:{}\tDevice Number:{}'.format(
        data_name, n_device))
    cp.cuda.Device(n_device).use()
    # load dataset
    # dataX
    datax = np.loadtxt('{0}/{1}/{1}_py.dat'.format(root_path, data_name),
                       delimiter=',')
    dataX = cp.asarray(datax)
    # dataY
    datay = np.loadtxt('{}/{}/labels_py.dat'.format(root_path, data_name),
                       delimiter=',')
    dataY = cp.asarray(datay)

    # Validation Index
    Validation = np.loadtxt('{}/{}/validation_folds_py.dat'.format(
        root_path, data_name),
                            delimiter=',')
    validation = cp.asarray(Validation)

    # Folds Index
    Folds_index = np.loadtxt('{}/{}/folds_py.dat'.format(root_path, data_name),
                             delimiter=',')
    folds_index = cp.asarray(Folds_index)

    types = cp.unique(dataY)
    n_types = types.size
    n_CV = folds_index.shape[1]
    # One-hot encode the target (vectorized; replaces the per-element loop)
    dataY_tmp = (dataY[:, None] == types[None, :]).astype(cp.float64)

    option = op(N=256,
                L=16,
                C=2**-6,
                scale=1,
                seed=1,
                nCV=0,
                ratio=0,
                mode='merged',
                drop=0)
    if dataX.shape[1] <= 16:
        N_range = [16, 32, 64]
        #N_range = [1024, 2048, 4096]
    elif dataX.shape[1] <= 64:
        N_range = [64, 128, 256, 512]
    else:
        N_range = [128, 256, 512, 1024]
    option.L = 16
    option.scale = 1
    C_range = np.append(0, 2.**np.arange(-6, 12, 2))
    # C_range = 2.**np.arange(-6, 12, 2)

    Models = []
    # dataX = rescale(dataX) #####delete

    train_acc_result = cp.zeros((n_CV, 1))
    test_acc_result = cp.zeros((n_CV, 1))
    train_time_result = cp.zeros((n_CV, 1))
    test_time_result = cp.zeros((n_CV, 1))

    option_best = op(N=256,
                     L=32,
                     C=2**-6,
                     scale=1,
                     seed=0,
                     nCV=0,
                     ratio=0,
                     mode='merged',
                     drop=0)
    option_sbest = op(N=256,
                      L=32,
                      C=2**-6,
                      scale=1,
                      seed=0,
                      nCV=0,
                      ratio=0,
                      mode='merged',
                      drop=0)
    option_tbest = op(N=256,
                      L=32,
                      C=2**-6,
                      scale=1,
                      seed=0,
                      nCV=0,
                      ratio=0,
                      mode='merged',
                      drop=0)
    for i in range(n_CV):
        MAX_acc = 0
        sMAX_acc = 0
        tMAX_acc = 0
        train_idx = cp.where(folds_index[:, i] == 0)[0]
        test_idx = cp.where(folds_index[:, i] == 1)[0]
        trainX = dataX[train_idx, :]
        trainY = dataY_tmp[train_idx, :]
        testX = dataX[test_idx, :]
        testY = dataY_tmp[test_idx, :]
        st = time.time()
        for n in N_range:
            option.N = n
            for j in C_range:
                option.C = j
                for r in cp.arange(0, 0.6, 0.1):
                    option.ratio = r
                    # for d in [0]:
                    for d in cp.arange(0, 0.6, 0.2):
                        option.drop = d
                        train_idx_val = cp.where(validation[:, i] == 0)[0]
                        test_idx_val = cp.where(validation[:, i] == 1)[0]
                        trainX_val = dataX[train_idx_val, :]
                        trainY_val = dataY_tmp[train_idx_val, :]
                        testX_val = dataX[test_idx_val, :]
                        testY_val = dataY_tmp[test_idx_val, :]
                        [
                            model_tmp, train_acc_temp, test_acc_temp,
                            training_time_temp, testing_time_temp
                        ] = MRVFL(trainX_val, trainY_val, testX_val, testY_val,
                                  option)
                        del model_tmp
                        cp._default_memory_pool.free_all_blocks()
                        while (test_acc_temp > tMAX_acc).any():
                            if (test_acc_temp > MAX_acc).any():
                                tMAX_acc = sMAX_acc
                                sMAX_acc = MAX_acc
                                MAX_acc = test_acc_temp.max()
                                option_best.acc_test = test_acc_temp.max()
                                option_best.acc_train = train_acc_temp.max()
                                option_best.C = option.C
                                option_best.N = option.N
                                option_best.L = cp.int(test_acc_temp.argmax() +
                                                       1)
                                option_best.scale = option.scale
                                option_best.nCV = i
                                option_best.ratio = r
                                option_best.drop = d
                                test_acc_temp[test_acc_temp.argmax()] = 0
                                print('Temp Best Option:{}'.format(
                                    option_best.__dict__))
                                logging.debug('Temp Best Option:{}'.format(
                                    option_best.__dict__))
                            elif (test_acc_temp > sMAX_acc).any():
                                tMAX_acc = sMAX_acc
                                sMAX_acc = test_acc_temp.max()
                                option_sbest.acc_test = test_acc_temp.max()
                                option_sbest.acc_train = train_acc_temp.max()
                                option_sbest.C = option.C
                                option_sbest.N = option.N
                                option_sbest.L = cp.int(
                                    test_acc_temp.argmax() + 1)
                                option_sbest.scale = option.scale
                                option_sbest.nCV = i
                                option_sbest.ratio = r
                                option_sbest.drop = d
                                test_acc_temp[test_acc_temp.argmax()] = 0
                                print('Temp Second Best Option:{}'.format(
                                    option_sbest.__dict__))
                                logging.debug(
                                    'Temp Second Best Option:{}'.format(
                                        option_sbest.__dict__))
                            elif (test_acc_temp > tMAX_acc).any():
                                tMAX_acc = test_acc_temp.max()
                                option_tbest.acc_test = test_acc_temp.max()
                                option_tbest.acc_train = train_acc_temp.max()
                                option_tbest.C = option.C
                                option_tbest.N = option.N
                                option_tbest.L = cp.int(
                                    test_acc_temp.argmax() + 1)
                                option_tbest.scale = option.scale
                                option_tbest.nCV = i
                                option_tbest.ratio = r
                                option_tbest.drop = d
                                test_acc_temp[test_acc_temp.argmax()] = 0
                                print('Temp Third Best Option:{}'.format(
                                    option_tbest.__dict__))
                                logging.debug(
                                    'Temp Third Best Option:{}'.format(
                                        option_tbest.__dict__))
                        #print('Training Time for one option set:{:.2f}'.format(time.time() - st))
                        #logging.debug('Training Time for one option set:{:.2f}'.format(time.time() - st))
        [model_RVFL0, train_acc0, test_acc0, train_time0,
         test_time0] = MRVFL(trainX, trainY, testX, testY, option_best)
        [model_RVFL1, train_acc1, test_acc1, train_time1,
         test_time1] = MRVFL(trainX, trainY, testX, testY, option_sbest)
        [model_RVFL2, train_acc2, test_acc2, train_time2,
         test_time2] = MRVFL(trainX, trainY, testX, testY, option_tbest)
        best_index = cp.argmax(
            cp.array([test_acc0.max(),
                      test_acc1.max(),
                      test_acc2.max()]))
        print('Best Index:{}'.format(best_index))
        print('Training Time for one fold set:{:.2f}'.format(time.time() - st))
        logging.debug(
            'Best Index:{}\nTraining Time for one fold set:{:.2f}'.format(
                best_index,
                time.time() - st))

        model_RVFL = eval('model_RVFL{}'.format(best_index))
        Models.append(model_RVFL)
        train_acc_result[i] = eval('train_acc{}.max()'.format(best_index))
        test_acc_result[i] = eval('test_acc{}.max()'.format(best_index))
        train_time_result[i] = eval('train_time{}'.format(best_index))
        test_time_result[i] = eval('test_time{}'.format(best_index))
        del model_RVFL
        cp._default_memory_pool.free_all_blocks()
        print(
            'Best Train accuracy in fold{}:{}\nBest Test accuracy in fold{}:{}'
            .format(i, train_acc_result[i], i, test_acc_result[i]))
        logging.debug(
            'Best Train accuracy in fold{}:{}\nBest Test accuracy in fold{}:{}'
            .format(i, train_acc_result[i], i, test_acc_result[i]))
    mean_train_acc = train_acc_result.mean()
    mean_test_acc = test_acc_result.mean()
    print('Train accuracy:{}\nTest accuracy:{}'.format(train_acc_result,
                                                       test_acc_result))
    logging.debug('Train accuracy:{}\nTest accuracy:{}'.format(
        train_acc_result, test_acc_result))
    print('Mean train accuracy:{}\nMean test accuracy:{}'.format(
        mean_train_acc, mean_test_acc))
    logging.debug('Mean train accuracy:{}\tMean test accuracy:{}'.format(
        mean_train_acc, mean_test_acc))
    with open('Model_{}'.format(data_name), 'wb') as save_result:
        pickle.dump(Models, save_result)
Example #16
                count = 0
                ray = cp.zeros((stop + 1 - start, 3), dtype=cp.float64)  # float, so the NaN sentinels below are representable

                for k in range(start, stop + 1):

                    if count > 0:
                        ray[k - start, :] = cp.array([cp.nan, cp.nan, cp.nan])
                    else:
                        theta_slice = thetav[k, :, :]
                        theta_slice[theta_slice > theta_max] = cp.nan
                        phi_slice = phiv[k, :, :]
                        dist = cp.sqrt((theta_slice - theta_img[i, j])**2 +
                                       (phi_slice - phi_img[i, j])**2)
                        mini, minj = cp.unravel_index(cp.nanargmin(dist),
                                                      theta_slice.shape)
                        mini = cp.int(mini)
                        minj = cp.int(minj)
                        # prevent smearing
                        if (mini in boundary) or (minj in boundary):
                            count += 1

                        ray[k - start, 0] = k
                        ray[k - start, 1] = mini
                        ray[k - start, 2] = minj

                not_nans = ~cp.isnan(ray[:, 0])
                how_many = cp.sum(not_nans)
                kx = ray[:, 0]
                kx = kx[not_nans]
                ix = ray[:, 1]
                ix = ix[not_nans]