    def alternative_projection(self,
                               signal,
                               mode='weight',
                               verbose=False,
                               lambd=.2,
                               soft=False,
                               scaled=True,
                               tol=10**(-5),
                               num_it=15):
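        """Refine the analysis coefficients by alternating projections:
        enforce data fidelity on the observed samples (selected by the
        mask H), then threshold the coefficients to keep them sparse."""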
        x, H = preprocess_signal(signal, affine=True)
        coef = self.transform(signal)
        reconstruct = self.inverse(coef)

        sig_norm, iteration = norm(x), 1
        while norm(x -
                   H * reconstruct) / sig_norm > tol and iteration < num_it:
            iteration += 1
            coef = coef + 2 * lambd * self.transform(H * (x - reconstruct),
                                                     mode=mode)
            if scaled:
                coef = np.reshape(coef,
                                  (len(coef) // self.frames, self.frames))
                for i in range(len(coef)):
                    coef[i, :] = thresholding(coef[i, :], lambd, soft=soft)
                coef = coef.flatten()
            else:
                coef = thresholding(coef, lambd, soft)
            reconstruct = self.inverse(coef)

        if verbose:
            return coef, reconstruct
        return coef
Example #2
    def newatom(self, atoms):
        """ See if we can just add one new atom """
        temp = list(self.cutoffs)
        temp.append(self.cutoff)
        self.cutoffs = np.asarray(temp)

        newpos = atoms.get_positions()[-1]

        offset = self.cell[0]

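        # Compare the new atom with every existing position, also checking
        # the periodic images shifted by +/- the first cell vector.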
        neighs = []
        for i, pos in enumerate(self.positions):
            if norm(newpos - pos) < self.cutoff:
                neighs.append(i)
            elif norm(newpos + offset - pos) < self.cutoff:
                neighs.append(i)
            elif norm(newpos - offset - pos) < self.cutoff:
                neighs.append(i)

        temppos = self.positions.tolist()
        temppos.append(newpos)
        self.positions = np.asarray(temppos)

        self.neighbors.append(np.asarray(neighs))
        self.displacements.append(np.zeros((len(neighs), 3)))
        self.num_atoms += 1

        self.logger.info("Added an atom to the neighborlist...")
        return self.update(atoms)
Example #4
def alternative_projection(signal,
                           lambd=.1,
                           alpha=0,
                           soft=True,
                           tol=10**(-2),
                           num_it=25,
                           mode='same',
                           affine=True):
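    """Denoise/inpaint `signal` by iterating a data-fidelity projection
    (keep the observed samples selected by the mask H) followed by
    thresholding in the wavelet domain."""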
    length = len(signal)
    guess, H = preprocess_signal(signal, affine=affine)
    sig = np.copy(guess)
    if mode == 'weight':
        guess = signal

    def projection(guess):
        return H * sig + (1 - H) * guess

    def gradient(guess):
        return alpha * guess + (1 - alpha) * projection(guess)

    def wavelet_threshold(guess, thres):
        coef = wavelet_transform(guess, mode=mode)
        coef = thresholding(coef, thres, soft)
        return inverse_wavelet_transform(coef, length=length)

    sig_norm, i = norm(sig), 1
    while norm(sig - guess) / sig_norm > tol and i < num_it:
        i += 1
        guess = wavelet_threshold(gradient(guess), lambd)
    return guess
Example #5
 def disp_old(self):
     """ Displacement from one to two """
     direct = self.one.pos - self.two.pos
     around = self.one.pos + self.offset - self.two.pos
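     # Return whichever displacement is shorter: the direct one or the
     # one wrapped through the periodic boundary.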
     if norm(direct) <= norm(around):
         return direct
     else:
         return around
Example #8
def force_func(cell1, cell2, a, xi):
    """ the native force function between two positions """
    x1 = cell1.pos
    x2 = cell2.pos
    disp = x1 - x2
    mod_disp = norm(disp)
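    # Central force along the separation vector; the bracketed factor is
    # (mod_disp - xi) * (mod_disp - 2 * xi), so the force changes sign at
    # separations xi and 2 * xi.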
    force = 2 * a**4 * (2 * xi**2 - 3 * xi * mod_disp +
                        mod_disp**2) / (xi**2 * mod_disp**6) * disp

    return force
Example #10
def distance(pos_1, pos_2, box_length):
    """ Calculates periodic distance between two points """
    x = helper.abs_min(pos_1[0] - pos_2[0],
                       pos_1[0] - pos_2[0] + box_length,
                       pos_1[0] - pos_2[0] - box_length)

    y = helper.abs_min(pos_1[1] - pos_2[1],
                       pos_1[1] - pos_2[1] + box_length,
                       pos_1[1] - pos_2[1] - box_length)

    z = helper.abs_min(pos_1[2] - pos_2[2],
                       pos_1[2] - pos_2[2] + box_length,
                       pos_1[2] - pos_2[2] - box_length)

    return helper.norm(x, y, z)
Example #11
def force_func2(cell1, cell2, a, xi):
    """ the native force function between two positions """
    x1 = cell1.pos
    x2 = cell2.pos
    r1 = cell1.radius
    r2 = cell2.radius
    disp = x1 - x2
    mod_disp = norm(disp)
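    # Rescale the potential parameters by the combined radii, so larger
    # cell pairs interact over a proportionally larger range.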
    a1 = a * (r1 + r2)
    xi1 = xi * (r1 + r2)
    force = 2 * a1**4 * (2 * xi1**2 - 3 * xi1 * mod_disp +
                         mod_disp**2) / (xi1**2 * mod_disp**6) * disp

    return force
Example #12
def force_func_hertz(cell1, cell2, a, xi):
    """ the Hertz force between two cells """
    x1 = cell1.pos
    x2 = cell2.pos
    r1 = cell1.radius
    r2 = cell2.radius
    disp = x1 - x2
    mod_disp = norm(disp)
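    # Overlap depth: positive only while the two spheres interpenetrate;
    # the Hertz contact force below scales as delta**1.5.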
    delta = (r1 + r2) - mod_disp
    if delta > 0.0:
        force = a * delta**1.5 * disp / mod_disp
    else:
        force = 0.0

    return force
Example #14
def CPD_MWU(X, F, sketching_rates, lamb, eps, nu, rank, num_iterations=100):
    # Keep residual errors + res time
    error = []
    res_time = 0

    # Cache norm + Id + unfolding of X
    norm_x = norm(X)
    Id = np.eye(rank)
    X_unfold = [unfold(X, mode=0), unfold(X, mode=1), unfold(X, mode=2)]

    # Initialize weights
    weights = np.ones(len(sketching_rates)) / len(sketching_rates)

    # Randomly initialize A,B,C
    dim_1, dim_2, dim_3 = X.shape
    A, B, C = rand_init(dim_1, rank), rand_init(dim_2,
                                                rank), rand_init(dim_3, rank)

    # Append initialization residual error
    error.append(residual_error(X_unfold[0], norm_x, A, B, C))

    # Run CPD_MWU for num iterations
    for i in range(num_iterations):
        # Select sketching rate with probability proportional to w_i
        s = sample(sketching_rates, weights)

        # Solve Ridge Regression for A,B,C
        A, B, C = update_factors(A, B, C, X_unfold, Id, lamb, s, rank)

        # Update weights
        if bern(eps) == 1 and len(sketching_rates) > 1:
            update_weights(A, B, C, X_unfold, Id, norm_x, lamb, weights,
                           sketching_rates, rank, nu, eps)

        print("iteration:", i)
        start = time.time()
        error.append(residual_error(X_unfold[0], norm_x, A, B, C))
        end = time.time()
        res_time += end - start
    return A, B, C, np.array(error), res_time
Example #15
def ind_linear_independent(sorted_design, cor_thres=.5, reduct=1):
    X = np.copy(sorted_design)
    X -= np.mean(X, axis=0)
    X /= np.sqrt(np.sum(X**2, axis=0))
    family, ind, i = [X[:,0]], [0], 1
    dim, nb_features = X.shape[0], X.shape[1]
    ind_extractor = np.zeros(nb_features) == 1
    ind_extractor[0] = True
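    # Boolean mask over the features: True marks columns kept in the
    # linearly independent family.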
    while i < nb_features and len(family) < dim-reduct:
        if i % 5000 == 0:
            print(i, len(family))
        cur = X[:,i]
        add = True
        """ Avoid to much correlation with prior features """
        for feature_ind in ind:
            cor = np.abs(np.sum(cur*X[:,feature_ind]))
            if cor > cor_thres:
                add = False
                break
        if not add:
            i += 1
        else:
            """ Gram_Schimdt """
            for f in family:
                scp = np.sum(cur*f)
                cur = cur - scp * f
            
            """ Check Linear Dependency """
            tmp = norm(cur)
            if tmp < 10**(-4):
                i += 1
            else:
                cur = cur / tmp
                ind.append(i)
                family.append(cur)
                ind_extractor[i] = True
                i += 1
    return ind_extractor
Example #16
    def extension_without_breaking(self):
        """ Get the extension of the current link without breaking """
        length = norm(self.disp)

        return length
Example #17
def povray_making_movie(position):

    X_size = 60

    # position == 0 ----> dermis
    # position == 1 ----> epidermis
    # position == 2 ----> top of basal membrane
    # position == 3 ----> bottom of basal membrane
    #position = 1

    if position == 0:
        open_file = 'states/indaderma_state_'
        open_file_pressure = 'pressure/indaderma_pressure.dat'
        write_file = '3D/indaderma_3D_'

    if position == 1:
        open_file = 'states/indaepidermis_state_'
        open_file_pressure = 'pressure/indaepidermis_pressure.dat'
        write_file = '3D/indaepidermis_3D_'

    if position == 2:
        open_file = 'states/top_basal_membrane_state_'
        open_file_pressure = 'pressure/top_basal_membrane_pressure.dat'
        write_file = '3D/top_basal_membrane_3D_'

    if position == 3:
        open_file = 'states/bottom_basal_membrane_state_'
        open_file_pressure = 'pressure/bottom_basal_membrane_pressure.dat'
        write_file = '3D/bottom_basal_membrane_3D_'

    filename_upload = open_file_pressure
    upload = loadtxt(filename_upload)
    stress = empty((len(upload), 1))
    for i in range(0, len(upload)):
        stress[i] = upload[i][1]

    max_number_of_cells = int(1 + upload[len(upload) - 1][0])
    print('max number of cells =', max_number_of_cells)

    for num_of_cell in range(0, max_number_of_cells):
        print('num_of_cell =', num_of_cell)
        open_file_state = open_file + str(num_of_cell) + '.dat'
        with open(open_file_state, 'rb') as f:
            config, cells, links, ghosts, T = pickle.load(f)
        stress_partial = empty((num_of_cell + 1, 1))
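        # Stress values are stored in a triangular layout: rows for
        # snapshot n start at offset n * (n + 1) // 2.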
        for i in range(0, num_of_cell + 1):
            stress_partial[i] = stress[(num_of_cell + 1) * num_of_cell // 2 + i]
        print(stress_partial)
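        # Rescale stresses into [0.1, 1.0] so the colour map never
        # bottoms out at zero.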
        if len(stress_partial) > 1:
            stress_partial = (stress_partial - stress_partial.min()) / (
                stress_partial.max() - stress_partial.min()) * 0.9 + 0.1
        else:
            stress_partial[0] = 0.5
        col_array = []
        for i in range(0, len(stress_partial)):
            rgb_color = cm.hot(1 - stress_partial[i], 1.0)
            col_array.append(rgb_color)

        write_file_3D = write_file + str(num_of_cell) + '.txt'
        file_povray = open(write_file_3D, 'w')

        numero_cancer = 0
        for cell in cells:
            if cell.type.name == 'tDermal':
                color = 'color LimeGreen'
            if cell.type.name == 'Epidermal':
                color = 'color MediumBlue'
            if cell.type.name == 'Basal':
                color = 'color Gray20'
            if cell.type.name == 'Corneum':
                color = 'color MediumVioletRed'
            if cell.type.name == 'Cancer':
                color = 'color <' + str(
                    col_array[numero_cancer][0][0]) + ',' + str(
                        col_array[numero_cancer][0][1]) + ',' + str(
                            col_array[numero_cancer][0][2]) + '>'
                numero_cancer = numero_cancer + 1
            s = 'sphere { <0, 0, 0>,' + str(
                cell.radius
            ) + ' material {texture {pigment {' + color + '} finish { specular specularvalue roughness roughnessvalue reflection phongreflection }}}translate<' + str(
                cell.pos[0]) + ',' + str(cell.pos[1]) + ', 0.0 >}\n'
            file_povray.write(s)

        cutoff = X_size / 2
        for link in links:
            if (link.two.pos[0] != link.one.pos[0]) and (link.two.pos[1] !=
                                                         link.one.pos[1]):
                d12 = link.one.pos - link.two.pos
                abs_d12 = norm(d12)
                if abs_d12 < cutoff:
                    color = 'color White'
                    s = 'cylinder { <' + str(link.one.pos[0]) + ',' + str(
                        link.one.pos[1]
                    ) + ', 0.0 >,<' + str(link.two.pos[0]) + ',' + str(
                        link.two.pos[1]
                    ) + ', 0.0 >,' + str(
                        0.1
                    ) + ' material {texture {pigment {' + color + '} finish { phong phongvalue_cyl phong_size phongsize_cyl reflection phongreflection_cyl}}}}\n'
                    file_povray.write(s)
        file_povray.close()
Example #18
 def disp(self):
     """ Displacement from one to two """
     disp = self.one.pos - self.two.pos
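     # If the direct separation exceeds half the box width, switch to the
     # periodically shifted image (minimum-image convention).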
     if norm(disp) > self.xsize / 2:
         disp = self.one.pos + self.offset - self.two.pos
     return disp
Example #20
 def basic_feat(self, reviewsfile, dictfile, docfreq, outputfile, tfidf_file, 
                norm_file, k, normFlag=False, tfidf=False):
     """
         This method simply maps each review text to a sparse feature matrix.
         The number of features is defined by the dictionary size. The value of
         each cell in the sparse matrix is just the number of times that
         token occurred in the review.
         
         Parameters
         -----------
         reviewsfile: string
             path of the file containing the reviews. each line is a new review
         dictfile: string
             path of file containing the tokens in training corpus and their 
             frequencies.
         docfreq: string
             path of the file containing the term-document frequencies. This is
             needed for the tf-idf calculations.
         outputfile: string
             path of the output file
         tfidf_file: string
             path of output file to store tfidf features
         norm_file: string
             path of output file to store normalized features
         k: int
             size of the dictionary
         normFlag: bool
             when true perform standard normalization, rescale the data 
             to [0,1]
         tfidf: bool
             when true replace term frequency with tf-idf
             
         Note
         -----
         Output file format is sparse matrix
     """
     
     self._dictionary(dictfile, docfreq, k) #create the dictionary of top k terms
     # self._idf = pickle.load(open(docfreq,"r"))
     row = list()
     column = list()
     val = list() #base values
     val_idf = list() #tfidf version of base values
     val_norm = list() #norm version of base values
     line_no = 0
     
     # for each line
     for line in pickle.load(open(reviewsfile, "rb")):
         #calculate term frequency for the review text wrt dictionary
         #this is simply word count
         tf = np.zeros(len(self._vocab))
         for term in line.split(" "):
             if term in self._vocab:
                 tf[self._vocab[term]] += 1
         #non zero term index
         non_zero = tf > 0
         
         data = tf[non_zero] #non zero terms
         col = np.arange(len(self._vocab))
         col = col[non_zero]
         
         #create a list for use in sparse matrix initialization later
         for c, d in zip(col, data):
             row.append(line_no)
             column.append(c)
             val.append(d)
             
         #calculate the tfidf version for non zero terms
         if tfidf:
             data_tfidf = tf[non_zero] * self._idf[non_zero]
             val_idf.extend(data_tfidf)
         
         #calculate the normalized version for non zero terms
         if normFlag:
             data_norm = norm(tf[non_zero])
             val_norm.extend(data_norm)
         
         line_no += 1
     #create sparse matrix and dump to output file
     mat = csr_matrix((val, (row, column)), (line_no, k))
     pickle.dump(obj=mat, file=open(outputfile, "wb"))
     print("base features generated")

     #create sparse matrix of tfidf terms and dump to output
     if tfidf:
         mat = csr_matrix((val_idf, (row, column)), (line_no, k))
         pickle.dump(obj=mat, file=open(tfidf_file, "wb"))
         print("base features with tfidf generated")

     #create sparse matrix of normalized terms and dump to output
     if normFlag:
         mat = csr_matrix((val_norm, (row, column)), (line_no, k))
         pickle.dump(obj=mat, file=open(norm_file, "wb"))
         print("base features with normalization generated")
Example #21
    def hash_feat(self, reviewsfile, dictfile, docfreq, outputfile, 
        tfidf_file, norm_file, v, k, normFlag=False, tfidf=False):
        """
            This method first generates the dictionary of the specified size.
            It then considers only the terms from the dictionary in each review
            and maps them to a feature space by hashing.
            Similar to the previous case, the output is a sparse feature
            matrix where each cell holds a token's term frequency within the review.
            
            Parameters
            ----------
            reviewsfile: string
                path of the file containing the reviews. each line is a new review
            dictfile: string
                path of file containing the tokens in training corpus and their 
                frequencies.
            docfreq: string
                path of the file containing the term-document frequencies. This is
                needed for the tf-idf calculations.
            outputfile: string
                path of the output file
            tfidf_file: string
                path of output file to store tfidf features
            norm_file: string
                path of output file to store normalized features
            v: int 
                size of vocabulary
            k: int
                size of the dictionary
            normFlag: bool
                when true perform standard normalization, rescale the data 
                to [0,1]
            tfidf: bool
                when true replace term frequency with tf-idf
        """

        self._dictionary(dictfile, docfreq, k=v)  #create the dictionary of top v terms
        row = list()
        column = list()
        val = list()#base values
        val_norm = list()#norm version of base values
        val_idf = list()#tfidf version of base values
        line_no = 0
        
        for line in pickle.load(open(reviewsfile, "rb")):
            #store term frequency wrt current review and overall
            tf = np.zeros(k)
            idf = np.zeros(k)
            for term in line.split(" "):
                if term in self._vocab:
                    tf[hash(term) % k] += 1
                    #in case of collisions store the highest tf
                    idf[hash(term) % k] = np.max((idf[hash(term) % k], self._idf[hash(term) % k])) 
        
            non_zero = tf > 0 #non zero term index
            data = tf[non_zero]
            col = np.arange(k)
            col = col[non_zero]
            
            #create a list for use in sparse matrix initialization later
            for c, d in zip(col, data):
                row.append(line_no)
                column.append(c)
                val.append(d)
            
            #calculate the tfidf version for non zero terms
            if tfidf:
                data_tfidf = tf[non_zero] * idf[non_zero]
                val_idf.extend(data_tfidf)
            
            #calculate the normalized version for non zero terms
            if normFlag:
                data_norm = norm(tf[non_zero])
                val_norm.extend(data_norm)
            
            line_no += 1

        #create sparse matrix and dump to output file
        mat = csr_matrix((val, (row, column)), (line_no, k))
        pickle.dump(obj=mat, file=open(outputfile, "wb"))
        print("hash features generated")

        #create sparse matrix of tfidf terms and dump to output
        if tfidf:
            mat = csr_matrix((val_idf, (row, column)), (line_no, k))
            pickle.dump(obj=mat, file=open(tfidf_file, "wb"))
            print("hash features with tfidf generated")

        #create sparse matrix of normalized terms and dump to output
        if normFlag:
            mat = csr_matrix((val_norm, (row, column)), (line_no, k))
            pickle.dump(obj=mat, file=open(norm_file, "wb"))
            print("hash features with normalization generated")
Example #22
def bras_CPD(F, X, rank, B, alpha, beta, num_iterations=100, max_time=None):
    # bookkeeping
    total_time = 0
    res_error = []
    time = []

    # Cache norm
    start = timer()
    norm_x = norm(X)
    F_norm = [norm(F[0]), norm(F[1]), norm(F[2])]

    # Randomly initialize A,B,C
    dim_1, dim_2, dim_3 = X.shape
    A = [
        rand_init(dim_1, rank),
        rand_init(dim_2, rank),
        rand_init(dim_3, rank)
    ]
    total_col = {0: dim_2 * dim_3, 1: dim_1 * dim_3, 2: dim_1 * dim_2}
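    # Number of columns in each mode-n unfolding: the product of the two
    # remaining tensor dimensions.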

    # Cache Unfoldings
    X_unfold = [unfold(X, mode=0), unfold(X, mode=1), unfold(X, mode=2)]

    # Finish timing initialization step
    end = timer()
    total_time += end - start

    # Append initialization residual error
    res_error.append(residual_error(X_unfold[0], norm_x, A[0], A[1], A[2]))
    time.append(total_time)

    # Run bras_CPD
    if max_time is None:
        for r in range(num_iterations):
            if (r + 1) % 5000 == 0:
                print("iteration:", r)

            # Time start step
            start = timer()
            # Randomly select mode n time update.
            n = sample(3)
            # Generate sketching indices
            idx = generate_sketch_indices(B, total_col[n])
            # Update Factor matrix
            update_factor_bras(X_unfold, A, idx, n, rank, alpha)
            # Update learning rate
            alpha /= (r + 1)**beta
            # Time iteration step
            end = timer()
            total_time += end - start
            # Append error
            res_error.append(
                residual_error(X_unfold[0], norm_x, A[0], A[1], A[2]))
    else:
        r = 1
        while total_time < max_time:
            # Time start step
            start = timer()
            # Randomly select mode n time update.
            n = sample(3)
            # Generate sketching indices
            idx = generate_sketch_indices(B, total_col[n])
            # Update Factor matrix
            update_factor_bras(X_unfold, A, idx, n, rank, alpha)
            # Update learning rate
            alpha /= (r + 1)**beta
            r += 1
            # Time iteration step
            end = timer()
            total_time += end - start
            # Append error
            res_error.append(
                residual_error(X_unfold[0], norm_x, A[0], A[1], A[2]))
            time.append(total_time)

    return total_time, res_error, time
Example #24
# print "98% : " + str( final_result )
# final_result = norm(df,1,0.97)
# print "97% : " + str( final_result )

# final_result = norm(df,1,0.96)
# print "96% : " + str( final_result )

# final_result = norm (df,1,0.95)
# print "95% : " + str(final_result)

# final_result = norm (df,1,0.95)
# print "94% : " + str(final_result)
#
# final_result = norm (df,1,0.95)
# print "93% : " + str(final_result)
final_result = norm(df, 1, 0.90)
print("90% : " + str(final_result))
final_result = norm(df, 1, 0.85)
print("85% : " + str(final_result))

# saved_column = df.time           #you can also use df['column_name']
# maxi=saved_column.max()
# mini=saved_column.min()

# bin_size = 1
# global_diff = maxi - mini
# global_count = len(saved_column)
# count = [0]*(int(round(global_diff/bin_size)))
# count_size = len(count)
# print saved_column
# print "iteration"