Example #1
    def stitchAllBlobs(slidelist, quiet=True, debug=False):
        t_start_stitching = time.time()
        printl('')
        for slide_num, slide in enumerate(slidelist[:-1]):
            # Skip the last slide: pairings go from a lower slide to the upper slide, so the last
            # slide is already processed together with the second-to-last slide. I.e. blob2ds in
            # the last slide are partners to the previous slide's blob2ds, and have no direct
            # possible partners of their own.
            t_start_stitching_this_slide = time.time()
            printl('Stitching %s blob2ds from slide #%s/%s to %s blob2ds from slide #%s/%s' % (len(slide.blob2dlist), slide_num + 1,
                len(slidelist), len(slidelist[slide_num+1].blob2dlist), str(slide_num + 2), len(slidelist)), end=' ')

            progress = ProgressBar(max_val=len(slide.blob2dlist), increments=20,
                                   symbol='.')  # Note: updating per blob is more responsive than per pixel, since only a subset of pixels is used to stitch
            for b_num, blob1 in enumerate(slide.blob2dlist):
                blob1 = Blob2d.get(blob1)
                if len(blob1.possible_partners) > 0:
                    if debug:
                        printl('  Starting on a new blob from bloblist:' + str(blob1) + ' which has:' + str(
                            len(blob1.possible_partners)) + ' possible partners')
                for b2_num, blob2 in enumerate(blob1.possible_partners):
                    blob2 = Blob2d.get(blob2)
                    if debug:
                        printl('   Comparing to blob2:' + str(blob2))
                    new_stitch = Pairing(blob1.id, blob2.id, 1.1, 36, quiet=quiet) # TODO use this to assign ids to pairings
                progress.update(b_num, set_val=True)

            if quiet and not debug:
                progress.finish()
                print_elapsed_time(t_start_stitching_this_slide, time.time(), prefix='took')
        print_elapsed_time(t_start_stitching, time.time(), prefix='Stitching all slides took', endline=False)
        printl(' total')
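
(Note: each project below ships its own ProgressBar, and the constructors differ from example to example. As a point of reference, here is a minimal sketch of the interface that Example #1 appears to assume: max_val/increments/symbol, update() with a set_val flag, and finish(). The names are inferred from the calls above, not taken from any project's actual implementation.)

    import sys

    class ProgressBar:
        # Minimal sketch inferred from the usage above; not the real implementation.
        def __init__(self, max_val, increments=20, symbol='.'):
            self.max_val = max(max_val, 1)
            self.increments = increments
            self.symbol = symbol
            self.val = 0
            self.drawn = 0

        def update(self, value, set_val=True):
            # set_val=True jumps to an absolute position; False accumulates.
            self.val = value if set_val else self.val + value
            target = int(self.increments * self.val / self.max_val)
            while self.drawn < min(target, self.increments):
                sys.stdout.write(self.symbol)
                sys.stdout.flush()
                self.drawn += 1

        def finish(self):
            self.update(self.max_val)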
Example #2
    def fit(self, x, y, steps=0, batch_size=32):
        num_batches = x.shape[0] // batch_size

        for i, p in enumerate(self.particles):
            local_score = p.get_score(x, y)

            if local_score < self.global_best_score:
                self.global_best_score = local_score
                self.global_best_weights = p.get_best_weights()

        print "PSO -- Initial best score {:0.4f}".format(self.global_best_score)

        bar = ProgressBar(steps, updates=20)

        for i in range(steps):
            for j in range(num_batches):
                x_ = x[j*batch_size:(j+1)*batch_size,:]
                y_ = y[j*batch_size:(j+1)*batch_size]

                for p in self.particles:
                    local_score = p.step(x_, y_, self.global_best_weights)

                    if local_score < self.global_best_score:
                        self.global_best_score = local_score
                        self.global_best_weights = p.get_best_weights()

            bar.update(i)

        bar.done()
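
One detail worth noting in fit(): x.shape[0] // batch_size floors, so any trailing samples that do not fill a whole batch are silently skipped on every step. A toy check (shapes are illustrative only):

    import numpy as np

    x = np.arange(10).reshape(5, 2)          # 5 samples of 2 features
    batch_size = 2
    num_batches = x.shape[0] // batch_size   # 2 -- the 5th sample is never used
    for j in range(num_batches):
        print(x[j * batch_size:(j + 1) * batch_size, :].shape)  # (2, 2) twice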
Example #3
    def loadFromFile(self, file_path):
        if self.log is not None:
            self.log.log("Loading DFT data")
            self.log.indent()

            self.log.log("File = %s" % (file_path))

        with open(file_path, 'r') as file:
            text = file.read()

        text = text.rstrip()
        lines = text.split("\n")

        progress = ProgressBar("Poscar Files ",
                               22,
                               len(lines),
                               update_every=50)
        progress.estimate = False
        # This code originally had validation checks for all values. They
        # have been removed; extended experience using the program suggests
        # that they are unnecessary.

        start_line = 0
        while start_line < len(lines):
            # We need to know the number of atoms in the file
            # before we can send the proper string of text to
            # the parsing function.

            atoms_in_struct = int(lines[start_line + 5])

            base = start_line
            stride = base + 8 + atoms_in_struct
            structure_lines = lines[base:stride]

            struct = PoscarStructure(structure_lines, self.e_shift)
            self.n_atoms += struct.n_atoms
            self.structures.append(struct)

            if struct.comment not in self.all_comments:
                self.all_comments.append(struct.comment)

            start_line += 8 + atoms_in_struct
            progress.update(start_line)

            self.n_structures = len(self.structures)

        progress.finish()
        self.loaded = True

        if self.log is not None:
            self.log.log("Atoms      Loaded = %i" % self.n_atoms)
            self.log.log("Structures Loaded = %i" % self.n_structures)
            self.log.unindent()

        return self
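
The stride logic assumes fixed-size blocks: 8 header lines, with the atom count on the block's sixth line, followed by one coordinate line per atom. A toy walk over such blocks, with hypothetical header contents, just to make the arithmetic concrete:

    lines = [
        "comment", "1.0",                              # lines 0-1: comment, scale factor
        "3.0 0.0 0.0", "0.0 3.0 0.0", "0.0 0.0 3.0",   # lines 2-4: lattice vectors
        "2",                                           # line 5: atom count (what the loop reads)
        "header-6", "header-7",                        # remaining header lines, contents unused here
        "0.0 0.0 0.0", "0.5 0.5 0.5",                  # one coordinate line per atom
    ]
    start_line = 0
    while start_line < len(lines):
        atoms_in_struct = int(lines[start_line + 5])
        block = lines[start_line:start_line + 8 + atoms_in_struct]
        start_line += 8 + atoms_in_struct
        print(len(block))  # 10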
Example #4
File: lsp.py  Project: derangedhk417/pyfit
def GenerateLocalStructureParams(neighbor_list, potential_config, log=None):

    if log is not None:
        log.log("Generating Local Structure Parameters")
        log.indent()

    # Here we compute the number of operations that will need
    # to take place in order to calculate the structural
    # parameters. This is somewhat of an estimate, but the
    # operation should scale roughly by a factor of n^2.
    # In practice, this has generally been an excellent estimate.
    n_total = 0
    for struct in neighbor_list:
        for atom in struct:
            n_total += (len(atom)**2 - len(atom)) / 2
    n_processed = 0

    progress = ProgressBar("Structural Parameters ",
                           22,
                           n_total,
                           update_every=8)

    structural_parameters = []

    parameters_per_atom = potential_config.n_legendre_polynomials
    parameters_per_atom *= potential_config.n_r0
    # Iterate over every structure, and then over every atom within it. The
    # structural-parameter calculation for each individual atom is delegated
    # to computeParameters().
    for struct in neighbor_list:
        processed = 0
        parameters_for_structure = np.zeros((len(struct), parameters_per_atom))
        for idx, atom_neighbors in enumerate(struct):
            processed += (len(atom_neighbors)**2 - len(atom_neighbors)) / 2
            # Compute the parameters for this atom.
            parameters_for_structure[idx, :] = computeParameters(
                atom_neighbors, potential_config)

        n_processed += processed
        progress.update(n_processed)

        structural_parameters.append(parameters_for_structure)

    progress.finish()

    if log is not None:
        log.log("Time Elapsed = %ss" % progress.ttc)
        log.unindent()

    return structural_parameters
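
The cost estimate (len(atom)**2 - len(atom)) / 2 is just the number of unordered pairs among an atom's n neighbors, n(n-1)/2, which is why the work scales roughly as n^2. A quick sanity check:

    from math import comb

    n = 6
    assert (n**2 - n) // 2 == comb(n, 2) == 15  # pairwise interactions among 6 neighbors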
Example #5
    def set_all_shape_contexts(slidelist):
        # Note: uses the shape-contexts approach from
        # http://www.cs.berkeley.edu/~malik/papers/mori-belongie-malik-pami05.pdf
        # The paper uses 'Representative Shape Contexts' to do initial matching;
        # this implementation instead checks bounds for possible overlaps.
        t0 = time.time()
        pb = ProgressBar(max_val=sum(
            len(Blob2d.get(b2d).edge_pixels) for slide in slidelist
            for b2d in slide.blob2dlist))
        for slide in slidelist:
            for blob in slide.blob2dlist:
                Blob2d.get(blob).set_shape_contexts(36)
                pb.update(len(Blob2d.get(blob).edge_pixels), set_val=False)
        pb.finish()
        print_elapsed_time(t0, time.time(), prefix='took')
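
For readers unfamiliar with the cited paper: a shape context is a log-polar histogram of where a shape's other edge points lie relative to one reference point. A rough, self-contained sketch of the idea (12 angle bins x 3 radius bins = 36, matching set_shape_contexts(36)); this is an illustration, not the project's code:

    import numpy as np

    def shape_context(points, i, n_bins_theta=12, n_bins_r=3):
        # points: (N, 2) array of edge-pixel coordinates; i: index of the reference point.
        rel = np.delete(points, i, axis=0) - points[i]
        r = np.log1p(np.hypot(rel[:, 0], rel[:, 1]))        # log radius
        theta = np.arctan2(rel[:, 1], rel[:, 0]) % (2 * np.pi)
        t_bin = np.minimum((theta / (2 * np.pi) * n_bins_theta).astype(int), n_bins_theta - 1)
        r_bin = np.minimum((r / r.max() * n_bins_r).astype(int), n_bins_r - 1)
        hist = np.zeros((n_bins_r, n_bins_theta))
        np.add.at(hist, (r_bin, t_bin), 1)                  # count points per bin
        return hist.ravel()                                 # 36-element descriptor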
Example #6
    def loadFromText(self, text):
        lines = text.rstrip().split('\n')

        self.config = PotentialConfig().loadFromText('\n'.join(lines[:8]))

        self.potential_type = int(self._getCellsFromLine(lines[8])[0])
        self.n_structures = int(self._getCellsFromLine(lines[9])[0])
        self.n_atoms = int(self._getCellsFromLine(lines[10])[0])

        parameters_per_atom = self.config.n_legendre_polynomials
        parameters_per_atom *= self.config.n_r0

        progress = ProgressBar("Loading Training Set",
                               22,
                               self.n_structures,
                               update_every=10)

        # Every pair of lines from line 13 onwards corresponds to a single
        # atom; line 12 contains no useful information.

        # This code will convert the file into a list of structures. Each
        # element in this list is a list of training inputs, each one
        # corresponding to an atom in the structure.
        self.structures = []
        idx = 12
        current_struct = []
        current_id = 0
        while idx < len(lines):

            atom = TrainingInput().fromLines(lines[idx], lines[idx + 1],
                                             parameters_per_atom)

            if atom.structure_id != current_id:
                self.structures.append(current_struct)
                current_struct = []
                current_id = atom.structure_id
                progress.update(current_id + 1)

            current_struct.append(atom)
            idx += 2

        progress.finish()
        self.structures.append(current_struct)

        if self.log is not None:
            self.log.log("Atoms      Loaded = %i" % self.n_atoms)
            self.log.log("Structures Loaded = %i" % self.n_structures)
            self.log.log("Time Elapsed = %ss" % progress.ttc)
            self.log.unindent()

        return self
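
The id-change bookkeeping in the loop above (flush current_struct whenever structure_id changes, plus a final append after the loop) is the standard grouping of a sorted stream. itertools.groupby expresses the same idea, assuming atoms arrive ordered by structure_id:

    from itertools import groupby

    atoms = [("a", 0), ("b", 0), ("c", 1), ("d", 2)]  # (payload, structure_id)
    structures = [[a for a, _ in grp]
                  for _, grp in groupby(atoms, key=lambda t: t[1])]
    print(structures)  # [['a', 'b'], ['c'], ['d']]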
Example #7
def compute_dff(data, percentile=8., window_size=1., step_size=.025, subtract_minimum=True, pad_mode='edge'):
    """Compute delta-f-over-f

    Computes the percentile-based delta-f-over-f along the 0th axis of the supplied data.

    Parameters
    ----------
    data : np.ndarray
        n-dimensional data with a sampling-rate attribute `fs` (DFF is taken over axis 0)
    percentile : float 
        percentile of data window to be taken as F0
    window_size : float
        size of window to determine F0, in seconds
    step_size : float
        size of steps used to determine F0, in seconds
    subtract_minimum : bool
        subtract minimum value from data before computing
    pad_mode : str 
        mode argument for np.pad, used to specify F0 determination at start of data

    Returns
    -------
    Data of the same shape as input, transformed to DFF
    """
    data = data.copy()

    window_size = int(window_size*data.fs)
    step_size = int(step_size*data.fs)

    if step_size<1:
        warnings.warn('Requested a step size smaller than sampling interval. Using sampling interval.')
        step_size = 1.

    if subtract_minimum:
        data -= data.min()
     
    pad_size = window_size - 1
    pad = ((pad_size, 0),) + tuple((0, 0) for _ in range(data.ndim - 1))
    padded = np.pad(data, pad, mode=pad_mode)

    out_size = ((len(padded) - window_size) // step_size) + 1
    pbar = ProgressBar(maxval=out_size).start()
    f0 = []
    for idx,win in enumerate(sw(padded, ws=window_size, ss=step_size)):
        f0.append(np.percentile(win, percentile, axis=0))
        pbar.update(idx)
    f0 = np.repeat(f0, step_size, axis=0)[:len(data)]
    pbar.finish()

    return (data-f0)/f0 
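
sw here is the project's sliding-window helper. A rough stand-in built on NumPy's own sliding_window_view, under an assumption about sw's semantics (windows of length ws along axis 0, one window every ss samples, window axis first), could look like:

    import numpy as np

    def sliding_windows(a, ws, ss):
        # Windows of length ws along axis 0, one every ss samples.
        view = np.lib.stride_tricks.sliding_window_view(a, ws, axis=0)[::ss]
        for win in view:
            yield np.moveaxis(win, -1, 0)  # put the window axis first, as the caller expects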
Example #8
    def writeToFile(self, file_path):
        if self.log is not None:
            self.log.log("Writing Training Set to File")
            self.log.indent()
            self.log.log("File = %s" % (file_path))

        # 50 KB buffer, because these files are always large. This should
        # make the write a little faster.
        with open(file_path, 'w', 1024 * 50) as file:
            file.write(self.config.toFileString(prepend_comment=True))
            file.write(' # %i - Potential Type\n' % (1))
            file.write(' # %i - Number of Structures\n' % (self.n_structures))
            file.write(' # %i - Number of Atoms\n' % (self.n_atoms))
            file.write(' # ATOM-ID GROUP-NAME GROUP_ID STRUCTURE_ID ')
            file.write('STRUCTURE_Natom STRUCTURE_E_DFT STRUCTURE_Vol\n')

            progress = ProgressBar("Writing LSParams ",
                                   22,
                                   self.n_atoms,
                                   update_every=50)
            progress.estimate = False

            atom_idx = 0
            for struct in self.structures:
                for training_input in struct:

                    file.write(
                        'ATOM-%i %s %i %i %i %.6E %.6E\n' %
                        (atom_idx, training_input.group_name,
                         training_input.group_id, training_input.structure_id,
                         training_input.structure_n_atoms,
                         training_input.structure_energy,
                         training_input.structure_volume))

                    current_params = training_input.structure_params
                    params_strs = ['%.6E' % g for g in current_params]
                    params_strs = ' '.join(params_strs)
                    file.write('Gi  %s\n' % (params_strs))

                    atom_idx += 1

                progress.update(atom_idx)

            progress.finish()
            file.write('\n')

        if self.log is not None:
            self.log.log("Time Elapsed = %ss" % progress.ttc)
            self.log.unindent()
Example #9
	def generateLSP(self, neighbors, max_chunk=500):
		chunk_start  = 0
		chunk_stride = chunk_start + max_chunk

		lsp = None

		progress = ProgressBar(
			"Structural Parameters ",
			22, (len(neighbors) + max_chunk - 1) // max_chunk,  # ceil: total number of chunks
			update_every = 5
		)

		idx = 0

		while chunk_start < len(neighbors):
			self.loadNeighbors(neighbors[chunk_start:chunk_stride])

			tmp = self._computeLSP()
			self.cleanupNeighbors()

			if lsp is None:
				lsp = tmp
			else:
				lsp = torch.cat((lsp, tmp), 0)

			chunk_start  += max_chunk
			chunk_stride += max_chunk

			chunk_stride = min(chunk_stride, len(neighbors))

			idx += 1
			progress.update(idx)

		progress.finish()

		return lsp
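
A side note on the accumulation pattern: torch.cat((lsp, tmp), 0) inside the loop copies everything accumulated so far on each pass, which is quadratic in the total size. A sketch of the same loop, under the same assumptions about loadNeighbors/_computeLSP/cleanupNeighbors, that collects the chunks and concatenates once:

    import torch

    def generateLSP(self, neighbors, max_chunk=500):
        # Same chunking as above, but concatenating once at the end.
        chunks = []
        for start in range(0, len(neighbors), max_chunk):
            self.loadNeighbors(neighbors[start:start + max_chunk])
            chunks.append(self._computeLSP())
            self.cleanupNeighbors()
        return torch.cat(chunks, 0)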
Example #10
    def _train_loop(self):
        progress = ProgressBar("Training ",
                               22,
                               self.iterations + int(self.iterations == 0),
                               update_every=1)

        while self.iteration <= self.iterations:
            progress.update(self.iteration)

            self.training_losses[self.iteration] = self.last_loss

            if self.restart_error != 0.0:
                if self.last_loss > self.restart_error:
                    if self.restarts == 3:
                        if self.log is not None:
                            msg = "Maximum number of restarts exceeded."
                            self.log.log(msg)
                        break
                    else:
                        if self.log is not None:
                            msg = "Error threshold exceeded, restarting."
                            self.log.log(msg)
                        self.need_to_restart = True
                        self.restarts += 1
                        break

            # The following lines figure out if we have reached an iteration
            # where validation information or volume vs. energy information
            # needs to be stored.
            if self.val_interval != 0:
                if self.iteration % self.val_interval == 0:
                    idx = (self.iteration // self.val_interval)
                    self.validation_losses[idx] = self.validation_loss()

            if self.energy_interval != 0:
                if self.iteration % self.energy_interval == 0:
                    idx = (self.iteration // self.energy_interval)
                    self.energies[idx, :] = self.get_structure_energies()

            if self.backup_interval != 0:
                if self.iteration % self.backup_interval == 0:
                    idx = self.iteration
                    path = self.backup_dir + 'nn_bk_%05i.nn.dat' % idx
                    layers = self.nn.getNetworkValues()
                    self.potential.layers = layers
                    self.potential.writeNetwork(path)

            if self.smi_log != '':
                if self.iteration % 50 == 0:
                    try:
                        smi_stdout = subprocess.getoutput("nvidia-smi")
                        self.smi_outputs.append(smi_stdout)
                    except Exception:
                        self.smi_outputs.append("nvidia-smi call failed")

            # Perform an evaluate and correct step, while storing
            # the resulting loss in self.training_losses.
            self.optimizer.step(self.training_closure)

            self.iteration += 1

        progress.finish()
Example #11
        elif args.sweep_dir != '':
            delta = args.z_max - args.z_min
            args.z_min = z_center - (delta / 2)
            args.z_max = args.z_min + delta

    if args.sweep_dir != '':
        if not os.path.isdir(args.sweep_dir):
            os.mkdir(args.sweep_dir)

        if args.sweep_dir[-1] != '/':
            args.sweep_dir += '/'

        progress = ProgressBar("Rendering ", 22, args.sweep_n, update_every=1)

        sweep = np.linspace(args.z_min, args.z_max, args.sweep_n)
        for idx, z in enumerate(sweep):
            fname = args.sweep_dir + '%05i.png' % idx
            render_heatmap(structure,
                           potential,
                           nn,
                           res,
                           width,
                           z,
                           args,
                           save=fname)

            progress.update(idx + 1)

        progress.finish()
    else:
        render_heatmap(structure, potential, nn, res, width, args.z, args)
Example #12
File: motion.py  Project: agiovann/pyfluo
def compute_motion_correction(mov, max_shift=5, sub_pixel=True, template_func=np.median, n_iters=5):
    """Computes motion correction shifts by template matching
    
    Parameters
    ----------
    (described in correct_motion doc)
    
    This can be used on its own to obtain only the shifts, without correcting the movie
    """
    def _run_iter(mov, base_shape, ms, sub_pixel):
        mov = mov.astype(np.float32)
        h_i,w_i = base_shape
        template=template_func(mov,axis=0)
        template=template[ms:h_i-ms,ms:w_i-ms].astype(np.float32)
        h,w = template.shape

        shifts=[]   # store the amount of shift in each frame
        
        for i, frame in enumerate(mov):
            pbar.update(it_i*len(mov) + i)
            res = cv2.matchTemplate(frame, template, cv2.TM_CCORR_NORMED)
            avg_corr = np.mean(res)
            top_left = cv2.minMaxLoc(res)[3]
            sh_y, sh_x = top_left
            bottom_right = (top_left[0] + w, top_left[1] + h)

            if sub_pixel:
                if (0 < top_left[1] < 2 * ms - 1) & (0 < top_left[0] < 2 * ms - 1):
                    # If the max is internal, check for subpixel shift using
                    # Gaussian peak registration.
                    log_xm1_y = np.log(res[sh_x - 1, sh_y])
                    log_xp1_y = np.log(res[sh_x + 1, sh_y])
                    log_x_ym1 = np.log(res[sh_x, sh_y - 1])
                    log_x_yp1 = np.log(res[sh_x, sh_y + 1])
                    four_log_xy = 4 * np.log(res[sh_x, sh_y])

                    sh_x_n = -(sh_x - ms + (log_xm1_y - log_xp1_y) / (2 * log_xm1_y - four_log_xy + 2 * log_xp1_y))
                    sh_y_n = -(sh_y - ms + (log_x_ym1 - log_x_yp1) / (2 * log_x_ym1 - four_log_xy + 2 * log_x_yp1))
                else:
                    sh_x_n = -(sh_x - ms)
                    sh_y_n = -(sh_y - ms)

                M = np.float32([[1, 0, sh_y_n], [0, 1, sh_x_n]])
                mov[i] = cv2.warpAffine(frame, M, (w_i, h_i), flags=cv2.INTER_CUBIC)
            else:
                # These must be named sh_x_n/sh_y_n so the append below works.
                sh_x_n = -(top_left[1] - ms)
                sh_y_n = -(top_left[0] - ms)
                M = np.float32([[1, 0, sh_y_n], [0, 1, sh_x_n]])
                mov[i] = cv2.warpAffine(frame, M, (w_i, h_i))
            shifts.append([sh_x_n, sh_y_n, avg_corr])

        return (template, np.array(shifts), mov)

    mov_orig = mov.copy()
    h_i,w_i = mov.shape[1:]
    templates = []
    values = []
    n_steps = n_iters*len(mov_orig) #for progress bar
    pbar = ProgressBar(maxval=n_steps).start() 
    for it_i in range(n_iters):
        pbar.update(it_i*len(mov_orig))
        ti,vi,mov = _run_iter(mov, (h_i,w_i), max_shift, sub_pixel)
        templates.append(ti)
        values.append(vi)
    pbar.finish()
    return np.array(templates), np.array(values)
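
The subpixel refinement in _run_iter fits a Gaussian (a parabola in log space) through the correlation maximum and its two neighbors; for an exactly Gaussian peak the recovered offset is exact. A small numerical check of the 1-D version of that formula:

    import numpy as np

    # Sample a Gaussian centered at 10.3, take the integer argmax, then refine.
    x = np.arange(20)
    r = np.exp(-0.5 * ((x - 10.3) / 2.0) ** 2)
    p = int(np.argmax(r))                                   # 10
    lm, l0, lp = np.log(r[p - 1]), np.log(r[p]), np.log(r[p + 1])
    delta = (lm - lp) / (2 * lm - 4 * l0 + 2 * lp)          # same form as above
    print(p + delta)                                        # 10.3 (exact for a Gaussian)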
Example #13
File: motion.py  Project: agiovann/pyfluo
def compute_motion_AG(mov, max_shift_hw=(5, 5), show_movie=False, template=np.median, interpolation=cv2.INTER_LINEAR, in_place=False):
        """
        Performs motion correction using the OpenCV matchTemplate function. At every iteration a template is built by taking the median of all frames, which is then used to align the other frames.

        Parameters
        ----------
        max_shift_hw: maximum pixel shifts allowed when correcting, as (height, width)
        show_movie : display the movie while correcting it
        in_place: if True the input array is overwritten

        Returns
        -------
        mov: motion-corrected movie
        template: the template used for alignment
        shifts : list of [x shift, y shift, correlation with template], one per frame
        """
        
        if not in_place:
            mov = mov.copy()

        mov = mov.astype(np.float32)
        n_frames_, h_i, w_i = mov.shape

        ms_h, ms_w = max_shift_hw

        if callable(template):
            template = template(mov, axis=0)
        elif not isinstance(template, np.ndarray):
            raise Exception('Only ndarrays or callables are accepted')

        template = template[ms_h:h_i - ms_h, ms_w:w_i - ms_w].astype(np.float32)
        h, w = template.shape  # template width and height
        
        #if show_movie:
        #    cv2.imshow('template',template/255)
        #    cv2.waitKey(2000) 
        #    cv2.destroyAllWindows()
        
        # Run the algorithm; press q to stop it.
        shifts = []  # store the amount of shift in each frame
        pbar = ProgressBar(maxval=n_frames_).start()
        for i, frame in enumerate(mov):
            pbar.update(i)
            res = cv2.matchTemplate(frame, template, cv2.TM_CCORR_NORMED)
            avg_corr = np.mean(res)
            top_left = cv2.minMaxLoc(res)[3]
            sh_y, sh_x = top_left
            bottom_right = (top_left[0] + w, top_left[1] + h)

            if (0 < top_left[1] < 2 * ms_h - 1) & (0 < top_left[0] < 2 * ms_w - 1):
                # If the max is internal, check for subpixel shift using
                # Gaussian peak registration.
                log_xm1_y = np.log(res[sh_x - 1, sh_y])
                log_xp1_y = np.log(res[sh_x + 1, sh_y])
                log_x_ym1 = np.log(res[sh_x, sh_y - 1])
                log_x_yp1 = np.log(res[sh_x, sh_y + 1])
                four_log_xy = 4 * np.log(res[sh_x, sh_y])

                sh_x_n = -(sh_x - ms_h + (log_xm1_y - log_xp1_y) / (2 * log_xm1_y - four_log_xy + 2 * log_xp1_y))
                sh_y_n = -(sh_y - ms_w + (log_x_ym1 - log_x_yp1) / (2 * log_x_ym1 - four_log_xy + 2 * log_x_yp1))
            else:
                sh_x_n = -(sh_x - ms_h)
                sh_y_n = -(sh_y - ms_w)

            M = np.float32([[1, 0, sh_y_n], [0, 1, sh_x_n]])
            mov[i] = cv2.warpAffine(frame, M, (w_i, h_i), flags=interpolation)

            shifts.append([sh_x_n, sh_y_n, avg_corr])

            if show_movie:
                fr = cv2.resize(mov[i], None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
                cv2.imshow('frame', fr / 255.0)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    cv2.destroyAllWindows()
                    break
        pbar.finish()
        cv2.destroyAllWindows()
        return (mov, template, shifts)
Example #14
def bloom_b3ds(blob3dlist, stitch=False):
    allb2ds = [Blob2d.get(b2d) for b3d in blob3dlist for b2d in b3d.blob2ds]
    printl('\nProcessing internals of ' + str(len(allb2ds)) + ' 2d blobs via \'blooming\' ', end='')
    t_start_bloom = time.time()
    num_unbloomed = len(allb2ds)
    pb = ProgressBar(max_val=sum(len(b2d.pixels) for b2d in allb2ds),
                     increments=50)
    for bnum, blob2d in enumerate(allb2ds):
        blob2d.gen_internal_blob2ds()  # NOTE will have len 0 if no blooming can be done
        pb.update(len(blob2d.pixels), set_val=False)  # set_val=False adds to an internal counter
    pb.finish()

    print_elapsed_time(t_start_bloom, time.time(), prefix='took')
    printl('Before blooming there were: ' + str(num_unbloomed) +
           ' b2ds contained within b3ds, there are now ' +
           str(len(Blob2d.all)))

    # Setting possible_partners
    printl('Pairing all new blob2ds with their potential partners in adjacent slides')
    max_avail_depth = max(b2d.recursive_depth for b2d in Blob2d.all.values())
    for cur_depth in range(1, max_avail_depth):  # Skip those at depth 0
        depth = [b2d.id for b2d in Blob2d.all.values()
                 if b2d.recursive_depth == cur_depth]
        max_h_d = max(Blob2d.all[b2d].height for b2d in depth)
        min_h_d = min(Blob2d.all[b2d].height for b2d in depth)
        ids_by_height = [[] for _ in range(max_h_d - min_h_d + 1)]
        for b2d in depth:
            ids_by_height[Blob2d.get(b2d).height - min_h_d].append(b2d)
        for height_val, h in enumerate(ids_by_height[:-1]):  # All but the last one
            for b2d in h:
                b2d = Blob2d.all[b2d]
                b2d.set_possible_partners(ids_by_height[height_val + 1])

    # Creating b3ds
    printl('Creating 3d blobs from the generated 2d blobs')
    all_new_b3ds = []
    # Skip offset of zero, which refers to the b3ds which have already been stitched.
    for depth_offset in range(1, max_avail_depth + 1):
        printd('Depth_offset: ' + str(depth_offset), Config.debug_blooming)
        new_b3ds = []
        for b3d in blob3dlist:
            all_d1_with_pp_in_this_b3d = []
            for b2d in b3d.blob2ds:
                # Note this is the alternative to storing b3dID with b2ds
                b2d = Blob2d.get(b2d)
                d_1 = [blob for blob in b2d.getdescendants()
                       if blob.recursive_depth == b2d.recursive_depth + depth_offset]
                if len(d_1):
                    for desc in d_1:
                        if len(desc.possible_partners):
                            all_d1_with_pp_in_this_b3d.append(desc.id)
            all_d1_with_pp_in_this_b3d = set(all_d1_with_pp_in_this_b3d)
            if len(all_d1_with_pp_in_this_b3d) != 0:
                printd(' Working on b3d: ' + str(b3d), Config.debug_blooming)
                printd('  Len of all_d1_with_pp: ' + str(len(all_d1_with_pp_in_this_b3d)),
                       Config.debug_blooming)
                printd('  They are: ' + str(all_d1_with_pp_in_this_b3d), Config.debug_blooming)
                printd('   = ' + str(list(Blob2d.get(b2d) for b2d in all_d1_with_pp_in_this_b3d)),
                       Config.debug_blooming)
            for b2d in all_d1_with_pp_in_this_b3d:
                b2d = Blob2d.get(b2d)
                printd('    Working on b2d: ' + str(b2d) + ' with pp: ' + str(b2d.possible_partners),
                       Config.debug_blooming)
                if b2d.b3did == -1:  # unset
                    cur_matches = [b2d]  # NOTE: changed; the .getdescendants() call was removed  # HACK
                    for pp in b2d.possible_partners:
                        printd("     *Checking if pp:" + str(pp) + ' is in all_d1: ' +
                               str(all_d1_with_pp_in_this_b3d), Config.debug_blooming)
                        if pp in all_d1_with_pp_in_this_b3d:  # HACK REMOVED
                            printd("     Added partner: " + str(pp), Config.debug_blooming)
                            cur_matches += [Blob2d.get(b)
                                            for b in Blob2d.get(pp).getpartnerschain()]
                    if len(cur_matches) > 1:
                        printd("**LEN OF CUR_MATCHES MORE THAN 1", Config.debug_blooming)
                        new_b3d_list = [blob.id for blob in set(cur_matches)
                                        if blob.recursive_depth == b2d.recursive_depth
                                        and blob.b3did == -1]
                        if len(new_b3d_list):
                            new_b3ds.append(Blob3d(new_b3d_list, r_depth=b2d.recursive_depth))
        all_new_b3ds += new_b3ds
    printl(' Made a total of ' + str(len(all_new_b3ds)) + ' new b3ds')

    if stitch:
        # Set up shape contexts
        printl('Setting shape contexts for stitching')
        for b2d in [Blob2d.all[b2d] for b3d in all_new_b3ds for b2d in b3d.blob2ds]:
            b2d.set_shape_contexts(36)

        # Stitching
        printl('Stitching the newly generated 2d blobs')
        for b3d_num, b3d in enumerate(all_new_b3ds):
            printl(' Working on b3d: ' + str(b3d_num) + ' / ' +
                   str(len(all_new_b3ds)))
            Pairing.stitch_blob2ds(b3d.blob2ds, debug=False)
    return all_new_b3ds
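
The ids_by_height bucketing above just groups ids by their height offset from min_h_d, so that each bucket can be paired with the bucket one level up. A toy version of that indexing:

    # Toy illustration of the height bucketing: index = height - min_height
    heights = [3, 5, 4, 3, 5]
    min_h, max_h = min(heights), max(heights)
    buckets = [[] for _ in range(max_h - min_h + 1)]
    for idx, h in enumerate(heights):
        buckets[h - min_h].append(idx)
    print(buckets)  # [[0, 3], [2], [1, 4]]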
Example #15
    def GenerateNeighborList(self, structures):
        if self.log is not None:
            self.log.log("Generating Neighbor List")
            self.log.indent()

        # For each atom within each structure, we need to generate a list
        # of atoms within the cutoff distance. Periodic images need to be
        # accounted for during this process. Neighbors in this list are
        # specified as coordinates, rather than indices.

        # The final return value of this function is a 3-dimensional list,
        # with the following access structure:
        #     neighbor = list[structure][atom][neighbor_index]

        # First we will compute the total number of atoms that need to be
        # processed in order to get an estimate of the time this will take
        # to complete.
        n_total = sum([struct.n_atoms**2 for struct in structures])

        progress = ProgressBar("Neighbor List ", 22, n_total, update_every=25)
        progress.estimate = False

        # IMPORTANT NOTE: This needs to be multiplied by 1.5 when PINN
        #                 gets implemented.
        cutoff = self.config.cutoff_distance * 1.0

        n_processed = 0

        structure_start = 0
        structure_stride = 0
        for structure in structures:

            # Normalize the translation vectors.
            a1_n = np.linalg.norm(structure.a1)
            a2_n = np.linalg.norm(structure.a2)
            a3_n = np.linalg.norm(structure.a3)

            # Numpy will automatically convert these to arrays when they are
            # passed to numpy functions, but it will do that each time we call
            # a function. Converting them beforehand will save some time.
            a1 = structure.a1
            a2 = structure.a2
            a3 = structure.a3

            # Determine the number of times to repeat the
            # crystal structure in each direction.

            x_repeat = int(np.ceil(cutoff / a1_n))
            y_repeat = int(np.ceil(cutoff / a2_n))
            z_repeat = int(np.ceil(cutoff / a3_n))

            # Now we construct an array of atoms that contains all
            # of the repeated atoms that are necessary. We need to
            # repeat the crystal structure from -repeat*A_n to
            # positive repeat*A_n.

            # This is the full periodic structure that we generate.
            # It is a list of vectors, each vector being a length 3
            # list of floating points.
            n_periodic_atoms = (2 * x_repeat + 1)
            n_periodic_atoms *= (2 * y_repeat + 1)
            n_periodic_atoms *= (2 * z_repeat + 1)
            n_periodic_atoms *= structure.n_atoms
            periodic_structure = np.zeros((n_periodic_atoms, 3))
            atom_idx = 0
            for i in range(-x_repeat, x_repeat + 1):
                for j in range(-y_repeat, y_repeat + 1):
                    for k in range(-z_repeat, z_repeat + 1):
                        # This is the new location to use as the center
                        # of the crystal lattice.
                        center_location = a1 * i + a2 * j + a3 * k

                        # Now we add each atom + new center location
                        # into the periodic structure.
                        for atom in structure.atoms:
                            val = atom + center_location
                            periodic_structure[atom_idx] = val
                            atom_idx += 1

            # Here we actually iterate over every atom and then for each atom
            # determine which atoms are within the cutoff distance.
            for atom in structure.atoms:
                # This statement subtracts the current atom position from the
                # position of each potential neighbor, element wise, and then
                # computes the magnitude of each resulting vector.
                distances = np.linalg.norm(periodic_structure - atom, axis=1)

                # This is special numpy syntax for selecting all items in an
                # array that meet a condition. The boolean operators in the
                # square brackets convert the 'distances' array into two
                # arrays of boolean values and compute their element-wise
                # 'and'. The result is used to select all items in
                # 'periodic_structure' that correspond to a value of True.
                mask = (distances > 1e-8) & (distances < cutoff)
                neighbors = periodic_structure[mask]

                # This line just takes all of the neighbor vectors that we now
                # have (as absolute vectors) and changes them into vectors
                # relative to the atom that we are currently finding neighbors
                # for.
                neighbor_vecs = neighbors - atom

                self.atom_neighbors.append(neighbor_vecs)

                structure_stride += 1

            self.structure_strides.append((structure_start, structure_stride))
            structure_start = structure_stride

            # Update the performance information so we can report
            # progress to the user.
            n_processed += structure.n_atoms**2
            progress.update(n_processed)

        progress.update(n_total)
        progress.finish()

        if self.log is not None:
            self.log.log("Time Elapsed = %ss" % progress.ttc)
            self.log.unindent()
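
The neighbor selection relies on NumPy boolean masking, as the long comment above explains. A compact demonstration of exactly that selection (the points and cutoff are made up):

    import numpy as np

    pts = np.array([[0., 0., 0.], [1., 0., 0.], [5., 0., 0.]])
    atom = np.zeros(3)
    distances = np.linalg.norm(pts - atom, axis=1)  # [0., 1., 5.]
    mask = (distances > 1e-8) & (distances < 4.0)   # drop the atom itself and far points
    print(pts[mask])                                # [[1. 0. 0.]]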