Example #1
def calc_psf_and_center(ins, wave, offset_r, offset_theta, oversample, pix_scale, fov_pixels, trim_fov_pixels=None):
    '''
    Following the treatment in pandeia_data/dev/make_psf.py to handle
    off-center PSFs for use as a kernel in later convolutions.
    '''

    if offset_r > 0.:
        #roll back to center
        dx = int(np.rint( offset_r * np.sin(np.deg2rad(offset_theta)) / pix_scale ))
        dy = int(np.rint( offset_r * np.cos(np.deg2rad(offset_theta)) / pix_scale ))
        dmax = np.max([np.abs(dx), np.abs(dy)])

        # pandeia forces offset to nearest integer subsampled pixel.
        # At the risk of having subpixel offsets in the recentering,
        # I'm not sure we want to do this in order to capture
        # small-scale spatial variations properly.
        #ins.options['source_offset_r'] = np.sqrt(dx**2 + dy**2) * pix_scale

        psf_result = ins.calc_psf(monochromatic=wave*1e-6, oversample=oversample, fov_pixels=fov_pixels + 2*dmax)

        image = psf_result[0].data
        image = np.roll(image, dx * oversample, axis=1)
        image = np.roll(image, -dy * oversample, axis=0)
        image = image[dmax * oversample:(fov_pixels + dmax) * oversample,
                      dmax * oversample:(fov_pixels + dmax) * oversample]
        #trim if requested
        if trim_fov_pixels is not None:
            trim_amount = int(oversample * (fov_pixels - trim_fov_pixels) / 2)
            image = image[trim_amount:-trim_amount, trim_amount:-trim_amount]
        psf_result[0].data = image
    else:
        psf_result = ins.calc_psf(monochromatic=wave*1e-6, oversample=oversample, fov_pixels=fov_pixels)

    return psf_result
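The roll-and-crop recentering above is plain numpy; a minimal standalone sketch of the same idea on a toy oversampled array (all values here are made up):

import numpy as np

oversample, fov_pixels, dmax = 2, 4, 1
dx, dy = 1, 1
image = np.zeros(((fov_pixels + 2 * dmax) * oversample,) * 2)
image[6, 4] = 1.0                                  # off-center "source"
image = np.roll(image, dx * oversample, axis=1)    # col 4 -> 6
image = np.roll(image, -dy * oversample, axis=0)   # row 6 -> 4
image = image[dmax * oversample:(fov_pixels + dmax) * oversample,
              dmax * oversample:(fov_pixels + dmax) * oversample]
assert image.shape == (fov_pixels * oversample,) * 2
assert image[2, 4] == 1.0                          # recentered and cropped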
Example #2
    def time_slice(self, t_start, t_stop):
        """
        Creates a new AnalogSignal corresponding to the time slice of the
        original AnalogSignal between times t_start and t_stop. Note that,
        for numerical stability reasons, if t_start and t_stop do not fall
        exactly on the time bins defined by the sampling_period, they are
        rounded to the nearest sampling bins.
        """

        t_start = t_start.rescale(self.sampling_period.units)
        t_stop = t_stop.rescale(self.sampling_period.units)
        i = (t_start - self.t_start) / self.sampling_period
        j = (t_stop - self.t_start) / self.sampling_period
        i = int(np.rint(i.magnitude))
        j = int(np.rint(j.magnitude))

        if (i < 0) or (j > len(self)):
            raise ValueError('t_start, t_stop have to be within the analog '
                             'signal duration')

        # we're going to send the list of indices so that we get a *copy* of
        # the sliced data
        obj = super(BaseAnalogSignal, self).__getitem__(np.arange(i, j, 1))
        obj.t_start = self.t_start + i * self.sampling_period

        return obj
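The index arithmetic above is just nearest-bin rounding; the same computation with plain floats in place of quantities:

import numpy as np

t0, dt = 0.0, 0.1                       # signal start, sampling period (s)
t_start, t_stop = 0.26, 0.74            # requested slice boundaries
i = int(np.rint((t_start - t0) / dt))   # -> 3, snapped to the nearest bin
j = int(np.rint((t_stop - t0) / dt))    # -> 7
indices = np.arange(i, j, 1)            # indices passed to __getitem__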
Example #3
    def process(old_face, new_face, raw_mask):
        height, width, _ = old_face.shape
        height = height // 2
        width = width // 2

        y_indices, x_indices, _ = np.nonzero(raw_mask)
        y_crop = slice(np.min(y_indices), np.max(y_indices))
        x_crop = slice(np.min(x_indices), np.max(x_indices))
        y_center = int(np.rint((np.max(y_indices) + np.min(y_indices)) / 2 + height))
        x_center = int(np.rint((np.max(x_indices) + np.min(x_indices)) / 2 + width))

        insertion = np.rint(new_face[y_crop, x_crop] * 255.0).astype("uint8")
        insertion_mask = np.rint(raw_mask[y_crop, x_crop] * 255.0).astype("uint8")
        insertion_mask[insertion_mask != 0] = 255
        prior = np.rint(np.pad(old_face * 255.0,
                               ((height, height), (width, width), (0, 0)),
                               'constant')).astype("uint8")

        blended = cv2.seamlessClone(insertion,  # pylint: disable=no-member
                                    prior,
                                    insertion_mask,
                                    (x_center, y_center),
                                    cv2.NORMAL_CLONE)  # pylint: disable=no-member
        blended = blended[height:-height, width:-width]

        return blended.astype("float32") / 255.0
Example #4
def jacobianMatrix(diffeo, resol=[1.,1.,1.], periodic=False):
    if diffeo.ndim > 4:
        print('No jacobian in dimension larger than 3')
        return

    if diffeo.ndim == 4:
        if periodic:
            w = np.mgrid[0:diffeo.shape[1], 0:diffeo.shape[2], 0:diffeo.shape[3]]
            dw = diffeo-w
            for k in range(3):
                diffeo[k,:,:,:] -= np.rint(dw[k,:,:,:]/diffeo.shape[k+1])*diffeo.shape[k+1]
        grad = np.zeros([3,3,diffeo.shape[1], diffeo.shape[2], diffeo.shape[3]])
        grad[0,:,:,:,:] = gradient(np.squeeze(diffeo[0,:,:,:]), resol=resol)
        grad[1,:,:,:,:] = gradient(np.squeeze(diffeo[1,:,:,:]), resol=resol)
        grad[2,:,:,:,:] = gradient(np.squeeze(diffeo[2,:,:,:]), resol=resol)
    elif diffeo.ndim == 3:
        if periodic:
            w = np.mgrid[0:diffeo.shape[1], 0:diffeo.shape[2]]
            dw = diffeo-w
            for k in range(2):
                diffeo[k,:,:] -= np.rint(dw[k,:,:]/diffeo.shape[k+1])*diffeo.shape[k+1]
        grad = np.zeros([2,2,diffeo.shape[1], diffeo.shape[2]])
        grad[0,:,:,:] = gradient(np.squeeze(diffeo[0,:,:]), resol=resol)
        grad[1,:,:,:] = gradient(np.squeeze(diffeo[1,:,:]), resol=resol)
    else:
        if periodic:
            w = np.mgrid[0:diffeo.shape[0]]
            dw = diffeo-w
            diffeo -= np.rint(dw/diffeo.shape[0])*diffeo.shape[0]
        grad = np.fabs(gradient(np.squeeze(diffeo), resol=resol))
    return grad
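The dw - np.rint(dw / L) * L pattern used for the periodic case is the standard minimum-image wrap; in isolation:

import numpy as np

L = 10.0                                # box size
dx = np.array([-9.5, -4.0, 6.0, 9.5])   # raw separations
dx_wrapped = dx - np.rint(dx / L) * L   # -> [ 0.5 -4.  -4.  -0.5]
# every separation is folded into [-L/2, L/2]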
Example #5
    def ll2xy(self, lat, lon):
        if not self.swnavdisp:
            # RADAR mode:
            # Convert lat/lon to pixel x,y

            # Normal case
            if self.lon1 > self.lon0:
                x = self.width * (lon - self.lon0) / (self.lon1 - self.lon0)

            # Wrap around:
            else:
                dellon = 180. - self.lon0 + self.lon1 + 180.
                xlon = lon + (lon < 0.) * 360.
                x = (xlon - self.lon0) / dellon * self.width

            y = self.height * (self.lat1 - lat) / (self.lat1 - self.lat0)
        else:
            # NAVDISP mode:
            qdr, dist = geo.qdrdist(self.ndlat, self.ndlon, lat, lon)
            alpha = np.radians(qdr - self.ndcrs)
            base = 30. * (self.lat1 - self.lat0)
            x = dist * np.sin(alpha) / base * self.height + self.width / 2
            y = -dist * np.cos(alpha) / base * self.height + self.height / 2

        return np.rint(x), np.rint(y)
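Note that np.rint returns floats, so callers that index a pixel buffer with the result still need an integer cast; the RADAR-mode mapping in isolation (with made-up screen and map extents):

import numpy as np

width, height = 800, 600
lon0, lon1, lat0, lat1 = -10.0, 10.0, 40.0, 60.0
lon, lat = 2.5, 47.0
x = width * (lon - lon0) / (lon1 - lon0)
y = height * (lat1 - lat) / (lat1 - lat0)
px, py = int(np.rint(x)), int(np.rint(y))   # -> 500, 390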
Example #6
    def _shift2boolean(self,
                       q_mesh_shift,
                       is_gamma_center=False,
                       tolerance=1e-5):
        """
        Tolerance is used to judge zero/half grid shifts.
        This value usually does not need to be changed.
        """
        if q_mesh_shift is None:
            shift = np.zeros(3, dtype='double')
        else:
            shift = np.array(q_mesh_shift, dtype='double')
    
        diffby2 = np.abs(shift * 2 - np.rint(shift * 2))
        if (diffby2 < 0.01).all(): # zero/half shift
            if is_gamma_center:
                is_shift = [0, 0, 0]
            else: # Monkhorst-pack
                diff = np.abs(shift - np.rint(shift))
                is_shift = list(np.logical_xor((diff > 0.1),
                                               (self._mesh % 2 == 0)) * 1)
        else:
            is_shift = None

        return is_shift
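The diffby2 test works because both a zero and a half shift satisfy shift * 2 ≈ round(shift * 2); a quick check:

import numpy as np

shift = np.array([0.0, 0.5, 0.3])
diffby2 = np.abs(shift * 2 - np.rint(shift * 2))   # -> [0.  0.  0.4]
# only the 0.3 entry fails the zero/half-shift test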
Example #7
    def solution(self):
        if self.workers == 1:
            # there is just 1 worker at the bottom, then the tree is linear
            # height moves up in powers of 2

            if self.height == 1: # just a single worker
                return 0, 1
            else:
                k = int(np.rint(np.log(self.height) / np.log(2)))
                return k, 2 ** (k + 1) - 1

        ratio = np.log(self.height) / np.log(self.workers)
        N = self.binary_search(ratio, 1, self.workers)

        if N is None:
            raise ValueError("No solution exists for [%d, %d]!"
                             % (self.height, self.workers))

        k = int(np.rint(np.log(self.workers) / np.log(N)))
        assert self.height == (N+1) ** k, \
            "This should not happen!"

        inodes = (self.workers - 1) // (N - 1)
        ipl = self.height * (N+1) - self.workers * N

        return inodes, ipl
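In the linear-tree branch, np.rint(np.log(h) / np.log(2)) recovers the integer exponent despite floating-point noise in the logarithms; for instance:

import numpy as np

height = 1024
k = int(np.rint(np.log(height) / np.log(2)))   # -> 10
total_nodes = 2 ** (k + 1) - 1                 # -> 2047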
Example #8
def rotate_clip(data_np, theta_deg, rotctr_x=None, rotctr_y=None,
                out=None):
    """
    Rotate numpy array `data_np` by `theta_deg` around rotation center
    (rotctr_x, rotctr_y).  If the rotation center is omitted it defaults
    to the center of the array.

    No adjustment is done to the data array beforehand, so the result will
    be clipped according to the size of the array (the output array will be
    the same size as the input array).
    """
    
    # If there is no rotation, then we are done
    if math.fmod(theta_deg, 360.0) == 0.0:
        return data_np

    ht, wd = data_np.shape[:2]

    if rotctr_x is None:
        rotctr_x = wd // 2
    if rotctr_y is None:
        rotctr_y = ht // 2

    yi, xi = numpy.mgrid[0:ht, 0:wd]
    xi -= rotctr_x
    yi -= rotctr_y
    cos_t = numpy.cos(numpy.radians(theta_deg))
    sin_t = numpy.sin(numpy.radians(theta_deg))

    #t1 = time.time()
    if have_numexpr:
        ap = ne.evaluate("(xi * cos_t) - (yi * sin_t) + rotctr_x")
        bp = ne.evaluate("(xi * sin_t) + (yi * cos_t) + rotctr_y")
    else:
        ap = (xi * cos_t) - (yi * sin_t) + rotctr_x
        bp = (xi * sin_t) + (yi * cos_t) + rotctr_y
    #print "rotation in %.5f sec" % (time.time() - t1)

    #ap = numpy.rint(ap).astype('int').clip(0, wd-1)
    #bp = numpy.rint(bp).astype('int').clip(0, ht-1)
    # Optimizations to reuse existing intermediate arrays
    numpy.rint(ap, out=ap)
    ap = ap.astype('int')
    ap.clip(0, wd-1, out=ap)
    numpy.rint(bp, out=bp)
    bp = bp.astype('int')
    bp.clip(0, ht-1, out=bp)

    if out is not None:
        out[:, :, ...] = data_np[bp, ap]
        newdata = out
    else:
        newdata = data_np[bp, ap]
        new_ht, new_wd = newdata.shape[:2]

        assert (wd == new_wd) and (ht == new_ht), \
               Exception("rotated cutout is %dx%d original=%dx%d" % (
            new_wd, new_ht, wd, ht))

    return newdata
Example #9
    def makeMaskCircle(self):
        self.initMask()
        if self.parent.data is not None and self.maskingMode > 0:
            (radiusX, radiusY) = self.mask_circle.size()
            (cornerX, cornerY) = self.mask_circle.pos()
            i0, j0 = np.meshgrid(range(int(radiusY)),
                                 range(int(radiusX)), indexing='ij')
            r = np.sqrt(np.square((i0 - radiusY / 2).astype(float)) +
                        np.square((j0 - radiusX / 2).astype(float)))
            i0 = np.rint(i0[np.where(r < radiusY / 2.)] + cornerY).astype(int)
            j0 = np.rint(j0[np.where(r < radiusX / 2.)] + cornerX).astype(int)
            i01 = i0[(i0 >= 0) & (i0 < self.parent.data.shape[1]) & (j0 >= 0) & (j0 < self.parent.data.shape[0])]
            j01 = j0[(i0 >= 0) & (i0 < self.parent.data.shape[1]) & (j0 >= 0) & (j0 < self.parent.data.shape[0])]

            _mask = np.ones_like(self.parent.data)
            _mask[j01, i01] = 0
            if self.maskingMode == 1:  # masking mode
                self.userMaskAssem *= _mask
            elif self.maskingMode == 2:  # unmasking mode
                self.userMaskAssem[j01, i01] = 1
            elif self.maskingMode == 3:  # toggle mode
                self.userMaskAssem[j01, i01] = (1 - self.userMaskAssem[j01, i01])

            # update userMask
            self.userMask = self.parent.det.ndarray_from_image(self.parent.evt, self.userMaskAssem, pix_scale_size_um=None,
                                                        xy0_off_pix=None)

            self.displayMask()
            self.parent.pk.algInitDone = False
            self.parent.pk.updateClassification()
        if self.parent.args.v >= 1: print("done makeMaskCircle!!!!!!")
Example #10
    def angle_diff(self, i):
        """Determine the polar angle difference between the calculated
        (hkl) and the closest integer (hkl) of the specified peak."""
        h, k, l = self.hkl(i)
        (h0, k0, l0) = (np.rint(h), np.rint(k), np.rint(l))
        polar0 = 2 * np.arcsin(self.unitcell.ds((h0, k0, l0)) * self.wavelength / 2)
        return np.abs(self.polar(i) - polar0)
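Rounding each Miller index independently with np.rint yields the closest integer (hkl); for example:

import numpy as np

h, k, l = 1.98, -0.03, 3.02
h0, k0, l0 = np.rint(h), np.rint(k), np.rint(l)   # -> 2.0, -0.0, 3.0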
Example #11
def process_coordinates_from_data(fit_parameters, frames, dt):
    """(center_x, center_y, resting_x, resting_y, extended_x, extended_y)
    finds the resting and extended position for each dot, using the data."""
    X, Y = 0, 1
    center_x = ((fit_parameters[-1][0].offset - fit_parameters[-1][0].amplitude) -
                (fit_parameters[0][0].offset + fit_parameters[0][0].amplitude)) / 2 + fit_parameters[0][0].offset
    center_y = ((fit_parameters[-1][1].offset - fit_parameters[-1][1].amplitude) -
                (fit_parameters[0][1].offset + fit_parameters[0][1].amplitude)) + fit_parameters[0][1].offset

    # resting y positions fall when y is maximized, at t = period (pi/2 - phase) / (2 pi)
    # (this is because of our choice of coordinate system, where extension is up, towards 0)
    N = len(frames)
    y_max_t = [np.arange(start=yfit.period * (pi/2-yfit.phase) / (2*pi),
                         stop=N*dt,
                         step=yfit.period) for _, yfit in fit_parameters]
    y_max_t = [a[a > 0] for a in y_max_t]

    # extended y positions fall when y is minimized, at t = period (3 pi / 2 - phase) / (2 pi)
    y_min_t = [np.arange(start=yfit.period * (3*pi/2-yfit.phase) / (2*pi),
                         stop=N*dt,
                         step=yfit.period) for _, yfit in fit_parameters]
    y_min_t = [a[a > 0] for a in y_min_t]

    y_max_i = [np.rint(yt / dt).astype(int) for yt in y_max_t]
    y_min_i = [np.rint(yt / dt).astype(int) for yt in y_min_t]

    resting_x, resting_y = [], []
    extended_x, extended_y = [], []
    n_dots = len(frames[0])
    for dot in range(n_dots):
        extended_x.append(mean([frames[i][dot].xpos for i in y_min_i[dot]]))
        extended_y.append(mean([frames[i][dot].ypos for i in y_min_i[dot]]))
        resting_x.append(mean([frames[i][dot].xpos for i in y_max_i[dot]]))
        resting_y.append(mean([frames[i][dot].ypos for i in y_max_i[dot]]))
    return (center_x, center_y, resting_x, resting_y, extended_x, extended_y)
Example #12
def writeImageFile(xrayImageFilename,BMDprojected,imageSize,imageFormat='bmp',smooth=True):
    """Create an image from array and write to file"""
    
    # Re-import PIL.Image and PIL.ImageFilter into current function namespace
    from PIL import Image, ImageFilter
    
    # Convert to 8-bit
    xray = BMDprojected.copy()
    xray = np.asarray(xray,dtype=np.int8)

    # Create image from array.
    xray = xray[:,::-1]       
    xrayImage = Image.fromarray(xray.transpose(),mode='L')
    
    # Resize image
    xsize,ysize = xrayImage.size    
    bigSide = np.argmax(xrayImage.size)
    if bigSide==0: scale = float(imageSize)/xsize
    else:          scale = float(imageSize)/ysize
    xsize = int(np.rint(scale*xsize))
    ysize = int(np.rint(scale*ysize))  
    xrayImage = xrayImage.resize((xsize,ysize),Image.BILINEAR)
    if smooth: xrayImage = xrayImage.filter(ImageFilter.SMOOTH)
    
    # Save xray image to file
    if xrayImageFilename.split('.')[-1]!=imageFormat: xrayImageFilename+='.%s' % imageFormat
    xrayImage.save(xrayImageFilename,imageFormat) 

    return
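The resize step scales the long side to exactly imageSize and rounds the short side to the nearest pixel; that arithmetic by itself:

import numpy as np

imageSize = 512
xsize, ysize = 300, 400                 # original size; y is the long side
scale = float(imageSize) / ysize
new_size = (int(np.rint(scale * xsize)), int(np.rint(scale * ysize)))
# -> (384, 512)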
Example #13
    def diff(self, i):
        """Determine the reciprocal space difference between the calculated
        (hkl) and the closest integer (hkl) of the specified peak."""
        h, k, l = self.hkl(i)
        Q = np.matrix((h, k, l)).T
        Q0 = np.matrix((np.rint(h), np.rint(k), np.rint(l))).T
        return norm(self.Bmat * (Q - Q0))
Example #14
    def output(self):
        """Return the drawn line and the resulting scan.

        Returns
        -------
        line_image : (M, N) uint8 array, same shape as image
            An array of 0s with the scanned line set to 255.
            If the linewidth of the line tool is greater than 1,
            sets the values within the profiled polygon to 128.
        scan : (P,) or (P, 3) array of int or float
            The line scan values across the image.
        """
        end_points = self.line_tool.end_points
        line_image = np.zeros(self.image_viewer.image.shape[:2],
                              np.uint8)
        width = self.line_tool.linewidth
        if width > 1:
            rp, cp = measure.profile._line_profile_coordinates(
                *end_points[:, ::-1], linewidth=width)
            # the points are aliased, so create a polygon using the corners
            yp = np.rint(rp[[0, 0, -1, -1],[0, -1, -1, 0]]).astype(int)
            xp = np.rint(cp[[0, 0, -1, -1],[0, -1, -1, 0]]).astype(int)
            rp, cp = draw.polygon(yp, xp, line_image.shape)
            line_image[rp, cp] = 128
        (x1, y1), (x2, y2) = end_points.astype(int)
        rr, cc = draw.line(y1, x1, y2, x2)
        line_image[rr, cc] = 255
        return line_image, self.scan_data
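The rounded corner coordinates feed scikit-image's polygon rasterizer; a tiny standalone version of that step (corner values are made up):

import numpy as np
from skimage import draw

yp = np.rint([0.2, 0.2, 4.8, 4.8]).astype(int)   # -> [0 0 5 5]
xp = np.rint([0.2, 6.7, 6.7, 0.2]).astype(int)   # -> [0 7 7 0]
rr, cc = draw.polygon(yp, xp, (8, 8))            # pixels inside the strip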
Example #15
def symmetryError(latt, parentlatt):
    """check that the lattice obeys all symmetry operations of a parent lattice:  R.latt.inv(R) will give an integer matrix"""
    symmerr = 0.0
    for iop in range(parentlatt.nops):
        lmat = array(latt)
        #        if det(lmat) == 0:
        #            print 'Determinant zero'
        #            print lmat
        #            return symmerror
        mmat = trimSmall(dot(dot(inv(lmat), parentlatt.symops[:, :, iop]), lmat))
        #        print 'mmat', iop
        #        print trimSmall(mmat)
        operr = 0.0
        for i in range(3):
            for j in range(3):
                if abs(rint(mmat[i, j]) - mmat[i, j]) > 1.0e-4:
                    operr += abs(rint(mmat[i, j]) - mmat[i, j])
        #                    print iop, 'Symmetry failed for mmat[i,j]',mmat[i,j]
        #                    print 'Cartesian operator'
        #                    print parentlatt.symops[:,:,iop]
        #                    print 'Cartesian Lattice'
        #                    print lmat
        if operr > 2.0e-4:
            symmerr += operr
    #            print 'Noninteger operator in superlattice for operation %s, with error %s.' % (iop,str(operr))
    #    if operr < 2.0e-4:
    #        return 0.0
    #    else:
    return symmerr
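The integer test compares each matrix entry with rint of itself; the same check in vectorized form:

import numpy as np

mmat = np.array([[1.0, 0.00003], [-0.99998, 2.0]])
operr = np.abs(np.rint(mmat) - mmat)
is_integer_op = (operr <= 1.0e-4).all()   # -> True, all entries near integers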
Example #16
def worker(p, loadPerWorker, projections, combined_matrix, z_voxel_coords,
           transform_matrix, z_voxels, detector_rows, detector_columns,
           recon_volume, volume_weight):
    for ii in range(loadPerWorker):
        flat_proj_data = projections[p+ii].ravel()

        for z in range(z_voxels):

            # Put current z voxel into combined_matrix
            combined_matrix[2, :] = z_voxel_coords[z]

            # Find the mapping between volume voxels and detector pixels
            # for the current angle
            vol_det_map = dot(transform_matrix[p+ii], combined_matrix)
            map_cols = rint(divide(vol_det_map[0, :],
                                   vol_det_map[2, :])).astype(int32)
            map_rows = rint(divide(vol_det_map[1, :],
                                   vol_det_map[2, :])).astype(int32)

            # Find the detector pixels that contribute to the current slice;
            # xrays that hit outside the detector area are masked out
            mask = (map_cols >= 0) & (map_rows >= 0) & \
                   (map_cols < detector_columns) & (map_rows < detector_rows)

            # The projection pixels that contribute to the current slice
            proj_indexs = map_cols * mask + map_rows * mask * detector_columns

            # Add the weighted projection pixel values to their
            # corresponding voxels in the z slice
            recon_volume[z].flat += flat_proj_data[proj_indexs] \
                * volume_weight[p+ii] * mask
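The core of the voxel-to-pixel mapping is a homogeneous divide followed by nearest-pixel rounding; a minimal sketch with made-up projection values:

import numpy as np

vol_det_map = np.array([[10.2, 30.9],    # detector column * w
                        [ 5.1, 15.6],    # detector row * w
                        [ 2.0,  3.0]])   # homogeneous coordinate w
map_cols = np.rint(vol_det_map[0] / vol_det_map[2]).astype(np.int32)  # -> [ 5 10]
map_rows = np.rint(vol_det_map[1] / vol_det_map[2]).astype(np.int32)  # -> [3 5]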
Example #17
    def __init__(self, n=None, azim_max=np.pi/2,
                 diameter=speed_of_sound_in_air*650*usecond,
                 itd=None, samplerate=None, fractional_itds=False):
        if itd is None:
            azim = np.linspace(-azim_max, azim_max, n)
            itd = diameter*np.sin(azim)/speed_of_sound_in_air
            coords = make_coordinates(azim=azim, itd=itd)
        else:
            coords = make_coordinates(itd=itd)
        self.itd = itd
        samplerate = self.samplerate = get_samplerate(samplerate)
        if not fractional_itds:
            dl = itd.copy()
            dr = -itd
            dl[dl<0] = 0
            dr[dr<0] = 0
            dl = np.array(np.rint(dl*samplerate), dtype=int)
            dr = np.array(np.rint(dr*samplerate), dtype=int)
            idxmax = max(np.amax(dl), np.amax(dr))
            data = np.zeros((2, len(itd), idxmax+1))
            data[0, np.arange(len(itd)), dl] = 1
            data[1, np.arange(len(itd)), dr] = 1
        else:
            delays = np.hstack((itd/2, -itd/2))
            fd = FractionalDelay(silence(1*ms, samplerate=samplerate), delays)
            ir = fd.impulse_response
            data = np.zeros((2, len(itd), fd.filter_length))
            data[0, :, :] = ir[:len(itd), :]
            data[1, :, :] = ir[len(itd):, :]
            self.delay_offset = fd.delay_offset
        self.hrtfset = HRTFSet(data, samplerate, coords)
        self.hrtfset.name = 'ITDDatabaseSubject'
        self.subjects = ['0']
Example #18
def MakeOperator(coefficients, terms, verbose=False):

  # Initialize.
  n_terms = len(coefficients)
  one_percent = numpy.rint(numpy.ceil(n_terms / 100.))
  start = time.perf_counter()
  n_orbitals = commutators.OrbitalCount(terms)
  jw_terms = GetJordanWignerTerms(n_orbitals)

  # Loop over terms.
  operator = 0
  for i, (coefficient, term) in enumerate(zip(coefficients, terms)):
    operator = operator + MatrixForm(coefficient, term, jw_terms)

    # Report progress.
    if verbose and not (i + 1) % one_percent:
      percent_complete = numpy.rint(100. * (i + 1) / n_terms)
      elapsed = time.perf_counter() - start
      rate = elapsed / percent_complete
      eta = rate * (100 - percent_complete)
      print('%s. Computation %i%% complete. Approximately %i '
            'minute(s) remaining.' % (time.strftime(
                '%B %d at %H:%M:%S', time.localtime()),
            percent_complete, round(eta / 60)))

  assert IsHermitian(operator)
  return operator
Example #19
    def lookup(self, position, energy, interpolation=False):
        """Differential flux.

        Parameters
        ----------
        position : `~astropy.coordinates.SkyCoord`
            Position on the sky.
        energy : `~astropy.units.Quantity`
            Energy
        interpolation : bool, optional
            If True, interpolate between pixels instead of taking the
            nearest pixel value

        Returns
        -------
        flux : `~astropy.units.Quantity`
            Differential flux (1 / (cm2 MeV s sr))
        """
        # TODO: add interpolation option using NDDataArray
        if not position.shape == energy.shape:
            raise ValueError('Position and energy array must have the same shape.')

        z, y, x = self.wcs_skycoord_to_pixel(position, energy)

        if interpolation:
            shape = z.shape
            pix_coords = np.column_stack([x.flat, y.flat, z.flat])
            vals = self._interpolate(pix_coords)
            return vals.reshape(shape)
        else:
            return self.data[np.rint(z).astype('int'), np.rint(y).astype('int'),
                             np.rint(x).astype('int')]
Example #20
def AnalyzeSamind(str_f_samind, str_f_sg, str_of_iost, str_of_iosg, int_cutoff=0):
    dfm_samind = pd.read_csv(str_f_samind, sep='\t', header=None)
    dfm_samind.columns = ['readID', 'batchID', 'chrom', 'sbeg', 'send', 'sgID', 'sgstrand', 'sgbeg', 'sgend', 'sgseq', 'c_site', 'CIGAR', 'idtype', 'idbeg', 'idend', 'idlen', 'fm_status', 'factor', 'count', 'freq']

    dfm_sg = pd.read_csv(str_f_sg, header=None, index_col=None, sep='\t')
    dfm_sg.columns = ['sgID', 'chrom', 'strand', 'sbeg', 'send', 'qseq', 'c_site']

    func_samind = lambda x: pd.DataFrame(dict(batchID=x.batchID.unique(),
                                              sgID=x.sgID.unique(),
                                              allrc=int(np.rint(x.freq.sum())),
                                              none=int(np.rint(x.freq[x.fm_status=='None'].sum())),
                                              r_ind=x.freq[x.fm_status!='None'].sum()/x.freq.sum() if x.freq.sum()!=0 else 0,
                                              inf=int(np.rint(x.freq[x.fm_status=='INF'].sum())),
                                              otf=int(np.rint(x.freq[x.fm_status=='OTF'].sum())),
                                              r_reffect=x.freq[x.fm_status=='OTF'].sum()/x.freq[x.fm_status!='None'].sum() if x.freq[x.fm_status!='None'].sum()!=0 else 0,
                                              r_effect=x.freq[x.fm_status=='OTF'].sum()/x.freq.sum() if x.freq.sum()!=0 else 0))
    
    dfm_iost = dfm_samind.groupby(['batchID', 'sgID'], group_keys=False).apply(func_samind).reset_index(drop=True).reindex(columns=['batchID', 'sgID', 'allrc', 'none', 'r_ind', 'inf', 'otf', 'r_reffect', 'r_effect'])

    func_io = lambda x: pd.DataFrame(dict(sgID=x.sgID.unique(),
                                          allrc=x.allrc.sum(),
                                          none=x.none.sum(),
                                          inf=x.inf.sum(),
                                          otf=x.otf.sum(),
                                          r_ind=(x.inf.sum()+x.otf.sum())/x.allrc.sum() if x.allrc.sum()!=0 else 0,
                                          r_reffect=x.otf.sum()/(x.inf.sum()+x.otf.sum()) if x.inf.sum()+x.otf.sum()!=0 else 0,
                                          r_effect=x.otf.sum()/x.allrc.sum() if x.allrc.sum()!=0 else 0))
    dfm_iost = dfm_iost.groupby(['sgID'], group_keys=False).apply(func_io).reset_index(drop=True).reindex(columns=['sgID', 'allrc', 'none', 'inf', 'otf', 'r_ind', 'r_reffect', 'r_effect'])
    dfm_iost = dfm_iost.loc[dfm_iost.allrc >= int_cutoff, :]
    dfm_iost.to_csv(str_of_iost, sep='\t', index=None)

    dfm_sg = pd.merge(dfm_sg, dfm_iost[['sgID']], on='sgID')
    dfm_sg.to_csv(str_of_iosg, sep='\t', header=None, index=None)
Example #21
    def add_dirac(self, xpos, ypos, flux):
        """Add a delta function component to the current image.

        Args:
          xpos (float): Component position on first (fast) FITS axis /pix.
          ypos (float): Component position on second (slow) FITS axis /pix.
          flux (float): Integrated flux of component.

        Example:

        >>> import numpy as np
        >>> from imageoi.initimage import GreyImg
        >>> img = GreyImg('test', 64, 64, 0.25)
        >>> img.add_dirac(12.0, 37.0, 0.5)
        >>> max1 = np.zeros((64,))
        >>> max1[37] = 12
        >>> np.all(np.argmax(img.image, axis=1) == max1)
        True
        >>> max0 = np.zeros((64,))
        >>> max0[12] = 37
        >>> np.all(np.argmax(img.image, axis=0) == max0)
        True
        >>> img.add_dirac(13.5, 42.8, 0.25)
        >>> np.abs(np.sum(img.image) - 0.75) < 1e-6
        True

        """
        self.image[int(np.rint(ypos))][int(np.rint(xpos))] += flux
Example #22
def main(args):
    files = args.files
    maskfile = args.maskfile
    buf = args.buffer
    loncol = args.loncol
    latcol = args.latcol

    print('processing %d files ...' % len(files))
    ### mask
    if BUF2 is not None:
        xm, ym, mask = get_mask(maskfile, paddzeros=BUF2+2)
    else:
        xm, ym, mask = get_mask(maskfile, paddzeros=0)
    xm, ym = np.rint(xm), np.rint(ym)

    for fname in files:
        ### input
        f = tb.openFile(fname)
        data = f.root.data
        lon = data[:,loncol]
        lat = data[:,latcol]

        flg1, flg2 = apply_mask(lon, lat, xm, ym, mask, buf=buf, slon=0)
        if BUF2 is not None:
            _, flg3 = apply_mask(lon, lat, xm, ym, mask, buf=BUF2, slon=0)

        ### output 
        fnameout = os.path.splitext(fname)[0] + '_mask.h5'
        if BUF2 is not None:
            save_arr_as_mat(fnameout, [data[:], flg1, flg2, flg3])
        else:
            save_arr_as_mat(fnameout, [data[:], flg1, flg2])

        close_files()
Example #23
  def get_map(self):
    import numpy as np
    map_data = "{:d} 0 {:d} {:d} {:d} 0 0\n".format(self.elements.shape[0], 
                                             self.n[0], self.n[1], self.n[2])

    my_n = np.copy(self.n)
    if not self.boundaries[0] == 'P':
      my_n[1] += 1
    if not self.boundaries[1] == 'P':
      my_n[0] += 1
    if not self.boundaries[5] == 'P':
      my_n[2] += 1

    for e in range(self.map.shape[0]):
      ix = int(np.rint((self.elements[e,0] - self.root[0])/self.delta[0]))
      iy = int(np.rint((self.elements[e,4] - self.root[1])/self.delta[1]))
      iz = int(np.rint((self.elements[e,8] - self.root[2])/self.delta[2]))

      map_data += "{:d} {:d} {:d} {:d} {:d} {:d} {:d} {:d} {:d}\n".format(
                   self.map[e], 
                   get_ind(ix,   iy  , iz  , my_n),
                   get_ind(ix+1, iy  , iz  , my_n),
                   get_ind(ix,   iy+1, iz  , my_n),
                   get_ind(ix+1, iy+1, iz  , my_n),
                   get_ind(ix,   iy  , iz+1, my_n),
                   get_ind(ix+1, iy  , iz+1, my_n),
                   get_ind(ix,   iy+1, iz+1, my_n),
                   get_ind(ix+1, iy+1, iz+1, my_n))
     
    return map_data
Example #24
def apply_filter(image, img_filter, horizontally=True):
    filter_size = img_filter.shape
    filter_size = filter_size[0]

    pad = filter_size // 2
    height, width = image.shape

    new_image = np.zeros((height, width), dtype=np.uint8)

    if horizontally:
        for h in range(pad, height - pad):
            for w in range(pad, width - pad):
                new_image[h, w] = np.clip(np.rint(
                    np.sum(np.multiply(image[h - pad: h + pad + 1, w - pad: w + pad + 1], img_filter))),
                    MIN_PIXEL_VALUE, MAX_PIXEL_VALUE)

    else:
        for w in range(pad, width - pad):
            for h in range(pad, height - pad):
                new_image[h, w] = np.clip(np.rint(
                    np.sum(np.multiply(image[h - pad: h + pad + 1, w - pad: w + pad + 1], img_filter))),
                    MIN_PIXEL_VALUE, MAX_PIXEL_VALUE)

    return new_image
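A minimal usage sketch, assuming the module-level MIN_PIXEL_VALUE/MAX_PIXEL_VALUE constants are 0 and 255 (8-bit images):

import numpy as np

MIN_PIXEL_VALUE, MAX_PIXEL_VALUE = 0, 255
box = np.ones((3, 3)) / 9.0                      # simple mean filter
image = np.random.randint(0, 256, (16, 16)).astype(np.uint8)
smoothed = apply_filter(image, box)              # horizontal pass order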
Example #25
    def createFromRandomVector(vector, alphaDivisor, randomVector, alphaStage):
        if alphaDivisor is None:
            new = Solution(vector + numpy.rint(randomVector).astype(int))
        else:
            # Calculate new requests component
            newVector = numpy.zeros(vector.shape, dtype=object)
            if alphaStage == 0:
                newVector[0] = vector[0] \
                    + (Solution.sizeDomainRequestComponent * (alphaDivisor.numerator * int(round(randomVector[0] * Solution.randomPrecisionMult)))) \
                        // (alphaDivisor.denominator * Solution.randomPrecisionMult)
            else:
                newVector[0] = vector[0] + int(round(randomVector[0]))

            candidate = Solution(newVector)

            # Set the rest of the vector if the first component is valid
            if alphaStage != 0 and candidate.canApplyTimeAdjust():
                newVector[1:] = vector[1:] \
                    + (candidate.getSizeDomainEachBus() * (alphaDivisor.numerator * numpy.rint(randomVector[1:] * Solution.randomPrecisionMult).astype(int).astype(object))) \
                        // (alphaDivisor.denominator * Solution.randomPrecisionMult)
            else:
                newVector[1:] = vector[1:]
            new = Solution(newVector)

        return new
Example #26
    def configure(self, bin_width_s, record_length_s, number_of_gates=0):
        """ Configuration of the fast counter.

        @param float bin_width_s: Length of a single time bin in the time trace
                                  histogram in seconds.
        @param float record_length_s: Total length of the timetrace/each single
                                      gate in seconds.
        @param int number_of_gates: optional, number of gates in the pulse
                                    sequence. Ignore for not gated counter.

        @return tuple(binwidth_s, gate_length_s, number_of_gates):
                    binwidth_s: float the actual set binwidth in seconds
                    gate_length_s: the actual set gate length in seconds
                    number_of_gates: the number of gates which are accepted
        """
        # Do nothing if fast counter is running
        if self.statusvar >= 2:
            binwidth_s = self._binwidth / self._internal_clock_hz
            gate_length_s = self._gate_length_bins * binwidth_s
            return binwidth_s, gate_length_s, self._number_of_gates

        # set class variables
        self._binwidth = int(np.rint(bin_width_s * self._internal_clock_hz))

        # calculate the actual binwidth depending on the internal clock:
        binwidth_s = self._binwidth / self._internal_clock_hz

        self._gate_length_bins = int(np.rint(record_length_s / bin_width_s))
        gate_length_s = self._gate_length_bins * binwidth_s

        self._number_of_gates = number_of_gates

        self.statusvar = 1
        return binwidth_s, gate_length_s, number_of_gates
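The bin arithmetic is easiest to see in isolation: the requested width is snapped to a whole number of internal clock cycles, and the actual width is recomputed from that count:

import numpy as np

internal_clock_hz = 950e6
bin_width_s = 1e-9
binwidth = int(np.rint(bin_width_s * internal_clock_hz))   # -> 1 cycle
actual_bin_width_s = binwidth / internal_clock_hz          # ~1.053e-9 s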
Example #27
def correlation(VN,r,maxs,dmaxs,bins,N,ld):
    R = np.zeros(shape=(N,N))
    for i in range(N):
        for j in range(i):
            dx=r[i,0]-r[j,0]
            dy=r[i,1]-r[j,1]
            dz=r[i,2]-r[j,2]
            dx -= np.rint(dx / ld) * ld
            dy -= np.rint(dy / ld) * ld
            dz -= np.rint(dz / ld) * ld        
            drr = math.sqrt( dx * dx + dy * dy + dz * dz)
            R[i,j] = drr
            R[j,i] = drr

    #bins = 30
    #maxs = np.linspace(0,ld,num=bins)
    #dmaxs = maxs[1] - maxs[0]
    g = np.zeros(shape=(bins,))
    for i in range(1,bins):
        maxx = maxs[i]
        for j in range(N):
            for k in range(j):
                dist = 1/(4*math.pi*R[j,k]*R[j,k]*dmaxs)
                x = R[j,k] - maxx
                if -dmaxs < x < 0:
                    g[i-1] += 2*dist
            
    g = g*VN
    return g
Example #28
    def __call__(self, t):
        if self.clock is None:
            raise ValueError('Can only call timed arrays if they are based on a clock.')
        else:
            if isinstance(t, (list, tuple)):
                t = numpy.array(t)
            if isinstance(t, neurongroup.TArray):
                # In this case, we know that t = ones(N)*t so we just use the first value
                t = t[0]
            elif isinstance(t, numpy.ndarray):
                if len(self.shape) > 2:
                    raise ValueError('Calling TimedArray with array valued t only supported for 1D or 2D TimedArray.')
                if len(self.shape) == 2 and len(t) != self.shape[1]:
                    raise ValueError('Calling TimedArray with array valued t on 2D TimedArray requires len(t)=arr.shape[1]')
                t = numpy.array(numpy.rint((t - self._t_init) / self._dt), dtype=int)
                t[t < 0] = 0
                t[t >= len(self.times)] = len(self.times) - 1
                if len(self.shape) == 1:
                    return numpy.asarray(self)[t]
                return numpy.asarray(self)[t, numpy.arange(len(t))]
            t = float(t)
            ot = t
            t = int(numpy.rint((t - self._t_init) / self._dt))
            if t < 0: t = 0
            if t >= len(self.times): t = len(self.times) - 1
            return numpy.asarray(self)[t]
Example #29
def main():
    """Main Function"""
    # Read dat file and save pinned pixels
    if os.path.exists( datFile ):
        # Make output directory
        if not os.path.exists( resultsDir ):
            os.makedirs( resultsDir )
            
        img = ReadDat ( datFile )
        np.array( img.pinned , dtype=bool ).tofile('%s/pinned.dat' % resultsDir )
        
        # Save details of image size and miniblock sizes
        f = open( '%s/array.log' % resultsDir , 'w' )
        f.write ( '%s\t%s\t%s\t%s' % ( img.rows , img.cols , img.miniR , img.miniC ) )
        f.close ( )
        
        # Empty chip beadfind analysis
        bf  = BeadFind ( img )
        np.array( bf['actpix'] , dtype=bool ).tofile('%s/actpix.dat' % resultsDir )
        
        # Note that these are only useful in the miniblock sizes 
        # 12 x 14 blocks (miniR = 111, miniC = 92)
        # For thumbnail, it's different, of course.  8 x 12 blocks of (miniR = 100 , miniC = 100)
        np.array( np.rint( bf['bfmat'] ) , dtype=np.dtype('i2') ).tofile('%s/ebfvals.dat' % resultsDir )
        np.array( np.rint( 10000 * bf['gains'] ) , dtype=np.dtype('i2') ).tofile('%s/gaincorr.dat' % resultsDir )
        
        # Buffering analysis
        bt  = BufferTest( img )
        
        np.array( np.rint( bt['slopes'] ) , dtype=np.dtype('i2') ).tofile('%s/slopes.dat' % resultsDir )
        np.array( bt['t0'] , dtype=np.dtype('i1') ).tofile('%s/t0.dat' % resultsDir )
        
    else:
        print('Error!  Acquisition file not found.  Please do not skip the calibration before loading.')
Example #30
def value_diff(I, mu):
    '''We pass in a numpy array, with the color components (B, G, R) innermost.'''
    if len(mu.shape) == 1:
        mu = np.rint(mu[np.newaxis,np.newaxis,:]).astype(np.uint8)
    if len(I.shape) == 1:
        I = np.rint(I[np.newaxis,np.newaxis,:]).astype(np.uint8)
    return(np.squeeze(cv2.subtract(cv2.cvtColor(I, cv2.COLOR_BGR2HSV)[...,2], cv2.cvtColor(mu, cv2.COLOR_BGR2HSV)[...,2], dtype=cv2.CV_32S)))
Example #31
def convert(image, dtype, force_copy=False, uniform=False):
    """
    Convert an image to the requested data-type.

    Warnings are issued in case of precision loss, or when negative values
    are clipped during conversion to unsigned integer types (sign loss).

    Floating point values are expected to be normalized and will be clipped
    to the range [0.0, 1.0] or [-1.0, 1.0] when converting to unsigned or
    signed integers respectively.

    Numbers are not shifted to the negative side when converting from
    unsigned to signed integer types. Negative values will be clipped when
    converting to unsigned integers.

    Parameters
    ----------
    image : ndarray
        Input image.
    dtype : dtype
        Target data-type.
    force_copy : bool, optional
        Force a copy of the data, irrespective of its current dtype.
    uniform : bool, optional
        Uniformly quantize the floating point range to the integer range.
        By default (uniform=False) floating point values are scaled and
        rounded to the nearest integers, which minimizes back and forth
        conversion errors.

    References
    ----------
    .. [1] DirectX data conversion rules.
           http://msdn.microsoft.com/en-us/library/windows/desktop/dd607323%28v=vs.85%29.aspx
    .. [2] Data Conversions. In "OpenGL ES 2.0 Specification v2.0.25",
           pp 7-8. Khronos Group, 2010.
    .. [3] Proper treatment of pixels as integers. A.W. Paeth.
           In "Graphics Gems I", pp 249-256. Morgan Kaufmann, 1990.
    .. [4] Dirty Pixels. J. Blinn. In "Jim Blinn's corner: Dirty Pixels",
           pp 47-57. Morgan Kaufmann, 1998.

    """
    image = np.asarray(image)
    dtypeobj_in = image.dtype
    dtypeobj_out = np.dtype(dtype)
    dtype_in = dtypeobj_in.type
    dtype_out = dtypeobj_out.type
    kind_in = dtypeobj_in.kind
    kind_out = dtypeobj_out.kind
    itemsize_in = dtypeobj_in.itemsize
    itemsize_out = dtypeobj_out.itemsize

    # Below, we do an `issubdtype` check.  Its purpose is to find out
    # whether we can get away without doing any image conversion.  This happens
    # when:
    #
    # - the output and input dtypes are the same or
    # - when the output is specified as a type, and the input dtype
    #   is a subclass of that type (e.g. `np.floating` will allow
    #   `float32` and `float64` arrays through)

    type_out = dtype if isinstance(dtype, type) else dtypeobj_out

    if np.issubdtype(dtypeobj_in, type_out):
        if force_copy:
            image = image.copy()
        return image

    if not (dtype_in in _supported_types and dtype_out in _supported_types):
        raise ValueError("Can not convert from {} to {}.".format(
            dtypeobj_in, dtypeobj_out))

    def sign_loss():
        warn("Possible sign loss when converting negative image of type "
             "{} to positive image of type {}.".format(dtypeobj_in,
                                                       dtypeobj_out))

    def prec_loss():
        warn("Possible precision loss when converting from {} to {}".format(
            dtypeobj_in, dtypeobj_out))

    def _dtype_itemsize(itemsize, *dtypes):
        # Return first of `dtypes` with itemsize greater than `itemsize`
        return next(dt for dt in dtypes if np.dtype(dt).itemsize >= itemsize)

    def _dtype_bits(kind, bits, itemsize=1):
        # Return dtype of `kind` that can store a `bits` wide unsigned int
        def compare(x, y, kind='u'):
            if kind == 'u':
                return x <= y
            else:
                return x < y

        s = next(i for i in (itemsize, ) + (2, 4, 8)
                 if compare(bits, i * 8, kind=kind))
        return np.dtype(kind + str(s))

    def _scale(a, n, m, copy=True):
        """Scale an array of unsigned/positive integers from `n` to `m` bits.

        Numbers can be represented exactly only if `m` is a multiple of `n`.

        Parameters
        ----------
        a : ndarray
            Input image array.
        n : int
            Number of bits currently used to encode the values in `a`.
        m : int
            Desired number of bits to encode the values in `out`.
        copy : bool, optional
            If True, allocates and returns new array. Otherwise, modifies
            `a` in place.

        Returns
        -------
        out : array
            Output image array. Has the same kind as `a`.
        """
        kind = a.dtype.kind
        if n > m and a.max() < 2**m:
            mnew = int(np.ceil(m / 2) * 2)
            if mnew > m:
                dtype = "int{}".format(mnew)
            else:
                dtype = "uint{}".format(mnew)
            n = int(np.ceil(n / 2) * 2)
            warn("Downcasting {} to {} without scaling because max "
                 "value {} fits in {}".format(a.dtype, dtype, a.max(), dtype))
            return a.astype(_dtype_bits(kind, m))
        elif n == m:
            return a.copy() if copy else a
        elif n > m:
            # downscale with precision loss
            prec_loss()
            if copy:
                b = np.empty(a.shape, _dtype_bits(kind, m))
                np.floor_divide(a,
                                2**(n - m),
                                out=b,
                                dtype=a.dtype,
                                casting='unsafe')
                return b
            else:
                a //= 2**(n - m)
                return a
        elif m % n == 0:
            # exact upscale to a multiple of `n` bits
            if copy:
                b = np.empty(a.shape, _dtype_bits(kind, m))
                np.multiply(a, (2**m - 1) // (2**n - 1), out=b, dtype=b.dtype)
                return b
            else:
                a = a.astype(_dtype_bits(kind, m, a.dtype.itemsize),
                             copy=False)
                a *= (2**m - 1) // (2**n - 1)
                return a
        else:
            # upscale to a multiple of `n` bits,
            # then downscale with precision loss
            prec_loss()
            o = (m // n + 1) * n
            if copy:
                b = np.empty(a.shape, _dtype_bits(kind, o))
                np.multiply(a, (2**o - 1) // (2**n - 1), out=b, dtype=b.dtype)
                b //= 2**(o - m)
                return b
            else:
                a = a.astype(_dtype_bits(kind, o, a.dtype.itemsize),
                             copy=False)
                a *= (2**o - 1) // (2**n - 1)
                a //= 2**(o - m)
                return a

    if kind_in in 'ui':
        imin_in = np.iinfo(dtype_in).min
        imax_in = np.iinfo(dtype_in).max
    if kind_out in 'ui':
        imin_out = np.iinfo(dtype_out).min
        imax_out = np.iinfo(dtype_out).max

    # any -> binary
    if kind_out == 'b':
        if kind_in in "fi":
            sign_loss()
        prec_loss()
        return image > dtype_in(dtype_range[dtype_in][1] / 2)

    # binary -> any
    if kind_in == 'b':
        result = image.astype(dtype_out)
        if kind_out != 'f':
            result *= dtype_out(dtype_range[dtype_out][1])
        return result

    # float -> any
    if kind_in == 'f':
        if np.min(image) < -1.0 or np.max(image) > 1.0:
            raise ValueError("Images of type float must be between -1 and 1.")
        if kind_out == 'f':
            # float -> float
            if itemsize_in > itemsize_out:
                prec_loss()
            return image.astype(dtype_out)

        # floating point -> integer
        prec_loss()
        # use float type that can represent output integer type
        computation_type = _dtype_itemsize(itemsize_out, dtype_in, np.float32,
                                           np.float64)

        if not uniform:
            if kind_out == 'u':
                image_out = np.multiply(image,
                                        imax_out,
                                        dtype=computation_type)
            else:
                image_out = np.multiply(image, (imax_out - imin_out) / 2,
                                        dtype=computation_type)
                image_out -= 1.0 / 2.
            np.rint(image_out, out=image_out)
            np.clip(image_out, imin_out, imax_out, out=image_out)
        elif kind_out == 'u':
            image_out = np.multiply(image,
                                    imax_out + 1,
                                    dtype=computation_type)
            np.clip(image_out, 0, imax_out, out=image_out)
        else:
            image_out = np.multiply(image, (imax_out - imin_out + 1.0) / 2.0,
                                    dtype=computation_type)
            np.floor(image_out, out=image_out)
            np.clip(image_out, imin_out, imax_out, out=image_out)
        return image_out.astype(dtype_out)

    # signed/unsigned int -> float
    if kind_out == 'f':
        if itemsize_in >= itemsize_out:
            prec_loss()

        # use float type that can exactly represent input integers
        computation_type = _dtype_itemsize(itemsize_in, dtype_out, np.float32,
                                           np.float64)

        if kind_in == 'u':
            # using np.divide or np.multiply doesn't copy the data
            # until the computation time
            image = np.multiply(image, 1. / imax_in, dtype=computation_type)
            # DirectX uses this conversion also for signed ints
            # if imin_in:
            #     np.maximum(image, -1.0, out=image)
        else:
            image = np.add(image, 0.5, dtype=computation_type)
            image *= 2 / (imax_in - imin_in)

        return np.asarray(image, dtype_out)

    # unsigned int -> signed/unsigned int
    if kind_in == 'u':
        if kind_out == 'i':
            # unsigned int -> signed int
            image = _scale(image, 8 * itemsize_in, 8 * itemsize_out - 1)
            return image.view(dtype_out)
        else:
            # unsigned int -> unsigned int
            return _scale(image, 8 * itemsize_in, 8 * itemsize_out)

    # signed int -> unsigned int
    if kind_out == 'u':
        sign_loss()
        image = _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out)
        result = np.empty(image.shape, dtype_out)
        np.maximum(image, 0, out=result, dtype=image.dtype, casting='unsafe')
        return result

    # signed int -> signed int
    if itemsize_in > itemsize_out:
        return _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out - 1)

    image = image.astype(_dtype_bits('i', itemsize_out * 8))
    image -= imin_in
    image = _scale(image, 8 * itemsize_in, 8 * itemsize_out, copy=False)
    image += imin_out
    return image.astype(dtype_out)
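Stripped of the dispatch, the non-uniform float-to-uint8 path is just scale, rint, clip (a sketch for inputs already in [0, 1]):

import numpy as np

image = np.array([0.0, 0.25, 0.5, 1.0])
out = np.rint(image * 255)
out = np.clip(out, 0, 255).astype(np.uint8)   # -> [  0  64 128 255]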
Example #32
def save_mat( mat, info, args) :
    if os.path.exists( info[ "output"]) and not args.force and not is_same_path( info[ "output"], '/dev/stdout') :
        if not prompt( "File '" + info[ "output"] + "' already exists, overwrite?", True) :
            return
    if args.verbose :
        verbose( info)
    yuv_cs = "bt601"
    if args.output_yuv_color :
        yuv_cs = args.output_yuv_color
    yuv_fullrange = True
    if args.output_yuv_range and not args.output_yuv_range.startswith( "full") :
        yuv_fullrange = False
    if args.output_type == "jpg" :
        encode_image( info[ "output"], ".jpg", mat)
    elif args.output_type == "png" :
        if np.issubdtype( mat.dtype, np.floating) and info[ "origin_dtype"] == np.uint16:
            encode_image( info[ "output"], ".png", np.round( ( mat * 256)).astype( np.uint16))
        else:
            encode_image( info[ "output"], ".png", mat)
    elif args.output_type == "bmp" :
        encode_image( info[ "output"], ".bmp", mat)
    elif args.output_type == 'csv' :
        save_csv( mat, info)
    elif args.output_type == "u8" :
        if info[ "channel"] == 3 :
            mat = cv.cvtColor( mat, cv.COLOR_BGR2GRAY)
        elif info[ "channel"] == 4 :
            mat = cv.cvtColor( mat, cv.COLOR_BGRA2GRAY)
        else :
            mat = np.uint8( np.rint( mat))
        # mat.tofile( info[ "output"])
        mat_2_file_or_stdout( mat, info[ "output"])
    elif args.output_type == "u16" :
        if info[ "channel"] == 3 :
            mat = cv.cvtColor( mat, cv.COLOR_BGR2GRAY)
        elif info[ "channel"] == 4 :
            mat = cv.cvtColor( mat, cv.COLOR_BGRA2GRAY)
        mat = np.uint16( np.rint( mat * 256))
        # mat.tofile( info[ "output"])
        mat_2_file_or_stdout( mat, info[ "output"])
    elif args.output_type == "u32":
        if info[ "channel"] == 3 :
            mat = cv.cvtColor( mat, cv.COLOR_BGR2GRAY)
        elif info[ "channel"] == 4 :
            mat = cv.cvtColor( mat, cv.COLOR_BGRA2GRAY)
        mat = np.uint32( np.rint( mat * 16843009))
        # mat.tofile( info[ "output"])
        mat_2_file_or_stdout( mat, info[ "output"])
    elif args.output_type == "f32" :
        if info[ "channel"] == 3 :
            mat = cv.cvtColor( mat, cv.COLOR_BGR2GRAY)
        elif info[ "channel"] == 4 :
            mat = cv.cvtColor( mat, cv.COLOR_BGRA2GRAY)
        mat = mat.astype( np.float32) / 255.0
        # mat.tofile( info[ "output"])
        mat_2_file_or_stdout( mat, info[ "output"])
    elif args.output_type == "bgr" :
        if info[ "channel"] == 1 :
            mat = np.uint8( np.rint( mat))
            mat = cv.cvtColor( mat, cv.COLOR_GRAY2BGR)
        elif info[ "channel"] == 4 :
            mat = mat[ :, :, :3]
        # mat.tofile( info[ "output"])
        mat_2_file_or_stdout( mat, info[ "output"])
    elif args.output_type == "rgb" :
        if info[ "channel"] == 1 :
            mat = np.uint8( np.rint( mat))
            mat = cv.cvtColor( mat, cv.COLOR_GRAY2BGR)
        elif info[ "channel"] == 4 :
            mat = mat[ :, :, :3]
        mat = cv.cvtColor( mat, cv.COLOR_BGR2RGB)
        # mat.tofile( info[ "output"])
        mat_2_file_or_stdout( mat, info[ "output"])
    elif args.output_type == "rgba" :
        if info[ "channel"] == 1 :
            mat = np.uint8( np.rint( mat))
            mat = cv.cvtColor( mat, cv.COLOR_GRAY2BGRA)
        elif info[ "channel"] == 3 :
            mat = cv.cvtColor( mat, cv.COLOR_BGR2BGRA)
        mat = cv.cvtColor( mat, cv.COLOR_BGRA2RGBA)
        # mat.tofile( info[ "output"])
        mat_2_file_or_stdout( mat, info[ "output"])
    elif args.output_type == "bgra" :
        if info[ "channel"] == 1 :
            mat = np.uint8( np.rint( mat))
            mat = cv.cvtColor( mat, cv.COLOR_GRAY2BGRA)
        elif info[ "channel"] == 3 :
            mat = cv.cvtColor( mat, cv.COLOR_BGR2BGRA)
        # mat.tofile( info[ "output"])
        mat_2_file_or_stdout( mat, info[ "output"])
    elif args.output_type == "yuv" :
        if info[ "channel"] == 1 :
            mat = np.uint8( np.rint( mat))
            mat = cv.cvtColor( mat, cv.COLOR_GRAY2BGR)
        save_bgr2yuv( mat, yuv_cs, yuv_fullrange, info)
    elif args.output_type == "nv21" :
        h = info[ "height"]
        w = info[ "width"]
        if h % 2 != 0 or w % 2 != 0 :
            print( "Error: cannot save nv21 image with height = " + str( h) + " and width = " + str( w), file = sys.stderr)
            return
        if info[ "channel"] == 1 :
            mat = np.uint8( np.rint( mat))
            mat = cv.cvtColor( mat, cv.COLOR_GRAY2BGR)
        save_bgr2nv21( mat, yuv_cs, yuv_fullrange, info)
    elif args.output_type == "nv12" :
        h = info[ "height"]
        w = info[ "width"]
        if h % 2 != 0 or w % 2 != 0 :
            print( "Error: cannot save nv12 image with height = " + str( h) + " and width = " + str( w), file = sys.stderr)
            return
        if info[ "channel"] == 1 :
            mat = np.uint8( np.rint( mat))
            mat = cv.cvtColor( mat, cv.COLOR_GRAY2BGR)
        save_bgr2nv12( mat, yuv_cs, yuv_fullrange, info)
Example #33
con1 = np.array([[1, 2], [3, 4]])
con2 = np.array([[5, 6], [7, 8]])
con3 = np.concatenate((con1, con2))
con4 = np.concatenate((con1, con2), axis=1)

# constants
cost1 = np.pi
cost2 = np.e
cost3 = np.nan
cost4 = np.inf

# rounding
arr1 = np.array([1.1, 1.5, 1.930999])
arr2 = np.floor(arr1)
arr3 = np.ceil(arr1)
arr4 = np.rint(arr1)
arr5 = np.round(arr1, 2)

# standard functions
fun1 = np.array([2, 4, 3])
fun2 = fun1.sum()
fun3 = fun1.prod()
fun4 = np.sum(fun1)
fun5 = np.prod(fun1)

stat = np.array([2, 1, 9])
media = stat.mean()
media1 = np.mean(stat)
varia = stat.var()
varia1 = np.var(stat)
sig = stat.std()
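Note that np.rint rounds halfway cases to the nearest even integer (banker's rounding), unlike round-half-up:

import numpy as np

half = np.array([0.5, 1.5, 2.5, 3.5])
np.rint(half)   # -> [0. 2. 2. 4.]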
Example #34
def simulate_map(dt=0.04,
                 length_ra=120.,
                 length_dec=120.,
                 speed=4.,
                 spacing=0.5,
                 count_map=None,
                 noise_amplitude=1.,
                 width_ra=None,
                 width_dec=None,
                 outdir='sim/',
                 baseline="flat",
                 mean_ra=180,
                 mean_dec=70,
                 srcname='Dummy',
                 channel_ratio=1):
    """Simulate a map.

    Parameters
    ----------
    dt : float
        The integration time in seconds
    length_ra, length_dec : float
        Length of the scan along RA and Dec, in arcminutes
    speed : float
        Speed of the scan in arcminutes / second
    noise_amplitude : float
        Noise level in counts
    spacing : float
        Spacing between scans, in arcminutes
    baseline : str
        "flat", "slope" (linearly increasing/decreasing), "messy"
        (random walk) or a number (which gives an amplitude to the random-walk
        baseline, that is 20 for "messy").
    count_map : function
        Flux distribution function, centered on zero. If None, a default
        constant shape is assumed
    outdir : str or iterable (str, str)
        If a single string, put all files in that directory; if two strings,
        put RA and DEC scans in the two directories.
    """

    if isinstance(outdir, six.string_types):
        outdir = (outdir, outdir)
    outdir_ra = outdir[0]
    outdir_dec = outdir[1]

    mkdir_p(outdir_ra)
    mkdir_p(outdir_dec)

    if count_map is None:
        count_map = _default_map_shape

    if baseline == "flat":
        mmin = mmax = 0
        qmin = qmax = 0
        stochastic_amp = 0
    elif baseline == "slope":
        mmin, mmax = -5, 5
        qmin, qmax = 0, 150
        stochastic_amp = 0
    elif baseline == "messy":
        mmin, mmax = 0, 0
        qmin, qmax = 0, 0
        stochastic_amp = 20
    elif _is_number(baseline):
        mmin, mmax = 0, 0
        qmin, qmax = 0, 0
        stochastic_amp = float(baseline)
    else:
        raise ValueError("baseline has to be 'flat', 'slope', 'messy' or a "
                         "number")

    nbins_ra = int(np.rint(length_ra / speed / dt))
    nbins_dec = int(np.rint(length_dec / speed / dt))

    times_ra = np.arange(nbins_ra) * dt
    times_dec = np.arange(nbins_dec) * dt

    ra_array = np.arange(-nbins_ra / 2,
                         nbins_ra / 2) / nbins_ra * length_ra / 60
    dec_array = np.arange(-nbins_dec / 2,
                          nbins_dec / 2) / nbins_dec * length_dec / 60
    # In degrees!
    if width_dec is None:
        width_dec = length_dec
    if width_ra is None:
        width_ra = length_ra
    # Dec scans
    if HAS_MPL:
        fig = plt.figure()

    delta_decs = np.arange(-width_dec / 2, width_dec / 2 + spacing,
                           spacing) / 60
    print("Simulating dec scans...")
    for i_d, delta_dec in enumerate(tqdm(delta_decs)):

        start_dec = mean_dec + delta_dec
        m = ra.uniform(mmin, mmax)
        q = ra.uniform(qmin, qmax)
        signs = np.random.choice([-1, 1], nbins_ra)
        stochastic = \
            np.cumsum(signs) * stochastic_amp / np.sqrt(nbins_ra)

        baseline = m * ra_array + q + stochastic
        counts = count_map(ra_array, delta_dec) + \
            ra.normal(0, noise_amplitude, ra_array.shape) + \
            baseline

        actual_ra = mean_ra + ra_array / np.cos(np.radians(start_dec))

        if i_d % 2 != 0:
            actual_ra = actual_ra[::-1]
        save_scan(times_ra,
                  actual_ra,
                  np.zeros_like(actual_ra) + start_dec, {
                      'Ch0': counts,
                      'Ch1': counts * channel_ratio
                  },
                  filename=os.path.join(outdir_ra, 'Ra{}.fits'.format(i_d)),
                  src_ra=mean_ra,
                  src_dec=mean_dec,
                  srcname=srcname)
        if HAS_MPL:
            plt.plot(ra_array, counts)

    if HAS_MPL:
        fig.savefig(os.path.join(outdir_ra, "allscans_ra.png"))
        plt.close(fig)

        fig = plt.figure()
    delta_ras = np.arange(-width_ra / 2, width_ra / 2 + spacing, spacing) / 60
    print("Simulating RA scans...")
    # RA scans
    for i_r, delta_ra in enumerate(tqdm(delta_ras)):
        start_ra = delta_ra / np.cos(np.radians(mean_dec)) + mean_ra
        m = ra.uniform(mmin, mmax)
        q = ra.uniform(qmin, qmax)

        signs = np.random.choice([-1, 1], nbins_dec)
        stochastic = \
            np.cumsum(signs) * stochastic_amp / np.sqrt(nbins_dec)

        baseline = m * dec_array + q + stochastic
        counts = count_map(delta_ra, dec_array) + \
            ra.normal(0, noise_amplitude, dec_array.shape) + \
            baseline

        if i_r % 2 != 0:
            dec_array = dec_array[::-1]
        save_scan(times_dec,
                  np.zeros_like(dec_array) + start_ra,
                  dec_array + mean_dec, {
                      'Ch0': counts,
                      'Ch1': counts * channel_ratio
                  },
                  filename=os.path.join(outdir_dec, 'Dec{}.fits'.format(i_r)),
                  src_ra=mean_ra,
                  src_dec=mean_dec,
                  srcname=srcname)

        if HAS_MPL:
            plt.plot(dec_array, counts)

    if HAS_MPL:
        fig.savefig(os.path.join(outdir_dec, "allscans_dec.png"))
        plt.close(fig)
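A minimal usage sketch for the function above, assuming save_scan, mkdir_p and the default map shape come from the same module; the output directories and parameter values are illustrative:

# simulate a 120x120 arcmin map with a sloping baseline and the default source shape
simulate_map(dt=0.04, length_ra=120., length_dec=120., speed=4., spacing=0.5,
             noise_amplitude=1., baseline="slope",
             outdir=('sim/ra', 'sim/dec'), srcname='Dummy')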
Пример #35
0
    def _convert_features_to_array_coords(self, depth_precision):
        # prepare some lists with:
        # - the geographic position of the features (self.valsou_geo)
        # - the local (projected) position of the features (self.valsou_loc)
        # - the array-base coords of the features (self.valsou_array)
        # - the closest array nodes for each feature (self.valsou_closest)
        # - a boolean list of visited features (self.valsou_visited)

        # logger.debug("converting features to array coords ...")

        # first retrieve [long, lat, depth]
        self.valsou_geo = list()
        for feature in self.valsou_features:

            # retrieve depth value
            s57_valsou = None
            for attr in feature.attributes:

                if attr.acronym == 'VALSOU':
                    # invert sign due to the CSAR/BAG convention (depths are negative)
                    s57_valsou = -float(attr.value)
                    # logger.debug("VALSOU value: %f" % s57_valsou)
                    continue

            # append [long, lat, depth]
            self.valsou_geo.append(
                [feature.centroid.x, feature.centroid.y, s57_valsou])
        # logger.debug("lon, lat, d: %s" % self.valsou_geo)

        # store the coordinate transform from CSAR CRS to geo (using GDAL)
        try:
            osr_grid = osr.SpatialReference()
            # logger.debug("cur_grids: %s" % self.grids.cur_grids)
            osr_grid.ImportFromWkt(self.grids.cur_grids.bbox().hrs)
            osr_geo = osr.SpatialReference()
            osr_geo.ImportFromEPSG(4326)  # geographic WGS84
            # self.loc2geo = osr.CoordinateTransformation(osr_bag, osr_geo)
            self.geo2loc = osr.CoordinateTransformation(osr_geo, osr_grid)

        except Exception as e:
            raise RuntimeError(
                "unable to create a valid coords transform: %s" % e)

        # convert s57 features to grid CRS coords
        self.valsou_utm = np.array(
            self.geo2loc.TransformPoints(np.array(self.valsou_geo,
                                                  np.float64)), np.float64)
        # logger.debug("x, y, z: %s" % self.valsou_loc)

        # create a list to flag the visited features
        self.valsou_visited = [False] * len(self.valsou_utm)
        # logger.debug("visited: %s" % self.valsou_visited)

        # convert feature to array coords
        valsou_array = np.copy(self.valsou_utm)
        valsou_array[:, 0] = (self.valsou_utm[:, 0] - self.grids.cur_grids.bbox().transform[0]) \
                             / self.grids.cur_grids.bbox().transform[1] - 0.5
        valsou_array[:, 1] = (self.valsou_utm[:, 1] - self.grids.cur_grids.bbox().transform[3]) \
                             / self.grids.cur_grids.bbox().transform[5] - 0.5
        # logger.debug("array: %s" % self.valsou_array)

        # convert to the closest array coordinates
        self.valsou_closest = np.empty_like(valsou_array)
        self.valsou_closest[:, 0] = np.rint(valsou_array[:, 0])
        self.valsou_closest[:, 1] = np.rint(valsou_array[:, 1])
        self.valsou_closest[:, 2] = np.around(valsou_array[:, 2],
                                              decimals=depth_precision)
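The array-coordinate conversion above follows the GDAL geotransform convention (origin plus pixel size), with a half-pixel shift to move from the pixel-corner to the pixel-center frame. A self-contained sketch with made-up transform values:

import numpy as np

# illustrative GDAL-style geotransform: (x_origin, x_size, 0, y_origin, 0, y_size)
transform = (300000.0, 2.0, 0.0, 5000000.0, 0.0, -2.0)
easting, northing = 300123.4, 4999876.5

col = (easting - transform[0]) / transform[1] - 0.5   # pixel-center convention
row = (northing - transform[3]) / transform[5] - 0.5
closest_node = (int(np.rint(row)), int(np.rint(col)))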
Пример #36
0
    def _return(self, charge):
        if self.type == int:
            charge = np.rint(charge).astype(int)

        return charge[0] if self.scalar else charge
Пример #37
0
    def _return(self, mass):
        if self.type == int:
            mass = np.rint(mass).astype(int)

        return mass[0] if self.scalar else mass
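Examples #36 and #37 cast after rounding because np.rint preserves the floating dtype of its input; the .astype(int) is what actually produces integers:

import numpy as np

charge = np.array([0.9999999, 2.0000001])
print(np.rint(charge))              # [1. 2.] -- still float64
print(np.rint(charge).astype(int))  # [1 2]   -- integer dtype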
Пример #38
0
def gaussian_noise(image, std_dev):
    noise = np.rint(
        np.random.normal(loc=0.0, scale=std_dev, size=np.shape(image)))
    return Image.fromarray(np.clip(image + noise, 0, 255).astype(np.uint8))
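Note that gaussian_noise expects a numpy array (a PIL Image would fail on the image + noise addition) and returns a PIL Image. A usage sketch with a hypothetical file name:

import numpy as np
from PIL import Image

img = np.asarray(Image.open("photo.png").convert("RGB"))  # hypothetical input file
noisy = gaussian_noise(img, std_dev=10.0)
noisy.save("photo_noisy.png")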
Пример #39
0
    def generate_routefile(self, seed):

        if seed >= 0:
            np.random.seed(seed)  # make tests reproducible

        # the generation of cars is distributed according to a weibull distribution
        timings = np.random.weibull(2, self._n_cars_generated)
        timings = np.sort(timings)

        # reshape the distribution to fit the interval 0:max_steps
        car_gen_steps = []
        min_old = math.floor(timings[1])
        max_old = math.ceil(timings[-1])
        min_new = 0
        max_new = self._max_steps
        for value in timings:
            car_gen_steps = np.append(car_gen_steps, ((max_new - min_new) /
                                                      (max_old - min_old)) *
                                      (value - min_old) + min_new)

        car_gen_steps = np.rint(
            car_gen_steps
        )  # round every value to int -> effective steps when a car will be generated

        # produce the file for cars generation, one car per line
        with open("intersection/trips.trips.4L.xml", "w") as routes:
            print("""<routes>
            <vType accel="1.0" decel="4.5" id="standard_car" length="5.0" minGap="2.5" maxSpeed="25" sigma="0.5" />

            <route id="W_N" edges="W2TL TL2N"/>
            <route id="W_E" edges="W2TL TL2E"/>
            <route id="W_S" edges="W2TL TL2S"/>
            <route id="N_W" edges="N2TL TL2W"/>
            <route id="N_E" edges="N2TL TL2E"/>
            <route id="N_S" edges="N2TL TL2S"/>
            <route id="E_W" edges="E2TL TL2W"/>
            <route id="E_N" edges="E2TL TL2N"/>
            <route id="E_S" edges="E2TL TL2S"/>
            <route id="S_W" edges="S2TL TL2W"/>
            <route id="S_N" edges="S2TL TL2N"/>
            <route id="S_E" edges="S2TL TL2E"/>""",
                  file=routes)

            for car_counter, step in enumerate(car_gen_steps):
                straight_or_turn = np.random.uniform()
                if straight_or_turn < 0.75:  # choose direction: straight or turn - 75% of times the car goes straight
                    route_straight = np.random.randint(
                        1, 5)  # choose a random source & destination
                    if route_straight == 1:
                        print(
                            '    <vehicle id="W_E_%i" type="standard_car" route="W_E" depart="%s" departLane="random" departSpeed="10" />'
                            % (car_counter, step),
                            file=routes)
                    elif route_straight == 2:
                        print(
                            '    <vehicle id="E_W_%i" type="standard_car" route="E_W" depart="%s" departLane="random" departSpeed="10" />'
                            % (car_counter, step),
                            file=routes)
                    elif route_straight == 3:
                        print(
                            '    <vehicle id="N_S_%i" type="standard_car" route="N_S" depart="%s" departLane="random" departSpeed="10" />'
                            % (car_counter, step),
                            file=routes)
                    else:
                        print(
                            '    <vehicle id="S_N_%i" type="standard_car" route="S_N" depart="%s" departLane="random" departSpeed="10" />'
                            % (car_counter, step),
                            file=routes)
                else:  # 25% of the time the car turns
                    route_turn = np.random.randint(
                        1, 9)  # choose a random source & destination
                    if route_turn == 1:
                        print(
                            '    <vehicle id="W_N_%i" type="standard_car" route="W_N" depart="%s" departLane="random" departSpeed="10" />'
                            % (car_counter, step),
                            file=routes)
                    elif route_turn == 2:
                        print(
                            '    <vehicle id="W_S_%i" type="standard_car" route="W_S" depart="%s" departLane="random" departSpeed="10" />'
                            % (car_counter, step),
                            file=routes)
                    elif route_turn == 3:
                        print(
                            '    <vehicle id="N_W_%i" type="standard_car" route="N_W" depart="%s" departLane="random" departSpeed="10" />'
                            % (car_counter, step),
                            file=routes)
                    elif route_turn == 4:
                        print(
                            '    <vehicle id="N_E_%i" type="standard_car" route="N_E" depart="%s" departLane="random" departSpeed="10" />'
                            % (car_counter, step),
                            file=routes)
                    elif route_turn == 5:
                        print(
                            '    <vehicle id="E_N_%i" type="standard_car" route="E_N" depart="%s" departLane="random" departSpeed="10" />'
                            % (car_counter, step),
                            file=routes)
                    elif route_turn == 6:
                        print(
                            '    <vehicle id="E_S_%i" type="standard_car" route="E_S" depart="%s" departLane="random" departSpeed="10" />'
                            % (car_counter, step),
                            file=routes)
                    elif route_turn == 7:
                        print(
                            '    <vehicle id="S_W_%i" type="standard_car" route="S_W" depart="%s" departLane="random" departSpeed="10" />'
                            % (car_counter, step),
                            file=routes)
                    elif route_turn == 8:
                        print(
                            '    <vehicle id="S_E_%i" type="standard_car" route="S_E" depart="%s" departLane="random" departSpeed="10" />'
                            % (car_counter, step),
                            file=routes)

            print("</routes>", file=routes)
Пример #40
0
            sitk_maskimg.SetSpacing(spacing)
            sitk_maskimg.SetOrigin(origin)
            # substring of input file path ang save output mask file
            sub_img_file = img_file[len(luna_subset_path):-4]
            sitk.WriteImage(sitk_maskimg, luna_subset_mask_path + sub_img_file + "_segmentation.mhd")
        if mini_df.shape[0] > 0:
            # set out mask data once
            mask_itk = np.zeros(shape=(num_z, height, width), dtype=float)
            # go through all nodes in one series image
            for node_idx, cur_row in mini_df.iterrows():
                node_x = cur_row["coordX"]
                node_y = cur_row["coordY"]
                node_z = cur_row["coordZ"]
                diam = cur_row["diameter_mm"]
                center = np.array([node_x, node_y, node_z])
                # nodule center
                v_center = np.rint((center - origin) / spacing)
                # nodule diameter in voxels per axis (the origin is an offset
                # for coordinates and must not be subtracted from a length)
                v_diam = np.rint(diam / spacing)
                # convert x,y,z order v_center to z,y,x order
                v_center[0], v_center[1], v_center[2] = v_center[2], v_center[1], v_center[0]
                make_mask(mask_itk, v_center, diam)

            mask_itk = np.uint8(mask_itk * 255.)
            mask_itk = np.clip(mask_itk, 0, 255).astype('uint8')
            sitk_maskimg = sitk.GetImageFromArray(mask_itk)
            sitk_maskimg.SetSpacing(spacing)
            sitk_maskimg.SetOrigin(origin)
            sub_img_file = img_file[len(luna_subset_path):-4]
            sitk.WriteImage(sitk_maskimg, luna_subset_mask_path + sub_img_file + "_segmentation.mhd")
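The world-to-voxel conversion used above is the usual SimpleITK pattern: subtract the scanner origin, divide by the voxel spacing, round to the nearest index. A self-contained sketch with made-up origin/spacing values:

import numpy as np

origin = np.array([-195.5, -190.0, -340.2])   # illustrative world origin (mm), x,y,z
spacing = np.array([0.76, 0.76, 2.5])         # illustrative voxel size (mm)
center = np.array([-100.0, -50.0, -200.0])    # nodule center in world coordinates

v_center = np.rint((center - origin) / spacing).astype(int)  # voxel indices, x,y,z
v_diam = np.rint(30.0 / spacing).astype(int)   # a 30 mm diameter, in voxels per axis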
Пример #41
0
    def fastS4(self, a = 1.0, dt = 0.002, X4timeset = 0, ppp = [1,1,1], outputfile = ''):
        """ Compute four-point dynamic structure factor at peak timescale of dynamic susceptibility

            Based on overlap function Qt and its corresponding dynamic susceptibility QtX4     
            a is the cutoff for the overlap function, default is 1.0 (EAM) and 0.3(LJ) (0.3<d>)
            dt is the timestep of MD simulations
            X4timeset is the peaktime scale of X4, if 0 will use the calculated one
            Dynamics should be calculated before computing S4
            Only considered the particles which are fast
            The Qt and X4 should be calculated first
            ppp is the periodic boundary conditions
        """
        print ('-----------------Compute dynamic S4(q) of fast particles --------------')

        #-----------calculate overall dynamics first----------------
        results = np.zeros(((self.SnapshotNumber - 1), 3))
        names  = 't  Qt  QtX4'
        
        cal_Qt   = pd.DataFrame(np.zeros((self.SnapshotNumber-1))[np.newaxis, :])
        deltat   = np.zeros(((self.SnapshotNumber - 1), 2), dtype = int) #deltat, deltatcounts
        for n in range(self.SnapshotNumber - 1):  #time interval
            RII = self.Positions[n + 1:] - self.Positions[n]
            hmatrixinv = np.linalg.inv(self.hmatrix[n])
            for ii in range(len(RII)):
                matrixij = np.dot(RII[ii], hmatrixinv)
                RII[ii]  = np.dot(matrixij - np.rint(matrixij) * ppp, self.hmatrix[n]) #remove PBC

            distance  = np.square(RII).sum(axis = 2)
            RII_Qt    = (np.sqrt(distance) >= a).sum(axis = 1)
            cal_Qt    = pd.concat([cal_Qt, pd.DataFrame(RII_Qt[np.newaxis, :])])
        
        cal_Qt       = cal_Qt.iloc[1:]
        deltat[:, 0] = np.array(cal_Qt.columns) + 1 #Timeinterval
        deltat[:, 1] = np.array(cal_Qt.count())     #Timeinterval frequency

        results[:, 0] = deltat[:, 0] * self.TimeStep * dt 
        results[:, 1] = cal_Qt.mean() / self.ParticleNumber
        results[:, 2] = ((cal_Qt**2).mean() - (cal_Qt.mean())**2) / self.ParticleNumber
        if outputfile:
            np.savetxt('Dynamics.' + outputfile, results, fmt='%.6f', header = names, comments = '')

        #-----------calculate S4(q) of fast particles----------------
        twopidl = 2 * pi / self.Boxlength[0]
        if self.Boxlength[0] <= 40.0:
            Numofq = int(self.Boxlength[0] / twopidl)

        elif self.Boxlength[0] <= 80.0:
            Numofq = int(self.Boxlength[0] / 2 / twopidl)

        elif self.Boxlength[0] > 80.0:
            Numofq = int(self.Boxlength[0] / 4 / twopidl)

        wavevector = choosewavevector(Numofq, self.ndim) #Only S4(q) in the low wavenumber range is of interest
        qvalue, qcount = np.unique(wavevector[:, 0], return_counts = True)
        sqresults = np.zeros((len(wavevector[:, 0]), 3)) #the first column accounts for the wavenumber

        if X4timeset:
            X4time = int(X4timeset / dt / self.TimeStep)
        else:
            X4time = deltat[results[:, 2].argmax(), 0] 

        for n in range(self.SnapshotNumber - X4time):
            RII = self.Positions[n + X4time] - self.Positions[n]
            hmatrixinv = np.linalg.inv(self.hmatrix[n])
            matrixij   = np.dot(RII, hmatrixinv)
            RII        = np.dot(matrixij - np.rint(matrixij) * ppp, self.hmatrix[n]) #remove PBC

            #RII = np.square(RII).sum(axis = 1)
            #RII = np.where(np.sqrt(RII) >= a, 1, 0)
            RII = np.linalg.norm(RII, axis = 1)
            RII = np.where(RII >= a, 1, 0)
            sqtotal = np.zeros((len(wavevector[:, 0]), 2))
            for i in range(self.ParticleNumber):
                medium   = twopidl * (self.Positions[n][i] * wavevector[:, 1:]).sum(axis = 1)
                sqtotal[:, 0] += np.sin(medium) * RII[i]
                sqtotal[:, 1] += np.cos(medium) * RII[i]
            
            sqresults[:, 1] += np.square(sqtotal).sum(axis = 1) / self.ParticleNumber
            sqresults[:, 2] += sqtotal[:, 1]

        sqresults[:, 0]  = wavevector[:, 0]
        sqresults[:, 1]  = sqresults[:, 1] / (self.SnapshotNumber - X4time)
        sqresults[:, 2]  = np.square(sqresults[:, 2] / (self.SnapshotNumber - X4time)) / self.ParticleNumber

        sqresults = pd.DataFrame(sqresults)
        results   = np.array(sqresults.groupby(sqresults[0]).mean())
        results[:, 1] = results[:, 0] - results[:, 1] / qcount

        qvalue    = twopidl * np.sqrt(qvalue)
        results   = np.column_stack((qvalue, results))
        names = 'q  S4a(q)  S4b(q)'
        if outputfile:
            np.savetxt(outputfile, results, fmt='%.6f', header = names, comments = '')

        print ('--------- Compute S4(q) of fast particles over ------')
        return results, names
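The matrixij - np.rint(matrixij) * ppp step above is the minimum-image convention applied in fractional coordinates. A standalone sketch for a cubic box (box size and displacement values are illustrative):

import numpy as np

box = np.diag([10.0, 10.0, 10.0])   # h-matrix of a 10x10x10 box
ppp = np.array([1, 1, 1])           # periodic in all three directions
rij = np.array([9.2, -9.7, 0.4])    # raw displacement between two particles

frac = rij @ np.linalg.inv(box)     # to fractional coordinates
frac -= np.rint(frac) * ppp         # wrap into [-0.5, 0.5)
rij_min = frac @ box                # back to Cartesian: [-0.8, 0.3, 0.4]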
Пример #42
0
import numpy as np
from sklearn.metrics import accuracy_score, roc_auc_score


def roc_auc_score_FIXED(y_true, y_pred):
    # roc_auc_score raises when y_true contains a single class;
    # fall back to accuracy on the rounded predictions in that case
    if len(np.unique(y_true)) <= 1:
        return accuracy_score(y_true, np.rint(y_pred))
    return roc_auc_score(y_true, y_pred)
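A quick demonstration of the degenerate case the wrapper guards against (values are illustrative):

import numpy as np

y_true = np.array([1, 1, 1, 1])             # single-class batch: plain roc_auc_score raises
y_pred = np.array([0.9, 0.8, 0.7, 0.2])
print(roc_auc_score_FIXED(y_true, y_pred))  # accuracy of np.rint(y_pred) -> 0.75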
Пример #43
0
    def partial(self, qmax, a = 1.0, dt = 0.002, ppp = [1,1,1], outputfile = ''):
        """ Compute self-intermediate scattering functions ISF, dynamic susceptibility ISFX4 based on ISF
            Overlap function Qt and its corresponding dynamic susceptibility QtX4
            Mean-square displacements msd; non-Gaussian parameter alpha2
        
            qmax is the wavenumber corresponding to the first peak of structure factor
            qmax accounts for six components so it is a list
            a is the cutoff for the overlap function, default is 1.0 (EAM) and 0.3(LJ) (0.3<d>)
            dt is the timestep of MD simulations
            ppp is periodic boundary conditions
        """
        print ('-----------------Compute Partial Dynamics--------------------')

        partialresults = [] #a list containing results of all particle types           
        for i in self.Type:  #loop over different particle types
            #TYPESET = np.where(np.array(self.ParticleType) == i, 1, 0).astype(np.int)
            TYPESET = [j == i for j in self.ParticleType]

            results = np.zeros(((self.SnapshotNumber - 1), 7))
            names  = 't  ISF  ISFX4  Qt  QtX4  msd  alpha2'
            
            cal_isf  = pd.DataFrame(np.zeros((self.SnapshotNumber-1))[np.newaxis, :])
            cal_Qt   = pd.DataFrame(np.zeros((self.SnapshotNumber-1))[np.newaxis, :])
            cal_msd  = pd.DataFrame(np.zeros((self.SnapshotNumber-1))[np.newaxis, :])
            cal_alp  = pd.DataFrame(np.zeros((self.SnapshotNumber-1))[np.newaxis, :])
            deltat   = np.zeros(((self.SnapshotNumber - 1), 2), dtype = int) #deltat, deltatcounts
            for n in range(self.SnapshotNumber - 1):  #loop over time intervals
                #RII    = self.Positions[n + 1:] - self.Positions[n]
                RII  = [ii[TYPESET[n]] - self.Positions[n][TYPESET[n]] for ii in self.Positions[n+1:]]
                hmatrixinv = np.linalg.inv(self.hmatrix[n])
                for ii in range(len(RII)):
                    matrixij = np.dot(RII[ii], hmatrixinv)
                    RII[ii]  = np.dot(matrixij - np.rint(matrixij) * ppp, self.hmatrix[n]) #remove PBC

                RII       = np.array(RII)
                RII_isf   = ((np.cos(RII * qmax[i - 1]).mean(axis = 2))).sum(axis = 1) #index is timeinterval -1
                cal_isf   = pd.concat([cal_isf, pd.DataFrame(RII_isf[np.newaxis, :])])
                distance  = np.square(RII).sum(axis = 2)
                RII_Qt    = ((np.sqrt(distance) <= a)).sum(axis = 1)
                cal_Qt    = pd.concat([cal_Qt, pd.DataFrame(RII_Qt[np.newaxis, :])])
                cal_msd   = pd.concat([cal_msd, pd.DataFrame(distance.sum(axis = 1)[np.newaxis, :])])
                distance2 = (np.square(distance)).sum(axis = 1)
                cal_alp   = pd.concat([cal_alp, pd.DataFrame(distance2[np.newaxis, :])])
        
            cal_isf      = cal_isf.iloc[1:]
            cal_Qt       = cal_Qt.iloc[1:]
            cal_msd      = cal_msd.iloc[1:]
            cal_alp      = cal_alp.iloc[1:]
            deltat[:, 0] = np.array(cal_isf.columns) + 1 #Timeinterval
            deltat[:, 1] = np.array(cal_isf.count())     #Timeinterval frequency

            results[:, 0] = deltat[:, 0] * self.TimeStep * dt 
            results[:, 1] = cal_isf.mean() / self.TypeNumber[i - 1]
            results[:, 2] = ((cal_isf**2).mean() - (cal_isf.mean())**2) / self.TypeNumber[i - 1]
            results[:, 3] = cal_Qt.mean() / self.TypeNumber[i - 1]
            results[:, 4] = ((cal_Qt**2).mean() - (cal_Qt.mean())**2) / self.TypeNumber[i - 1]
            results[:, 5] = cal_msd.mean() / self.TypeNumber[i - 1]
            results[:, 6] = cal_alp.mean() / self.TypeNumber[i - 1]
            results[:, 6] = alpha2factor(self.ndim) * results[:, 6] / np.square(results[:, 5]) - 1.0

            if outputfile:
                np.savetxt('Type' + str(i) + '.' + outputfile, results, fmt='%.6f', header = names, comments = '')
            
            partialresults.append(results)

        print ('-----------------Compute Partial Dynamics Over--------------------')
        return partialresults, names
Пример #44
0
def trim_cell(relative_axes, cell, symprec):
    """
    relative_axes: relative axes to supercell axes
    Trim positions outside relative axes

    """
    positions = cell.get_scaled_positions()
    numbers = cell.get_atomic_numbers()
    masses = cell.get_masses()
    #magmoms = cell.get_magnetic_moments()
    lattice = cell.get_cell()
    trimed_lattice = np.dot(relative_axes.T, lattice)

    trimed_positions = []
    trimed_numbers = []
    if masses is None:
        trimed_masses = None
    else:
        trimed_masses = []
    # if magmoms is None:
    #    trimed_magmoms = None
    # else:
    #    trimed_magmoms = []
    extracted_atoms = []

    positions_in_new_lattice = np.dot(positions,
                                      np.linalg.inv(relative_axes).T)
    positions_in_new_lattice -= np.floor(positions_in_new_lattice)
    trimed_positions = np.zeros_like(positions_in_new_lattice)
    num_atom = 0

    mapping_table = np.arange(len(positions), dtype='intc')
    symprec2 = symprec**2
    for i, pos in enumerate(positions_in_new_lattice):
        is_overlap = False
        if num_atom > 0:
            diff = trimed_positions[:num_atom] - pos
            diff -= np.rint(diff)
            # Older numpy doesn't support axis argument.
            # distances = np.linalg.norm(np.dot(diff, trimed_lattice), axis=1)
            # overlap_indices = np.where(distances < symprec)[0]
            distances2 = np.sum(np.dot(diff, trimed_lattice)**2, axis=1)
            overlap_indices = np.where(distances2 < symprec2)[0]
            if len(overlap_indices) > 0:
                is_overlap = True
                mapping_table[i] = extracted_atoms[overlap_indices[0]]

        if not is_overlap:
            trimed_positions[num_atom] = pos
            num_atom += 1
            trimed_numbers.append(numbers[i])
            if masses is not None:
                trimed_masses.append(masses[i])
            # if magmoms is not None:
            #    trimed_magmoms.append(magmoms[i])
            extracted_atoms.append(i)

    trimed_cell = Atoms(
        numbers=trimed_numbers,
        masses=trimed_masses,
        # magmoms=trimed_magmoms,
        scaled_positions=trimed_positions[:num_atom],
        cell=trimed_lattice,
    )

    return trimed_cell, extracted_atoms, mapping_table
Пример #45
0
def prepare_displacement_matrices_homogeneous(A1,
                                              b1,
                                              A2,
                                              b2,
                                              displacement=None):
    """Compute matrices used for displacement estimation as defined by equations
(7.32) and (7.33) in Gunnar Farnebäck's thesis "Polynomial Expansion for
Orientation and Motion Estimation". Here we suppose an homogenous translation.

A1,b1: Local polynomial expension coefficients at time 1. A1 is a N+2
dimensional array, where the first N indices indicates the position in the
signal and the last two contains the matrix for each point. In the same way, b1
is a N+1 dimensional array. Such arrays can be obtained via QuadraticToAbc.

A2,b2: Local polynomial expension coefficients at time 2.

displacement: The global translation vector from time 1 to time 2.

----
Returns

A: Advected average of A1 and A2 matrices (Eq. 7.32)

Delta_b: advected difference of b2 and b1 (Eq. 7.33)
"""
    assert A1.shape == A2.shape
    assert A1.shape[:-1] == b1.shape
    assert A1.shape[-1] == b1.shape[-1]
    assert b1.shape == b2.shape
    shape = A1.shape[:-2]
    # N is the dimensionality of the signal we consider here (it might be
    # an hyperplane of the original signal), not the rank of the matrices and
    # vectors.
    N = len(shape)
    if displacement is None:
        displacement = np.zeros(N, dtype=A1.dtype)
    assert displacement.shape == (N, )
    # Integral part of the backward displacement vector
    displ = -np.rint(displacement).astype(np.int64)
    # Advect back A2 and b2 by rolling
    A = np.roll(A2, displ, axis=tuple(range(N)))
    Delta_b = -0.5 * np.roll(b2, displ, axis=tuple(range(N)))
    #take care of the margins by repeating the last available element of A2 or b2
    for dim, d in enumerate(displ):
        if d >= 0:
            # Use only A1 where A2 is not available
            A[(slice(None, None), ) * dim +
              (slice(0, d), )] = A1[(slice(None, None), ) * dim +
                                    (slice(0, d), )]
            # Use only b1 where b2 is not available (aims for a displacement equal to the homogeneous input)
            Delta_b[(slice(None, None), ) * dim +
                    (slice(0, d), )] = -0.5 * b1[(slice(None, None), ) * dim +
                                                 (slice(0, d), )]
            # Use the last available element of b2
            #Delta_b[(slice(None,None),)*dim + (slice(0,d),)] = -b2[(slice(None,None),)*dim + (slice(0,1),)]
        else:
            # Use only A1 where A2 is not available
            A[(slice(None, None), ) * dim +
              (slice(d, None), )] = A1[(slice(None, None), ) * dim +
                                       (slice(d, None), )]
            # Use only b1 where b2 is not available (aims for a displacement equal to the homogeneous input)
            Delta_b[(slice(None, None), ) * dim +
                    (slice(d, None), )] = -0.5 * b1[
                        (slice(None, None), ) * dim + (slice(d, None), )]
            # Use the last available element of b2
            #Delta_b[(slice(None,None),)*dim + (slice(-d,None),)] = -0.5*b2[(slice(None,None),)*dim + (slice(-1,None),)]
    #Advected average for A1 and A2
    A += A1
    A *= 0.5
    # Advected difference for b1 and b2, to which we add back the forward
    # rounded a priori displacement. Here we have to expand the displacement
    # vector to the same rank as the original signal dimension.
    df = np.zeros(A1.shape[-1], A1.dtype)
    df[-N:] = -displ  #displacement
    Delta_b += 0.5 * b1 + A @ df
    return A, Delta_b
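With A and Delta_b in hand, the displacement in Farnebäck's framework solves A d = Delta_b at each point (his Eq. 7.34). A minimal sketch of that last step, assuming A has shape (..., M, M) and Delta_b has shape (..., M) as returned above; a robust implementation would also regularize near-singular matrices:

import numpy as np

def estimate_displacement(A, Delta_b):
    # pointwise solve of A d = Delta_b over all leading (spatial) dimensions
    return np.linalg.solve(A, Delta_b[..., None])[..., 0]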
Пример #46
0
    def slowS4(self, X4time, dt = 0.002, a = 1.0, ppp = [1,1,1], outputfile = ''):
        """ Compute four-point dynamic structure factor at peak timescale of dynamic susceptibility

            Based on overlap function Qt and its corresponding dynamic susceptibility QtX4     
            a is the cutoff for the overlap function, default is 1.0 (EAM) and 0.3(LJ) (0.3<d>)
            X4time is the peaktime scale of X4
            dt is the timestep in MD simulations
            Dynamics should be calculated before computing S4
            Only considered the particles which are slow
            ppp is the periodic boundary conditions
        """
        print ('-----------------Compute dynamic S4(q) of slow particles --------------')

        X4time = int(X4time / dt / self.TimeStep)
        twopidl = 2 * pi / self.Boxlength[0]
        if self.Boxlength[0] <= 40.0:
            Numofq = int(self.Boxlength[0] / twopidl)

        elif self.Boxlength[0] <= 80.0:
            Numofq = int(self.Boxlength[0] / 2 / twopidl)

        elif self.Boxlength[0] > 80.0:
            Numofq = int(self.Boxlength[0] / 4 / twopidl)

        wavevector = choosewavevector(Numofq, self.ndim) #Only S4(q) in the low wavenumber range is of interest
        qvalue, qcount = np.unique(wavevector[:, 0], return_counts = True)
        sqresults = np.zeros((len(wavevector[:, 0]), 3)) #the first column accounts for the wavenumber

        for n in range(self.SnapshotNumber - X4time):
            RII = self.Positions[n + X4time] - self.Positions[n]
            hmatrixinv = np.linalg.inv(self.hmatrix[n])
            matrixij   = np.dot(RII, hmatrixinv)
            RII        = np.dot(matrixij - np.rint(matrixij) * ppp, self.hmatrix[n]) #remove PBC

            #RII = np.square(RII).sum(axis = 1)
            #RII = np.where(np.sqrt(RII) <= a, 1, 0)
            RII = np.linalg.norm(RII, axis = 1)
            RII = np.where(RII <= a, 1, 0)
            sqtotal = np.zeros((len(wavevector[:, 0]), 2))
            for i in range(self.ParticleNumber):
                medium   = twopidl * (self.Positions[n][i] * wavevector[:, 1:]).sum(axis = 1)
                sqtotal[:, 0] += np.sin(medium) * RII[i]
                sqtotal[:, 1] += np.cos(medium) * RII[i]
            
            sqresults[:, 1] += np.square(sqtotal).sum(axis = 1) / self.ParticleNumber
            sqresults[:, 2] += sqtotal[:, 1]

        sqresults[:, 0]  = wavevector[:, 0]
        sqresults[:, 1]  = sqresults[:, 1] / (self.SnapshotNumber - X4time)
        sqresults[:, 2]  = np.square(sqresults[:, 2] / (self.SnapshotNumber - X4time)) / self.ParticleNumber

        sqresults = pd.DataFrame(sqresults)
        results   = np.array(sqresults.groupby(sqresults[0]).mean())
        results[:, 1] = results[:, 0] - results[:, 1] / qcount

        qvalue    = twopidl * np.sqrt(qvalue)
        results   = np.column_stack((qvalue, results))
        names = 'q  S4a(q)  S4b(q)'
        if outputfile:
            np.savetxt(outputfile, results, fmt='%.6f', header = names, comments = '')

        print ('--------- Compute S4(q) of slow particles over ------')
        return results, names
Пример #47
0
for seq in sequences:
    data = pykitti.odometry(base_dir, seq, frames=None)
    length = [len(data.poses)]
    images = np.zeros(length + img_channels + img_shape, dtype=np.float32)
    scan_idx = 0

    # first need to convert each xyz
    for scan in data.velo:
        theta = np.arctan2(scan[:, 1], scan[:, 0])
        xy = np.sqrt(np.square(scan[:, 0]) + np.square(scan[:, 1]))
        az = np.arctan2(scan[:, 2], xy)

        velo_start = np.min(az)
        velo_end = np.max(az)
        spacing = (velo_end - velo_start) / 63
        az = np.rint((az - velo_start) / spacing).astype(np.int16)

        dist = np.sqrt(np.square(xy) + np.square(scan[:, 2]))

        for i in range(0, 64):
            if len(theta[az == i]) == 0:
                if mode == HORIZONTAL_INTERP:
                    images[scan_idx, 0, 63 - i, :] = np.max(dist)
                else:
                    images[scan_idx, 0, 63 - i, :] = 0
                images[scan_idx, 1, 63 - i, :] = 0
            else:
                strip_mean = binned_statistic(
                    theta[az == i], [dist[az == i], scan[az == i, 3]],
                    statistic='mean',
                    bins=enc_angles)
Пример #48
0
def make_primitive(atm, eps=None):
    """Finds the primitive cell of a given crystal and the HNF needed to
    convert the primitive to the current crystal.

    Args:
        atm (matdb.atoms.Atoms): an atoms object.
        eps (float): floating point tolerance.

    Returns:
        The lattice and atomic basis vectors of the primitive cell along
        with the HNF needed to map the primitive cell to the original cell.
    """
    #extract data from the atoms object to be used.
    a_vecs = atm.cell
    atom_pos = atm.positions
    atom_type = None
    new_vecs = None
    unique_pos = None
    unique_types = None
    try:
        atom_type = atm.get_chemical_symbols()
        atom_type = [i for i in atom_type if i != "X"]
        # mapping = {k:v for v, k in enumerate(np.unique(temp))}
        # atom_type = [mapping[i] for i in types]
    except:  #pragma: no cover
        atom_type = atm.numbers
        # mapping = {k:v for v, k in enumerate(np.unique(temp))}
        # atom_type = [mapping[i] for i in types]

    if atom_type is None or len(atom_type) == 0:
        raise ValueError(
            "The atoms object doesn't contain species information. "
            "The primitive cell can't be found without the species information."
        )
    if eps is None:
        eps = 1E-3

    #Armed with the data we can now make the cell primitive.
    num_atoms = len(atom_pos)

    latt_to_cart, cart_to_latt = _get_transformations(a_vecs)

    #Ensure that all the basis atoms are in the unit cell.
    atom_pos = np.array([
        bring_into_cell(pos, cart_to_latt, latt_to_cart, eps)
        for pos in atom_pos
    ])

    #If the cell isn't primitive then there will be lattice vectors
    #inside the cell. If a lattice vector is inside the unit cell then
    #it will bring any atom inside the cell back onto itself via a
    #fractional translation. Such a translation must exist for each
    #atom of the same type.
    fracts = []
    for i_atom, a_type in enumerate(atom_type):
        #Only proceed for atoms of the same type as the first type of
        #atom that aren't the first atom.
        if not a_type == atom_type[0] or i_atom == 0:
            continue

        fract = atom_pos[i_atom] - atom_pos[0]
        fract = np.array(
            bring_into_cell(fract, cart_to_latt, latt_to_cart, eps))
        for j_atom, this_type in enumerate(atom_type):
            #Find the new location of the atom after the fractional
            #translation.
            new_pos = fract + atom_pos[j_atom]
            new_pos = np.array(
                bring_into_cell(new_pos, cart_to_latt, latt_to_cart, eps))
            mapped = _does_mapping_exist(new_pos, this_type, atom_pos,
                                         atom_type, eps)
            if not mapped:
                break

        #If the loop was successful (mapped==True) then this
        #translation takes all atoms to another of the same type so it
        #is a valid fractional translation and should be kept.
        if mapped:
            fracts.append(list(fract))

    #If the lattice isn't primitive then extra lattice points, i.e.,
    #fractional translations, were found above.
    if len(fracts) > 0:
        #Collect all lattices points, i.e., potential new primitive vectors.
        lattice_points = deepcopy(fracts)
        lattice_points.extend(a_vecs)

        #Consider all possible triplets of the lattice points to see
        #if they form a set of primitive basis vectors. A triplet will
        #form a basis set if all lattice points are integer
        #combinations of the triplet.
        for new_vecs in combinations(lattice_points, 3):
            try:
                cart_to_latt = np.linalg.inv(np.transpose(new_vecs))
            except:  #pragma: no cover
                continue
            for point in lattice_points:
                vec = np.matmul(cart_to_latt, point)

                #Check if the new vecs are all integers. If not then
                #this lattice point wasn't preserved, move on to the
                #next one.
                mapped = True
                if not np.allclose(vec, np.rint(vec),
                                   rtol=eps):  #pragma: no cover
                    # I could never get this flag to fire.
                    mapped = False
                    msg.err(
                        "Reached portion of code that has never been tested. "
                        "Please submit following to developers for testing: "
                        "a_vecs: {0}, atom_pos: {1}, atom_types: "
                        "{2}".format(a_vecs, atom_pos, atom_type))
                    break

            #If all lattice points were mapped then we've found a valid new basis and can exit.
            if mapped:
                break

        if not mapped:  #pragma: no cover
            raise LogicError("Error in make_primitive. Valid basis not found.")

        #Bring all the atoms into the new cell.
        atom_pos = np.array([
            bring_into_cell(pos, cart_to_latt, np.transpose(new_vecs), eps)
            for pos in atom_pos
        ])
        # Check for redundant atoms in the basis. If any are present then remove them.
        unique_pos = []
        unique_types = []
        removed = np.zeros(len(atom_type))
        for i_atom, this_type in enumerate(atom_type):
            pos = atom_pos[i_atom]
            mapped = False
            for j in range(i_atom + 1, len(atom_type)):
                if (atom_type[j] == this_type) and (np.allclose(
                        pos, atom_pos[j], rtol=eps)):
                    mapped = True
                    removed[i_atom] = i_atom + 1

            if not mapped:
                unique_pos.append(pos)
                unique_types.append(this_type)

    if new_vecs is None:
        new_vecs = a_vecs
    if unique_pos is None:
        unique_pos = atom_pos
    if unique_types is None:
        unique_types = atom_type
    #Now that we have the new atomic and lattice basis we need to
    #define the HNF that maps the primitive to the supercell.
    n = np.rint(
        np.matmul(np.linalg.inv(np.transpose(new_vecs)), np.transpose(a_vecs)))
    hnf, b = hermite_normal_form(n)
    hnf = hnf.astype(int)
    return (new_vecs, unique_pos, unique_types, hnf)
Пример #49
0
coo = []
for ii in range(len(ra)):
    coo.append([ra[ii], dec[ii]])
coo = np.array(coo, dtype=float)
pix_coo = ww.wcs_world2pix(coo, 1)
for ii in range(len(gal)):
    print(gal[ii], pix_coo[ii, 0], pix_coo[ii, 1])

#calculating the coordinates in case I need to paste the galaxies back in the image
x0 = pix_coo[:, 0] - (side / 2.)
x1 = pix_coo[:, 0] + (side / 2.)
y0 = pix_coo[:, 1] - (side / 2.)
y1 = pix_coo[:, 1] + (side / 2.)

#Round elements of the arrays to the nearest integer
x0 = np.rint(x0).astype(int)
x1 = np.rint(x1).astype(int)
y0 = np.rint(y0).astype(int)
y1 = np.rint(y1).astype(int)

#creating the galaxy stamps
for ii in range(len(gal)):
    montage.mSubimage_pix(path_img + name_img,
                          "./galaxy_images/" + gal[ii] + ".fits", x0[ii],
                          y0[ii], side)
    montage.mSubimage_pix(path_rms + name_rms,
                          "./sigma_images/" + gal[ii] + "_rms.fits", x0[ii],
Пример #50
0
    else:
        print("Model not found")

    return img


if __name__ == '__main__':

    args = read_args()

    print("\n Predicting with %s ... \n" % args.model)

    vidcap = cv2.VideoCapture(args.input)
    total_frames = vidcap.get(cv2.CAP_PROP_FRAME_COUNT)
    fps = vidcap.get(cv2.CAP_PROP_FPS)
    offset = np.rint(fps) / args.frame_rate
    success, frame = vidcap.read()
    frame_count = 0

    model, preprocess_mode = load_model(args)

    features = []

    while success:

        img = load_image(frame, args.model)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x, mode=preprocess_mode)
        preds = model.predict(x)
Пример #51
0
    def local_to_grid(self, north_offset, east_offset):
        if len(self.local_goal) == 2:
            ng, eg = self.local_goal
        elif len(self.local_goal) == 3:
            ng, eg, ag = self.local_goal
        self.grid_goal = (int(np.rint(ng - north_offset)), int(np.rint(eg - east_offset)))
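For illustration, the local-to-grid conversion just subtracts the grid offsets and rounds to the nearest cell (the offset and goal values below are made up):

import numpy as np

north_offset, east_offset = -316, -445   # illustrative grid offsets
ng, eg = 25.3, -12.8                     # local goal, metres north/east
grid_goal = (int(np.rint(ng - north_offset)), int(np.rint(eg - east_offset)))
print(grid_goal)                         # (341, 432)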
Пример #52
0
def procimg(Isize1,Isize2,scale,mask_tag,A_matrix,rvec,DATA,latxdim,latydim,latzdim,procid):
  # returns a 3D lattice with integrated data from a chunk of data points
  # define the lattice indices at which h,k,l = 0,0,0
  i0=int(latxdim/2-1)
  j0=int(latydim/2-1)
  k0=int(latzdim/2-1)
  # total number of voxels in the lattice
  latsize = latxdim*latydim*latzdim
  lat = np.zeros(latsize*2, dtype=np.float32).reshape((2,latsize))
#  lat = csr_matrix((2,latsize),dtype=np.float32)

  # calculate the range of data points to be integrated in this process
  chunksize = int(Isize2/nproc)
  if (Isize2 % nproc !=  0):
    chunksize += 1
  y1 = procid*chunksize
  y2 = y1 + chunksize
  if (y2>Isize2):
    y2=Isize2
  # walk through the data points and accumulate the integrated data in the lattice
  At = A_matrix
#  print A_matrix
#  print At
#  print np.dot(np.asarray(rvec),At)[0]
#  print A_matrix*col(rvec[0])
  rvec_size = len(rvec)
  print "SAMPLES"
  print rvec[50000:50010]
  print DATA[50000:50010]
  for y in xrange(y1,y2): # fast dimension
    Hlist = np.dot(np.asarray(rvec[y:rvec_size:Isize2]),At)
    if (y<10):
        print "H[",y,"] = (",Hlist[y],")"
    Hintlist = np.rint(Hlist)
    dHlist = np.abs(Hlist - Hintlist)
    vallist = np.asarray(DATA[y*Isize1:(y+1)*Isize1])
    Hintpphkllist = np.rint(Hlist*float(pphkl))
    ilist = Hintpphkllist[:,0] + i0
    jlist = Hintpphkllist[:,1] + j0
    klist = Hintpphkllist[:,2] + k0
    maskrightvals = np.logical_and((vallist>0),(vallist != mask_tag))
    masknotnearhkl = np.logical_or(np.logical_or((dHlist[:,0] >= 0.25),(dHlist[:,1] >= 0.25)),(dHlist[:,2] >= 0.25))
#    print 'not near hkl = {0}, total = {1}'.format(np.sum(masknotnearhkl),len(dHlist[:,0]))
    maskinbounds = np.logical_and(np.logical_and(np.logical_and(np.logical_and(np.logical_and(ilist>=0,jlist>=0),klist>=0),ilist<latxdim),jlist<latydim),klist<latzdim)
    if (filterhkl):
        maskall = np.logical_and(np.logical_and(maskrightvals,masknotnearhkl),maskinbounds)
    else:
        maskall = np.logical_and(maskrightvals,maskinbounds)
#    indexlist = klist*latxdim*latydim + jlist*latxdim + ilist
    indexlist = np.int32(klist[maskall]*latxdim*latydim + jlist[maskall]*latxdim + ilist[maskall])
#    print len(indexlist2)
#    indexlist[maskall] = 0
    valscalelist = vallist[maskall]*scale
    for x in range(len(indexlist)): # slow dimension
#        if maskall[x]:
            lat[0][indexlist[x]] += valscalelist[x]
            lat[1][indexlist[x]] += 1
      # calculate index into the data points
#      z = x*Isize2 + y
#      z = y*Isize1 + x
#      tmid = time()
      # calculate h,k,l for this data point
#      H = A_matrix * col(rvec[z])
#      H = np.dot(np.asarray(rvec_list[x]),At)
#      print H
#      print A_matrix * col(rvec[z])
#      hh = Hintlist[x][0]
#      kk = Hintlist[x][1]
#      ll = Hintlist[x][2]
      # calculate the displacement of this data point from the nearest Miller index
#      dh = dHlist[x][0]
#      dk = dHlist[x][1]
#      dl = dHlist[x][2]
# labelit values
#      val = int(DATA[(x,y)])
#dials values
#      val = int(DATA[(y,x)])
#      val = int(DATA[y*Isize1+x])
#      val = vallist[x]
      # integrate the data only if it's not in the immediate neighborhood of a Bragg peak
#      if ((val != mask_tag) and (val != 0) and not((dh < .25) and (dk < .25) and (dl < .25))):
        # i, j, and k are indices into the lattice
#      if (not maskwrongvals[x] and not masknearhkl[x]):
#        i = ilist[x]
#        j = jlist[x]
#        k = klist[x]
#        if ((i>=0) and (j>=0) and (k>=0) and (i<latxdim) and (j<latydim) and (k<latzdim)):
#        if maskbounds[x]:
#        index = indexlist[x]
#          index = k*latxdim*latydim + j*latxdim + i
#          if ((val>0)  and (val < 32767)):
            # accumulate the data for this lattice point. keep track of count for averaging
#        lat[0][index] += val*scale
#        lat[1][index] += 1
#  lat[0][0] = 0
#  lat[1][0] = 0
  return lat
Пример #53
0
def emissions(input_stocks="data/emissions/AGB.tif",
              input_forest="output/forest_cover_2050.tif",
              coefficient=0.47,
              blk_rows=128):
    """Predict the carbon emissions associated to future deforestation.

    This function predicts the carbon emissions associated to future
    deforestation. Computation are done by block and can be performed
    on large geographical areas.

    :param input_stocks: path to raster of biomass or carbon stocks (in Mg/ha).
    :param input_forest: path to forest-cover change raster (0=deforestation).
    :param coefficient: coefficient to convert stocks in MgC/ha (can be 1).
    :param blk_rows: if > 0, number of rows for computation by block.
    :return: emissions of carbon in MgC.

    """

    # Landscape variables from forest raster
    forestR = gdal.Open(input_forest)
    gt = forestR.GetGeoTransform()
    ncol = forestR.RasterXSize
    nrow = forestR.RasterYSize
    Xmin = gt[0]
    Xmax = gt[0] + gt[1] * ncol
    Ymin = gt[3] + gt[5] * nrow
    Ymax = gt[3]

    # Make vrt
    print("Make virtual raster")
    raster_list = [input_forest, input_stocks]
    param = gdal.BuildVRTOptions(resolution="user",
                                 outputBounds=(Xmin, Ymin, Xmax, Ymax),
                                 xRes=gt[1], yRes=-gt[5],
                                 separate=True)
    gdal.BuildVRT("/vsimem/var.vrt", raster_list, options=param)
    stack = gdal.Open("/vsimem/var.vrt")

    # NoData value for stocks
    # stocksB = stack.GetRasterBand(2)
    # stocksND = stocksB.GetNoDataValue()

    # Make blocks
    blockinfo = makeblock("/vsimem/var.vrt", blk_rows=blk_rows)
    nblock = blockinfo[0]
    nblock_x = blockinfo[1]
    x = blockinfo[3]
    y = blockinfo[4]
    nx = blockinfo[5]
    ny = blockinfo[6]
    print("Divide region in {} blocks".format(nblock))

    # Computation by block
    # Total sum
    sum_Stocks = 0
    # Message
    print("Compute carbon emissions by block")
    # Loop on blocks of data
    for b in range(nblock):
        # Progress bar
        progress_bar(nblock, b + 1)
        # Position in 1D-arrays
        px = b % nblock_x
        py = b // nblock_x
        # Data for one block of the stack (shape = (nband,nrow,ncol))
        data = stack.ReadAsArray(x[px], y[py], nx[px], ny[py])
        data_Stocks = data[1]
        # data_Stocks[data_Stocks == StocksND] = 0
        # Previous line doesn't work because StocksND
        # differs from NoData value in ReadAsArray
        data_Stocks[data_Stocks < 0] = 0
        data_Forest = data[0]
        # Sum of emitted stocks
        sum_Stocks = sum_Stocks + np.sum(data_Stocks[data_Forest == 0])
    # Pixel area (in ha)
    Area = gt[1] * (-gt[5]) / 10000
    # Carbon emissions in Mg
    Carbon = sum_Stocks * coefficient * Area
    Carbon = int(np.rint(Carbon))

    # Return carbon emissions
    return Carbon
Пример #54
0
            metrics=['accuracy', auroc, auprc, f1_m, recall_m, precision_m])

        #use predictions from all small neural net as input to train
        #load prediction features from mini-net

        #train the model
        history = model.fit(x_train,
                            y_train,
                            batch_size=1,
                            epochs=40,
                            validation_split=0.0)

        #predict the results on the out of sample images
        y_pred = model.predict(x_test)
        accuracy_s = sklearn.metrics.accuracy_score(y_test, np.rint(y_pred))

        figure_dir = "/gpfs/ysm/scratch60/gerstein/zc264/FaceForensics/classification/results/"
        os.system("mkdir -p " + figure_dir)
        figure_output_name = figure_dir + "final"

        # plot accuracy
        plt.figure()
        plt.plot(history.history['acc'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train'], loc='upper left')
        plt.savefig(figure_output_name + '.accuracy.png')

        # plot loss over time in cell A
Пример #55
0
    def NumBinsOfHistogram(self):
        p = self.params
        return int(
            np.rint(p.metadata.MaximumDistance() /
                    p.metadata.DistanceBinWidth()))
Пример #56
0
    def plan_path(self):
        self.flight_state = States.PLANNING
        t0 = time.time()
        
        print("Searching for a path ...")
        
        TARGET_ALTITUDE = 5
        SAFETY_DISTANCE = 5
        VOXEL_SIZE = 5
        MAX_ALTITUDE = 250

        # Number of nodes to prune at max
        max_prune = 20

        self.target_position[2] = TARGET_ALTITUDE

        # If WPs calculated previously, send them directly
        # Work around for timeout
        if self.waypoints is not None and len(self.waypoints) > 0:
            time.sleep(2)
            print("waypoints:")
            print(self.waypoints)
            print(self.flight_state, self.in_mission, self.connected)
            self.send_waypoints()
            return    
        
        np.set_printoptions(precision=3)
        print('global home {0}, position {1}, local position {2}'.format(
            self.global_home, self.global_position, self.local_position))

        # Read in obstacle map
        data = np.loadtxt('colliders.csv', delimiter=',', dtype=np.float64, skiprows=2)
        
        # TODO CHECK if all of them are used TODO
        # Define a grid for a particular altitude and safety margin around obstacles
        grid, north_offset, east_offset = create_grid(data, TARGET_ALTITUDE, SAFETY_DISTANCE)
        grid25, _,_ = create_grid25(data, SAFETY_DISTANCE)
        voxmap, north_offvox, east_offvox = create_voxmap(data, safety_distance=SAFETY_DISTANCE, max_altitude=MAX_ALTITUDE, voxel_size=VOXEL_SIZE)
        voxfull, north_offvox, east_offvox = create_voxmap(data, safety_distance=SAFETY_DISTANCE, max_altitude=MAX_ALTITUDE, voxel_size=1)
        
        grid_shape = grid.shape
        grid25_shape = grid25.shape
        vox_shape = voxmap.shape
        print("Grid shape = {0}, Grid2.5 shape = {1}, vox shape = {2}".format(
            grid_shape, grid25_shape, vox_shape))
        print("North offset = {0}, east offset = {1}, grid shape = {2}".format(north_offset, east_offset, grid.shape))
        print("North offvox = {0}, east offvox = {1}, vox shape = {2}".format(north_offvox, east_offvox, voxmap.shape))

        # Define starting point on the grid (this is just grid center)
        with open('colliders.csv') as f:
            first_line = f.readline().strip()
        latlon = first_line.split(',')
        lat0 = float(latlon[0].strip().split(' ')[1])
        lon0 = float(latlon[1].strip().split(' ')[1])

        # TODO: convert start position to current position rather than map center
        # set home position to (lon0, lat0, 0)
        self.set_home_position(lon0, lat0, 0)

        # TODO: retrieve current global position
        # TODO: convert to current local position using global_to_local()
        local_pos = global_to_local(self.global_position, global_home=self.global_home)
        north, east, alt = local_pos

        grid_start = (int(np.rint(north - north_offset)), int(np.rint(east - east_offset)), int(np.rint(self.global_position[2])) + TARGET_ALTITUDE)
        vox_start = (grid_start[0] // VOXEL_SIZE,
                grid_start[1] // VOXEL_SIZE,
                grid_start[2] // VOXEL_SIZE)
        print("Grid Start: ",grid_start, "Vox Start: ", vox_start)

        if self.global_goal is not None:
            if len(self.global_goal) == 2:
                self.global_goal = (self.global_goal[0], self.global_goal[1], TARGET_ALTITUDE)
            self.global_to_grid(north_offset, east_offset)
        elif self.local_goal is not None:
            self.local_to_grid(north_offset, east_offset)    

        # If goal location is in an obstacle, goal is above it
        if self.grid_goal is not None:
            self.place_goal_in_grid(grid_shape)
            grid_goal = tuple(map(int, self.grid_goal))
            print("Goal is set to {0} with the parameter".format(grid_goal))
            goal_north, goal_east = grid_goal[:2]
            
        else:
            # randomly select a goal    
            dist_idx = 100.0

            change = np.random.rand(3)
            change[1:2] -= 0.5
            # TODO LOOK FOR THE VALUE OF CHANGE!!!
            print("change", change)
            goal = (self.global_home[0] + change[0] / dist_idx,
                    self.global_home[1] + change[1] / (dist_idx),
                    self.global_home[2] + change[2] * float(MAX_ALTITUDE))
            print("Goal Global: ", goal)
            local_goal = global_to_local(goal, global_home=self.global_home)
            print("Goal Local: ", local_goal)
            ng, eg, ag = local_goal

            goal_north = int(np.rint(ng - north_offset))
            goal_east = int(np.rint(eg - east_offset))

        if goal_north < 1:
            goal_north = 1
        elif goal_north > grid_shape[0] - 2:
            goal_north = grid_shape[0] - 2
        if goal_east < 1:
            goal_east = 1
        elif goal_east > grid_shape[1] - 2:
            goal_east = grid_shape[1] - 2
                    
        goal_alt = int(np.rint(grid25[goal_north, goal_east] + SAFETY_DISTANCE))
        grid_goal = (goal_north, goal_east, goal_alt)
        self.landing_altitude = grid25[goal_north, goal_east]

        vox_goal = (grid_goal[0] // VOXEL_SIZE,
                    grid_goal[1] // VOXEL_SIZE,
                    grid_goal[2] // VOXEL_SIZE)

        while voxmap[vox_goal[0], vox_goal[1], vox_goal[2]]:
            vox_goal = (vox_goal[0], vox_goal[1], vox_goal[2]+1)
            grid_goal = (grid_goal[0], grid_goal[1], grid_goal[2] + VOXEL_SIZE)

        if vox_goal[0] >= vox_shape[0]:
            vox_goal = (vox_shape[0] - 1, vox_goal[1], vox_goal[2])
        elif vox_goal[0] < 1:
            vox_goal = (1, vox_goal[1], vox_goal[2])
        if vox_goal[1] >= vox_shape[1]:
            vox_goal = (grid_goal[0], vox_shape[1] - 1, vox_goal[2])
        elif vox_goal[1] < 1:
            vox_goal = (vox_goal[0], 1, vox_goal[2])

        print('Grid Start and Goal: ', grid_start, grid_goal)
        print('Vox Start and Goal: ', vox_start, vox_goal)
        # plot(grid, grid_start, grid_goal)

        # Run A* to find a path from start to goal
        # TODO: add diagonal motions with a cost of sqrt(2) to your A* implementation
        # or move to a different search space such as a graph (not done here)
        vox_path, cost = a_star3D2(voxmap, heuristic3D, vox_start, vox_goal)
        l_path = len(vox_path)
        print("Path length:", l_path," cost:", cost)
        
        path = [(p[0] * VOXEL_SIZE, p[1] * VOXEL_SIZE, p[2] * VOXEL_SIZE) for p in vox_path]
        p1 = path[1]
        pn_1 = path[-2]

        print("PATH START: grid_start:", grid_start, " p1:", p1)
        path_start, cost_start = a_star3D2(voxfull, heuristic3D, grid_start, p1, info=False)
        print("PATH GOAL: grid_goal:", grid_goal, " p1:", pn_1)
        path_goal, cost_goal = a_star3D2(voxfull, heuristic3D, grid_goal, pn_1, info=False)
        
        path = path_start + path[1:len(path) - 2] + path_goal
        cost = cost + cost_start + cost_goal

        #if VOXEL_SIZE == 1:
        #    plot(grid, grid_start, grid_goal, path=path)
        #plot3D(voxmap, vox_start, vox_goal, path=path)
        
        # TODO: prune path to minimize number of waypoints
        # TODO (if you're feeling ambitious): Try a different approach altogether!    
        pruned_path = prune_path(path, VOXEL_SIZE, max_prune)
        print("Pruned Path length: ", len(pruned_path))
        
        plot(grid, grid_start, grid_goal, path=pruned_path)
        plot3D(voxmap, vox_start, vox_goal, path=vox_path)
            
        #print("A* path:")
        #for p in path:
        #    print(p, voxmap[p[0], p[1], p[2]])
            
        #print("Pruned_path:")
        #for p in pruned_path:
        #    print(p)
        #    print(p, p.cost, p.delta, (p.delta[0] + north_offset, p.delta[1] + east_offset, TARGET_ALTITUDE, 0))
        
        
        # Heading for each waypoint: 0 for the first, then the direction
        # of travel from the previous waypoint
        head = [0]
        for t in range(1, len(pruned_path)):
            head.append(np.arctan2(pruned_path[t][1] - pruned_path[t - 1][1],
                                   pruned_path[t][0] - pruned_path[t - 1][0]))

        # Convert path to waypoints
        waypoints = [[p[0] + north_offset, p[1] + east_offset, p[2], head[i]] for i,p in enumerate(pruned_path)]
        #waypoints = [[p[0] + north_offset, p[1] + east_offset, p[2], 0] for p in pruned_path]

        if recon == 'Yes':
            helix_waypoints = create_helix_trajectory(first=waypoints[-1])
            waypoints = waypoints + helix_waypoints
        
        # Set self.waypoints
        self.waypoints = waypoints
        # TODO: send waypoints to sim (this is just for visualization of waypoints)
        t_int = time.time() - t0
        
        # On timeout, don't send waypoints:
        # end this instance; main will start a new one
        if t_int < self.timeout:
            print("no timeout, continue")
            self.send_waypoints()
        else:
            print("timeout, send wp to a new drone instance")
            self.is_timeout = True
            self.disarming_transition()
            self.manual_transition()
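
The example above uses np.rint in two roles: snapping a continuous local (NED) position onto integer grid cells, then coarsening those cells into voxel indices with integer division. A minimal standalone sketch of that conversion, with made-up offsets and voxel size standing in for the values derived from the map data:

import numpy as np

def local_to_grid_and_voxel(north, east, alt, north_offset, east_offset, voxel_size):
    # Snap the continuous local position onto the nearest integer grid cell
    grid = (int(np.rint(north - north_offset)),
            int(np.rint(east - east_offset)),
            int(np.rint(alt)))
    # Coarsen grid cells into voxel indices for the 3D search
    vox = tuple(g // voxel_size for g in grid)
    return grid, vox

# Hypothetical offsets and voxel size; the real values come from the map data
grid, vox = local_to_grid_and_voxel(123.4, -56.7, 5.2,
                                    north_offset=-316, east_offset=-445,
                                    voxel_size=5)
print(grid, vox)  # (439, 388, 5) (87, 77, 1)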
Example #57
0
        # Load the reference weights into the fully-connected layer
        self.fc1.weight = torch.nn.Parameter(
            torch.FloatTensor(W1.reshape((Nout, Nin, 1, 1, 1))).to(self.fc1.weight.device),
            requires_grad=True)

    def forward(self, spikeInput):
        return self.slayer.spike(self.fc1(self.slayer.psp(spikeInput)))


snn = Network(net_params).to(device)

# load input spikes
spikeAER = np.loadtxt('test_files/snnData/spikeIn.txt')
spikeAER[:, 0] /= net_params['simulation']['Ts']
spikeAER[:, 1] -= 1

spikeData = np.zeros((Nin, Ns))
for (tID, nID) in np.rint(spikeAER).astype(int):
    if tID < Ns:
        spikeData[nID, tID] = 1 / net_params['simulation']['Ts']
spikeIn = torch.FloatTensor(spikeData.reshape((1, Nin, 1, 1, Ns))).to(device)

spikeOut = snn.forward(spikeIn)

# The desired class must be a boolean tensor whose first four dimensions
# match the output tensor and whose last (time) dimension is ONE
desiredClass = torch.zeros((1, Nout, 1, 1, 1)).to('cpu')
desiredClass[0, 4, 0, 0, 0] = 1  # assuming true class is class 5

# error = spikeLoss(snn.slayer, net_params['training']['error']).to(device)
error = spikeLoss(net_params).to(device)
loss = error.numSpikes(spikeOut, desiredClass)

loss.backward()
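
The AER-to-dense conversion above is where np.rint does the work: each (time, neuron) event is rounded onto an integer time bin before being written into the dense spike tensor. A self-contained sketch of the same step, with assumed dimensions in place of the net_params values:

import numpy as np

Ts = 1.0           # simulation time step (assumed)
Nin, Ns = 8, 100   # number of input neurons and time bins (assumed)

# AER-style events: (time, neuron id), with 1-based neuron ids
events = np.array([[0.4, 1.0], [2.6, 3.0], [99.2, 8.0]])
events[:, 0] /= Ts   # express times in units of the time step
events[:, 1] -= 1    # shift neuron ids to 0-based

spike_data = np.zeros((Nin, Ns))
for t_id, n_id in np.rint(events).astype(int):
    if t_id < Ns:
        spike_data[n_id, t_id] = 1 / Ts  # spike amplitude 1/Ts, as above
print(spike_data.sum())  # 3.0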
Example #58
0
def clustered_contours(img,
                       grouped_contour_indices,
                       roi_objects,
                       roi_obj_hierarchy,
                       nrow=1,
                       ncol=1):
    """
    This function takes the outputs from cluster_contours

    Inputs:
    img                     = RGB or grayscale image data for plotting
    grouped_contour_indices = Indices for grouping contours
    roi_objects             = object contours in an image that are needed to be clustered.
    roi_obj_hierarchy       = object hierarchy

    Returns:
    clustered_image         = Labeled clusters image

    :param img: numpy.ndarray
    :param grouped_contour_indices: list
    :param roi_objects: list
    :param roi_obj_hierarchy: numpy.ndarray

    :return clustered_image: numpy.ndarray
    """

    clustered_image = np.copy(img)
    iy, ix = np.shape(img)[:2]

    # Gray input images need to get converted to RGB for plotting colors
    if len(np.shape(img)) == 2:
        clustered_image = cv2.cvtColor(clustered_image, cv2.COLOR_GRAY2RGB)

    # Plot grid if nrow or ncol are changed from the default
    if nrow > 1 or ncol > 1:
        rbreaks = range(0, iy, int(np.rint(iy / nrow)))
        cbreaks = range(0, ix, int(np.rint(ix / ncol)))
        for y in rbreaks:
            cv2.line(clustered_image, (0, y), (ix, y), (255, 0, 0),
                     params.line_thickness)
        for x in cbreaks:
            cv2.line(clustered_image, (x, 0), (x, iy), (255, 0, 0),
                     params.line_thickness)

    rand_color = color_palette(len(grouped_contour_indices))
    grouped_contours = []
    for i, x in enumerate(grouped_contour_indices):
        for a in x:
            # Skip child contours; draw and collect only top-level contours
            if roi_obj_hierarchy[0][a][3] == -1:
                cv2.drawContours(clustered_image,
                                 roi_objects,
                                 a,
                                 rand_color[i],
                                 -1,
                                 hierarchy=roi_obj_hierarchy)
                # Add contour to list to get grouped
                grouped_contours.append(roi_objects[a])
        if len(grouped_contours) > 0:
            # Combine contours into a single contour
            grouped_contours = np.vstack(grouped_contours)
            # Plot the bounding circle around the contours that got grouped together
            center, radius = cv2.minEnclosingCircle(points=grouped_contours)
            cv2.circle(img=clustered_image,
                       center=(int(center[0]), int(center[1])),
                       radius=int(radius),
                       color=rand_color[i],
                       thickness=params.line_thickness,
                       lineType=8)
            # Label the cluster ID
            cv2.putText(img=clustered_image,
                        text=str(i),
                        org=(int(center[0]), int(center[1])),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=params.text_size,
                        color=(200, 200, 200),
                        thickness=params.text_thickness)
        # Empty the grouped_contours list for the next group
        grouped_contours = []

    if params.debug == 'print':
        print_image(
            clustered_image,
            os.path.join(params.debug_outdir,
                         str(params.device) + '_clusters.png'))
    elif params.debug == 'plot':
        plot_image(clustered_image)

    return clustered_image
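
The grid overlay in clustered_contours places its lines by rounding iy / nrow and ix / ncol to the nearest whole pixel. The same computation in isolation, assuming a 480x640 image split into 3x4 cells:

import numpy as np

iy, ix = 480, 640   # image height and width (assumed)
nrow, ncol = 3, 4   # grid dimensions

# Row/column positions of the grid lines, rounded to whole pixels
rbreaks = range(0, iy, int(np.rint(iy / nrow)))
cbreaks = range(0, ix, int(np.rint(ix / ncol)))
print(list(rbreaks))  # [0, 160, 320]
print(list(cbreaks))  # [0, 160, 320, 480]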
Example #59
0
def make_trigger_line(time_array, jump_resolution):
    # Round each event time to the nearest multiple of the jump resolution
    positions = np.rint(time_array / jump_resolution).astype(int)

    # Dense 0/1 line with a mark at every event position
    trigger_line = np.zeros(np.max(positions) + 1, dtype='uint32')
    trigger_line[positions] = 1
    return trigger_line
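
A quick usage sketch with made-up event times, reusing the make_trigger_line definition above and an assumed jump resolution of 0.5 time units:

import numpy as np

times = np.array([0.1, 1.02, 2.51])
line = make_trigger_line(times, jump_resolution=0.5)
print(line)  # [1 0 1 0 0 1] -> marks at positions 0, 2, and 5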
Example #60
0
def _distribute_forces(supercell, disp, forces, filename, symprec):
    natom = supercell.get_number_of_atoms()
    lattice = supercell.get_cell()
    symbols = supercell.get_chemical_symbols()
    positions = supercell.get_positions()
    positions[disp[0]] += disp[1]
    cell = Atoms(cell=lattice, positions=positions, symbols=symbols, pbc=True)
    symmetry = Symmetry(cell, symprec)
    independent_atoms = symmetry.get_independent_atoms()

    # Rotation matrices in Cartesian
    rotations = []
    for r in symmetry.get_symmetry_operations()['rotations']:
        rotations.append(similarity_transformation(lattice.T, r))

    map_operations = symmetry.get_map_operations()
    map_atoms = symmetry.get_map_atoms()

    atoms_in_dot_scf = _get_independent_atoms_in_dot_scf(filename)

    if len(forces) != len(atoms_in_dot_scf):
        print("%s does not contain the necessary information." % filename)
        print("Please check that the \"FGL\" lines with")
        print("\"total forces\" are present.")
        return False

    if len(atoms_in_dot_scf) == natom:
        print("It is assumed that there are no symmetrically equivalent "
              "atoms in")
        print("'%s' in the Wien2k calculation." % filename)
        force_set = forces
    elif len(forces) != len(independent_atoms):
        print("Non-equivalent atoms of %s could not be recognized by phonopy."
              % filename)
        return False
    else:
        # 1. Transform wien2k forces to those on independent atoms
        indep_atoms_to_wien2k = []
        forces_remap = []
        for i, pos_wien2k in enumerate(atoms_in_dot_scf):
            for j, pos in enumerate(cell.get_scaled_positions()):
                diff = pos_wien2k - pos
                diff -= np.rint(diff)
                if (abs(diff) < symprec).all():
                    forces_remap.append(
                        np.dot(rotations[map_operations[j]], forces[i]))
                    indep_atoms_to_wien2k.append(map_atoms[j])
                    break

        if len(forces_remap) != len(forces):
            print("Atomic position mapping between Wien2k and phonopy failed.")
            print("If you think this is caused by a bug in phonopy,")
            print("please report it on the phonopy mailing list.")
            return False

        # 2. Distribute forces from independent to dependent atoms.
        force_set = []
        for i in range(natom):
            j = indep_atoms_to_wien2k.index(map_atoms[i])
            force_set.append(
                np.dot(rotations[map_operations[i]].T, forces_remap[j]))

    return force_set
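
The position matching above relies on the standard fractional-coordinate wrap: diff -= np.rint(diff) maps every component of the difference into [-0.5, 0.5], so two sites are compared modulo lattice translations. A self-contained sketch of that test:

import numpy as np

def same_site(frac_a, frac_b, symprec=1e-5):
    # Difference in fractional coordinates, wrapped into [-0.5, 0.5]
    diff = np.asarray(frac_a) - np.asarray(frac_b)
    diff -= np.rint(diff)
    return bool((np.abs(diff) < symprec).all())

print(same_site([0.999999, 0.5, 0.0], [0.0, 0.5, 1.0]))  # True
print(same_site([0.25, 0.5, 0.0], [0.75, 0.5, 0.0]))     # False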