Example #1
 def addLogEntry(self, entry, save=True):
     """This is called when a new log entry is created"""
     # create log directory if it doesn't exist
     # error will be thrown if this is not possible
     _busy = Purr.BusyIndicator()
     self._initIndexDir()
     # discard temporary watchers -- these are only used to keep track of
     # deleted files
     self.temp_watchers = {}
     # ignored entries are only there to carry info on ignored data products
     # All we do is save them, and update DP policies based on them
     if entry.ignore:
         entry.save(self.logdir)
     # proper entries are added to list
     else:
         self.entries.append(entry)
         Purr.progressMessage("Saving new log entry")
         # find previous entry -- skip over "ignore" entries
         for prev in self.entries[-2::-1]:
             if not prev.ignore:
                 break
         else:
             prev = None
         entry.setLogDirectory(self.logdir)
         entry.setPrevUpNextLinks(prev=prev,
                                  up=os.path.join("..",
                                                  Purr.RenderIndex.INDEX))
         entry.save()
         self.timestamp = self.last_scan_timestamp
         # regenerate links of previous entry
         if prev:
             prev.generateIndex()
         # and our log may need to be regenerated
         if save:
             self.save()
     self.updatePoliciesFromEntry(entry, new=True)
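The for/else construct above is easy to misread: the else clause runs only when the loop finishes without hitting break, which is how prev ends up as None when there is no earlier non-ignored entry. A minimal standalone sketch of the same backwards scan (illustrative names, not the Purr API):

    from types import SimpleNamespace

    def find_previous_entry(entries):
        """Latest entry whose ignore flag is unset, or None if there is none."""
        # scan backwards from the second-to-last entry (the last one is the new entry)
        for prev in entries[-2::-1]:
            if not prev.ignore:
                break
        else:
            # this branch runs only if the loop never hit "break"
            prev = None
        return prev

    entries = [SimpleNamespace(name="a", ignore=False),
               SimpleNamespace(name="b", ignore=True),
               SimpleNamespace(name="c", ignore=False)]   # "c" is the freshly added entry
    print(find_previous_entry(entries))                   # -> the "a" entry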
Example #2
 def addLogEntry(self, entry, save=True):
     """This is called when a new log entry is created"""
     # create log directory if it doesn't exist
     # error will be thrown if this is not possible
     _busy = Purr.BusyIndicator()
     self._initIndexDir()
     # discard temporary watchers -- these are only used to keep track of
     # deleted files
     self.temp_watchers = {}
     # ignored entries are only there to carry info on ignored data products
     # All we do is save them, and update DP policies based on them
     if entry.ignore:
         entry.save(self.logdir)
     # proper entries are added to list
     else:
         self.entries.append(entry)
         Purr.progressMessage("Saving new log entry")
         # find previous entry -- skip over "ignore" entries
         for prev in self.entries[-2::-1]:
             if not prev.ignore:
                 break
         else:
             prev = None
         entry.setLogDirectory(self.logdir)
         entry.setPrevUpNextLinks(prev=prev, up=os.path.join("..", Purr.RenderIndex.INDEX))
         entry.save()
         self.timestamp = self.last_scan_timestamp
         # regenerate links of previous entry
         if prev:
             prev.generateIndex()
         # and our log may need to be regenerated
         if save:
             self.save()
     self.updatePoliciesFromEntry(entry, new=True)
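addLogEntry touches only a handful of methods on the entry object. A hedged stub of that interface, for orientation (illustrative only; the real Purr entry class carries far more state and arguments):

    class EntryStub:
        """Minimal shape of the entry object that addLogEntry expects (hypothetical)."""

        def __init__(self, ignore=False):
            self.ignore = ignore                  # True: entry only carries data-product policy info

        def setLogDirectory(self, logdir):        # tell the entry where the log lives
            self.logdir = logdir

        def setPrevUpNextLinks(self, prev=None, up=None):
            self.prev, self.up = prev, up         # navigation links used when rendering

        def save(self, dirname=None):             # render the entry under dirname (or its log dir)
            pass

        def generateIndex(self):                  # re-render just this entry's index page
            pass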
Example #3
  def regenerate (self):
    Purr.progressMessage("reading %s"%self.dp.filename,sub=True);
    # init fitsfile to None, so that _read() above is forced to re-read it
    fitsfile = pyfits.open(self.dp.fullpath);
    header = fitsfile[0].header;

    dprintf(3,"beginning render of",self.dp.fullpath); t0 = time.time();
    # write out FITS header
    self.headerfile,path,uptodate = self.subproduct("-fitsheader.html");
    if not uptodate:
      title = "FITS header for %s"%self.dp.filename;
      html = """<HTML><BODY><TITLE>%s</TITLE>
      <H2>%s</H2>
      <PRE>"""%(title,title);
      for line in header.ascard:
        line = str(line).replace("<","&lt;").replace(">","&gt;");
        html += line+"\n";
      html += """
      </PRE></BODY></HTML>\n""";
      try:
        file(path,"w").write(html);
      except:
        print "Error writing file %s"%path;
        traceback.print_exc();
        self.headerfile = None;

    # figure out number of images to include
    ndim = header['NAXIS'];
    fitsshape = [ header['NAXIS%d'%i] for i in range(1,ndim+1) ];
    self.cubesize = 'x'.join(map(str,fitsshape));
    if ndim < 2:
      raise TypeError,"can't render one-dimensional FITS files";
    elif ndim == 2:
      fitsdata_to_images = lambda fdata:[fdata];
      nplanes = 1;
    else:
      ax1 = ax2 = None;
      # find the X/Y axes, by looking at CTYPEx
      # note that the array axes are in reverse order. I.e. if X is FITS axis 1 and Y is axis 2,
      # the array will be of e.g. shape 1,1,NY,NX, while fitsshape is [NX,NY,1,1]
      for i in range(1,ndim+1):
        ctype = header['CTYPE%d'%i];
        if [ prefix for prefix in "RA","GLON","ELON","HLON","SLON" if ctype.startswith(prefix) ] \
            or ctype in ("L","X"):
          ax1 = ndim-i;
        elif [ prefix for prefix in "DEC","GLAT","ELAT","HLAT","SLAT" if ctype.startswith(prefix) ] \
            or ctype in ("M","Y"):
          ax2 = ndim-i;
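The axis bookkeeping here is the subtle part: FITS axes are numbered 1..NAXIS in header order, while the numpy data array has those axes reversed. A small sketch of the same CTYPE-based lookup (assuming a plain dict of header cards; not the Purr code itself):

    LON_PREFIXES = ("RA", "GLON", "ELON", "HLON", "SLON")
    LAT_PREFIXES = ("DEC", "GLAT", "ELAT", "HLAT", "SLAT")

    def find_sky_axes(header, ndim):
        """Return (ax1, ax2): numpy-array indices of the longitude/latitude axes."""
        ax1 = ax2 = None
        for i in range(1, ndim + 1):
            ctype = header.get('CTYPE%d' % i, '')
            if ctype.startswith(LON_PREFIXES) or ctype in ("L", "X"):
                ax1 = ndim - i          # FITS axis i maps to array axis ndim-i
            elif ctype.startswith(LAT_PREFIXES) or ctype in ("M", "Y"):
                ax2 = ndim - i
        return ax1, ax2

    # e.g. a 4D cube stored as NAXIS1=RA, NAXIS2=DEC, NAXIS3=FREQ, NAXIS4=STOKES
    hdr = {'CTYPE1': 'RA---SIN', 'CTYPE2': 'DEC--SIN', 'CTYPE3': 'FREQ', 'CTYPE4': 'STOKES'}
    print(find_sky_axes(hdr, 4))  # -> (3, 2): the last two axes of the numpy data array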
Example #4
 def save(self, refresh=False):
     """Saves the log.
     If refresh is true, re-saves all entries, regenerating everything from scratch.
     """
     # create directory if it doesn't exist
     # error will be thrown if this is not possible
     _busy = Purr.BusyIndicator()
     Purr.progressMessage("Generating index in %s" % self.logdir)
     self._initIndexDir()
     # if refresh is True, re-save all entries.
     if refresh:
         refresh = time.time()
         for i, entry in enumerate(self.entries):
             entry.save(refresh=refresh)
     Purr.RenderIndex.writeLogIndex(self.logdir, self.logtitle, self.timestamp, self.entries, refresh=refresh)
     Purr.progressMessage("Wrote %s" % self.logdir)
Example #5
 def save(self, refresh=False):
     """Saves the log.
     If refresh is true, re-saves all entries, regenerating everything from scratch.
     """
     # create directory if it doesn't exist
     # error will be thrown if this is not possible
     _busy = Purr.BusyIndicator()
     Purr.progressMessage("Generating index in %s" % self.logdir)
     self._initIndexDir()
     # if refresh is True, re-save all entries.
     if refresh:
         refresh = time.time()
         for i, entry in enumerate(self.entries):
             entry.save(refresh=refresh)
     Purr.RenderIndex.writeLogIndex(self.logdir,
                                    self.logtitle,
                                    self.timestamp,
                                    self.entries,
                                    refresh=refresh)
     Purr.progressMessage("Wrote %s" % self.logdir)
Example #6
 def save(self,
          dirname=None,
          refresh=0,
          refresh_index=True,
          emit_message=True):
     """Saves entry in the given directory. Data products will be copied over if not
     residing in that directory.
     'refresh' is a timestamp, passed to renderIndex(), causing all data products OLDER than the specified time to be regenerated.
     'refresh_index', if true, causes index files to be re-rendered unconditionally
     """
     if not refresh and not self.updated:
         return
     timestr = time.strftime("%Y%m%d-%H%M%S",
                             time.localtime(self.timestamp))
     Purr.progressMessage("Rendering entry for %s" % timestr)
     if dirname:
         self.pathname = pathname = os.path.join(
             dirname,
             "%s-%s" % (("ignore" if self.ignore else "entry"), timestr))
     elif not self.pathname:
         raise ValueError("Cannot save entry: pathname not specified")
     else:
         pathname = self.pathname
     # set timestamp
     if not self.timestamp:
         self.timestamp = int(time.time())
     # get canonized path to output directory
     pathname = Purr.canonizePath(pathname)
     if not os.path.exists(pathname):
         os.mkdir(pathname)
     # now save content
     # get device of pathname -- need to know whether we move or copy
     devnum = os.stat(pathname).st_dev
     # copy data products as needed
     dprintf(2, "saving entry %s, %d data products\n", pathname,
             len(self.dps))
     dps = []
     for dp in self.dps:
         # if archived, this indicates a previously saved data product, so ignore it
         # if ignored, no need to save the DP -- but keep it in list
         if dp.archived or dp.ignored:
             dprintf(3, "dp %s is archived or ignored, skipping\n",
                     dp.sourcepath)
             dps.append(dp)
             continue
         # file missing for some reason (perhaps it got removed on us?) skip data product entirely
         if not os.path.exists(dp.sourcepath):
             dprintf(2, "data product %s missing, ignoring\n",
                     dp.sourcepath)
             continue
         Purr.progressMessage("archiving %s" % dp.filename, sub=True)
         # get normalized source and destination paths
         dprintf(2, "data product: %s, rename %s, policy %s\n",
                 dp.sourcepath, dp.filename, dp.policy)
         sourcepath = Purr.canonizePath(dp.sourcepath)
         destname = dp.fullpath = os.path.join(pathname, dp.filename)
         dprintf(2, "data product: %s -> %s\n", sourcepath, destname)
         # does the destination product already exist? skip if same file, else remove
         if os.path.exists(destname):
             if os.path.samefile(destname, sourcepath):
                 dprintf(2, "same file, skipping\n")
                 dp.timestamp = os.path.getmtime(destname)
                 dps.append(dp)
                 continue
             if os.system("/bin/rm -fr '%s'" % destname):
                 print("Error removing %s, which is in the way of %s" %
                       (destname, sourcepath))
                 print("This data product is not saved.")
                 continue
         # for directories, compress with tar
         if os.path.isdir(sourcepath):
             sourcepath = sourcepath.rstrip('/')
             if dp.policy == "copy" or dp.policy.startswith("move"):
                 dprintf(2, "archiving to tgz\n")
                 if os.system("tar zcf '%s' -C '%s' '%s'" %
                              (destname, os.path.dirname(sourcepath),
                               os.path.basename(sourcepath))):
                     print("Error archiving %s to %s" %
                           (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
                 if dp.policy.startswith("move"):
                     os.system("/bin/rm -fr '%s'" % sourcepath)
         # else just a file
         else:
             # now copy/move it over
             if dp.policy == "copy":
                 dprintf(2, "copying\n")
                 if _copy_update(sourcepath, destname):
                     print("Error copying %s to %s" %
                           (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
             elif dp.policy.startswith('move'):
                 if _move_update(sourcepath, destname):
                     print("Error moving %s to %s" % (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
         # success, set timestamp and append
         dp.timestamp = os.path.getmtime(destname)
         dp.archived = True
         dps.append(dp)
     # reset list of data products
     self.dps = dps
     # now write out content
     self.cached_include = os.path.join(pathname, 'index.include.html')
     self.cached_include_valid = False
     self.index_file = os.path.join(pathname, "index.html")
     self.generateIndex(refresh=refresh,
                        refresh_index=refresh_index and time.time())
     self.updated = False
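The per-product branching above reduces to a small policy dispatch: directories are tarred, plain files are copied or moved, and a "move" policy removes the source afterwards. A condensed sketch of that logic using subprocess and shutil in place of os.system (an illustrative alternative, not the Purr implementation):

    import os, shutil, subprocess

    def archive_product(sourcepath, destname, policy):
        """Archive one data product according to its policy ("copy", "move", ...)."""
        if os.path.isdir(sourcepath):
            sourcepath = sourcepath.rstrip('/')
            if policy == "copy" or policy.startswith("move"):
                # directories are stored as a gzipped tarball
                result = subprocess.run(
                    ["tar", "zcf", destname,
                     "-C", os.path.dirname(sourcepath), os.path.basename(sourcepath)])
                if result.returncode:
                    return False
                if policy.startswith("move"):
                    shutil.rmtree(sourcepath, ignore_errors=True)
        elif policy == "copy":
            shutil.copy2(sourcepath, destname)
        elif policy.startswith("move"):
            shutil.move(sourcepath, destname)
        return True

Returning a boolean here is a simplification; the excerpt above instead checks the non-zero exit status of os.system and of the _copy_update/_move_update helpers.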
Example #7
    def regenerate(self):
        Purr.progressMessage("reading %s" % self.dp.filename, sub=True)
        # init fitsfile to None, so that _read() above is forced to re-read it
        fitsfile = fits.open(self.dp.fullpath)
        header = fitsfile[0].header

        dprintf(3, "beginning render of", self.dp.fullpath);
        t0 = time.time()
        # write out FITS header
        self.headerfile, path, uptodate = self.subproduct("-fitsheader.html")
        if not uptodate:
            title = "FITS header for %s" % self.dp.filename
            html = """<HTML><BODY><TITLE>%s</TITLE>
      <H2>%s</H2>
      <PRE>""" % (title, title)
            for line in header.cards:
                line = str(line).replace("<", "&lt;").replace(">", "&gt;")
                html += line + "\n"
            html += """
      </PRE></BODY></HTML>\n"""
            try:
                open(path, "w").write(html)
            except:
                print("Error writing file %s" % path)
                traceback.print_exc()
                self.headerfile = None

        # figure out number of images to include
        ndim = header['NAXIS']
        fitsshape = [header['NAXIS%d' % i] for i in range(1, ndim + 1)]
        self.cubesize = 'x'.join(map(str, fitsshape))
        if ndim < 2:
            raise TypeError("can't render one-dimensional FITS files""")
        elif ndim == 2:
            fitsdata_to_images = lambda fdata: [fdata]
            nplanes = 1
        else:
            ax1 = ax2 = None
            # find the X/Y axes, by looking at CTYPEx
            # note that the array axes are in reverse order. I.e. if X is FITS axis 1 and Y is axis 2,
            # the array will be of e.g. shape 1,1,NY,NX, while fitsshape is [NX,NY,1,1]
            for i in range(1, ndim + 1):
                ctype = header['CTYPE%d' % i]
                if [prefix for prefix in ("RA", "GLON", "ELON", "HLON", "SLON") if ctype.startswith(prefix)] \
                        or ctype in ("L", "X"):
                    ax1 = ndim - i
                elif [prefix for prefix in ("DEC", "GLAT", "ELAT", "HLAT", "SLAT") if ctype.startswith(prefix)] \
                        or ctype in ("M", "Y"):
                    ax2 = ndim - i
            if ax1 is None or ax2 is None:
                ax1, ax2 = 1, 0
            arrshape = fitsshape[-1::-1]
            # this is how many planes we render, at most
            nplanes = max(self.getOption('fits-nimage'), 1)
            slices = []
            baseslice = [0] * ndim
            baseslice[ax1] = baseslice[ax2] = None
            imgshape = (arrshape[min(ax1, ax2)], arrshape[max(ax1, ax2)])
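            # the loop below walks baseslice like an odometer over the non-image
            # axes (the entries not set to None): each iteration records one index
            # tuple, then advances the lowest axis that has not yet wrapped around,
            # stopping once nplanes slices are collected or the odometer wraps fully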
            while len(slices) < nplanes:
                slices.append(tuple(baseslice))
                for idim in range(ndim):
                    if baseslice[idim] is not None:
                        baseslice[idim] += 1
                        if baseslice[idim] < arrshape[idim]:
                            break
                        else:
                            baseslice[idim] = 0
                else:
                    break
            nplanes = len(slices)

            # OK, slices contains how many slices to return
            def fitsdata_to_images(fdata, slices=slices, imgshape=imgshape):
                dprint(3, "fitsdata_to_images", slices, fdata.shape);
                t0 = time.time()
                # reshape to collapse into a 3D cube
                img = [fdata[i].reshape(imgshape) for i in slices]
                dprint(3, "collecting images took", time.time() - t0, "secs");
                t0 = time.time()
                return img

        # OK, now cycle over all images
        dprintf(3, "%s: rendering %d planes\n", self.dp.fullpath, nplanes);
        t0 = time.time()

        self.imgrec = [None] * nplanes
        # get number of bins (0 or None means no histogram)
        nbins = self.getOption("fits-hist-nbin")
        # see if histogram clipping is enabled, set hclip to None if not
        self.hclip = hclip = self.getOption("fits-hist-clip")
        if hclip == 1 or not nbins:
            hclip = None

        tsize_img = self.getOption("image-thumbnail-width"), self.getOption("image-thumbnail-height")
        tsize_hist = self.getOption("hist-thumbnail-width"), self.getOption("hist-thumbnail-height")
        self.hist_size = self.getOption("hist-width"), self.getOption("hist-height")

        # filled once we read the data
        images = None

        for num_image in range(nplanes):
            # do we have a cached status record for this image?
            recfile, recpath, uptodate = self.subproduct("-%d-stats" % num_image)
            if uptodate:
                dprintf(3, "%s(%d): stats file %s up-to-date, reading in\n", self.dp.fullpath, num_image, recfile)
                try:
                    self.imgrec[num_image] = pickle.load(open(recpath, 'rb'))
                    continue
                except:
                    print("Error reading stats file %s, regenerating everything" % recpath)
                    traceback.print_exc()
            # out of date, so we regenerate everything
            # build up record of stuff associated with this image
            rec = self.imgrec[num_image] = Kittens.utils.recdict()

            # generate paths for images
            rec.fullimage, img_path = self.subproductPath("-%d-full.png" % num_image)
            rec.thumbnail, img_thumb = self.subproductPath("-%d-thumb.png" % num_image)
            if pychart:
                rec.histogram_full, hf_path = self.subproductPath("-%d-hist-full.png" % num_image)
                rec.histogram_zoom, hz_path = self.subproductPath("-%d-hist-zoom.png" % num_image)
                rec.histogram_full_thumb, hf_thumb = self.subproductPath("-%d-hist-full-thumb.png" % num_image)
                rec.histogram_zoom_thumb, hz_thumb = self.subproductPath("-%d-hist-zoom-thumb.png" % num_image)

            # need to read in data at last
            if not images:
                dprint(3, "reading data");
                t0 = time.time()
                fitsdata = fitsfile[0].data
                dprint(3, "reading data took", time.time() - t0, "secs");
                t0 = time.time()
                fitsfile = None
                images = fitsdata_to_images(fitsdata)
                dprint(3, "converting to images took", time.time() - t0, "secs");
                t0 = time.time()
                fitsdata = None

            data = images[num_image]

            title = self.dp.filename
            if nplanes > 1:
                title += ", plane #%d" % num_image
            Purr.progressMessage("rendering %s" % title, sub=True)

            # min/max data values
            dprint(3, "rendering plane", num_image);
            t0 = time.time()
            datamask = ~numpy.isfinite(data)
            dprint(3, "making mask took", time.time() - t0, "secs");
            t0 = time.time()
            datamin, datamax = scipy.ndimage.measurements.extrema(data, datamask, False)[:2]
            dprint(3, "computing min/max took", time.time() - t0, "secs");
            t0 = time.time()
            rec.datamin, rec.datamax = datamin, datamax
            # mean and sigma
            rec.datamean = scipy.ndimage.measurements.mean(data, datamask, False)
            dprint(3, "computing mean took", time.time() - t0, "secs");
            t0 = time.time()
            rec.datastd = scipy.ndimage.measurements.standard_deviation(data, datamask, False)
            dprint(3, "computing std took", time.time() - t0, "secs");
            t0 = time.time()
            # thumbnail files will be "" if images are small enough to be inlined.
            # these will be None if no histogram clipping is applied
            rec.clipmin, rec.clipmax = None, None
            dprintf(3, "%s plane %d: datamin %g, datamax %g\n", self.dp.fullpath, num_image, rec.datamin, rec.datamax)
            # compute histogram of data only if this is enabled,
            # and either pychart is available (so we can produce plots), or histogram clipping is in effect
            if datamin != datamax and nbins and (pychart or hclip):
                dprintf(3, "%s plane %d: computing histogram\n", self.dp.fullpath, num_image)
                counts = scipy.ndimage.measurements.histogram(data, datamin, datamax, nbins, labels=datamask,
                                                              index=False)  # needed for 1.3+ to avoid warnings
                edges = datamin + (datamax - datamin) * (numpy.arange(nbins, dtype=float) + .5) / nbins
                dprint(3, "computing histogram took", time.time() - t0, "secs");
                t0 = time.time()
                # render histogram
                if pychart:
                    try:
                        self._make_histogram(hf_path, "Histogram of %s" % title, edges, counts)
                        dprint(3, "rendering histogram took", time.time() - t0, "secs");
                        t0 = time.time()
                    except:
                        print("Error rendering histogram %s" % hf_path)
                        traceback.print_exc()
                        rec.histogram_full = None
                    # if histogram was rendered, make a thumbnail
                    if rec.histogram_full:
                        self.makeThumb(hf_path, hf_thumb, tsize_hist)
                    else:
                        rec.histogram_full_thumb = None
                # now, compute clipped data if needed
                if hclip:
                    # find max point in histogram
                    ic = counts.argmax()
                    # compute number of points that need to be included, given the clip factor
                    target_count = int(data.size * hclip)
                    ih0 = ih1 = ic
                    totcount = counts[ic]
                    # find how many bins to include around ic, stopping when we hit the edge
                    while totcount < target_count:
                        if ih0 > 0:
                            ih0 -= 1
                            totcount += counts[ih0]
                        if ih1 < nbins - 1:
                            ih1 += 1
                            totcount += counts[ih1]
                        # just in case
                        if ih0 <= 0 and ih1 >= nbins - 1:
                            break
                    # and these are the clipping limits
                    datamin = float(edges[ih0])
                    if ih1 >= nbins - 1:
                        ih1 = nbins - 1  # and datamax is already the clipping limit
                    else:
                        ih1 += 1
                        datamax = float(edges[ih1])
                    rec.clipmin, rec.clipmax = datamin, datamax
                    dprintf(3, "%s plane %d: clipping to %g,%g\n", self.dp.fullpath, num_image, rec.clipmin,
                            rec.clipmax)
                    # render zoomed histogram
                    if pychart:
                        if rec.clipmax != rec.clipmin:
                            zcounts = scipy.ndimage.measurements.histogram(data, rec.clipmin, rec.clipmax, nbins,
                                                                           labels=datamask,
                                                                           index=False)  # needed for 1.3+ to avoid warnings
                            zedges = rec.clipmin + (rec.clipmax - rec.clipmin) * (
                                        numpy.arange(nbins, dtype=float) + .5) / nbins
                            try:
                                self._make_histogram(hz_path, "Histogram zoom of %s" % title, zedges, zcounts)
                                dprint(3, "rendering zoomed histogram took", time.time() - t0, "secs");
                                t0 = time.time()
                            except:
                                print("Error rendering histogram %s" % hz_path)
                                traceback.print_exc()
                                rec.histogram_zoom = None
                        else:  # no meaningful zoomed area to render
                            rec.histogram_zoom = None
                        # if histogram was rendered, make a thumbnail
                        if rec.histogram_zoom:
                            histogram_zoom_thumb = self.makeThumb(hz_path, hz_thumb, tsize_hist)
                        else:
                            rec.histogram_zoom_thumb = None
                    # clip data
                    data = numpy.clip(data, datamin, datamax)
                # end of clipping
            # else no histogram for whatever reason
            else:
                rec.histogram_full = rec.histogram_zoom = rec.histogram_full_thumb = rec.histogram_zoom_thumb = None
            # ok, data has been clipped if need be. Rescale it to 8-bit integers
            t0 = time.time()
            datarng = datamax - datamin
            if datarng:
                data = (data - datamin) * (255 / datarng)
                data = data.round().astype('uint8')
                data[datamask] = 255
            else:
                data = numpy.zeros(data.shape, dtype='uint8')
            dprintf(3, "%s plane %d: rescaled to %d:%d in %f seconds\n", self.dp.fullpath, num_image, data.min(),
                    data.max(), time.time() - t0)
            t0 = time.time()
            # generate PNG image
            img = None
            try:
                img = PIL.Image.frombuffer('L', data.shape[-1::-1], data.tobytes(), "raw", 'L', 0, -1)
                dprint(3, "image frombuffer took", time.time() - t0, "secs")
                t0 = time.time()
                # img = PIL.Image.new('L',data.shape)
                # dprint(3,"new image took",time.time()-t0,"secs"); t0 = time.time()
                # imgdata = data.reshape((data.size,))
                # dprint(3,"data.reshape took",time.time()-t0,"secs"); t0 = time.time()
                # img.putdata(imgdata)
                # dprint(3,"putdata took",time.time()-t0,"secs"); t0 = time.time()
                # img = img.transpose(PIL.Image.FLIP_TOP_BOTTOM)
                # dprint(3,"transpose took",time.time()-t0,"secs"); t0 = time.time()
                img.save(img_path, 'PNG')
                dprint(3, "saving took", time.time() - t0, "secs");
                t0 = time.time()
            except:
                print("Error rendering image %s" % path)
                traceback.print_exc()
                rec.fullimage = img = None
            # if image was rendered, make a thumbnail
            if rec.fullimage:
                thumb = self.makeThumb(img_path, img_thumb, tsize_img, img=img)
                dprint(3, "rendering thumbnail took", time.time() - t0, "secs");
                t0 = time.time()
                # None means thumbnail failed
                if thumb is None:
                    rec.thumbnail = None
                # else perhaps image is its own thumbnail
                elif thumb is img_path:
                    rec.thumbnail = rec.fullimage
            else:
                rec.thumbnail = None
            # write stats
            try:
                pickle.dump(rec, open(recpath, 'wb'))
            except:
                print("Error writing stats file %s" % recpath)
                traceback.print_exc()
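The trickiest numeric step above is the histogram clip: starting from the most populated bin, a window [ih0, ih1] is grown on both sides until it covers roughly the hclip fraction of pixels, and the corresponding bin edges become the display limits. A standalone numpy-only sketch of that idea (illustrative defaults, not the Purr code):

    import numpy

    def clip_limits(data, nbins=256, hclip=0.95):
        """Return (lo, hi) intensity limits covering roughly hclip of the finite pixels."""
        finite = data[numpy.isfinite(data)]
        counts, edges = numpy.histogram(finite, bins=nbins)
        ic = counts.argmax()                      # most populated bin
        target = int(finite.size * hclip)
        ih0 = ih1 = ic
        total = counts[ic]
        while total < target:
            if ih0 > 0:
                ih0 -= 1
                total += counts[ih0]
            if ih1 < nbins - 1:
                ih1 += 1
                total += counts[ih1]
            if ih0 <= 0 and ih1 >= nbins - 1:     # whole histogram already covered
                break
        return float(edges[ih0]), float(edges[ih1 + 1])

The excerpt differs in detail (it reuses the counts and edges it has already computed and handles the upper boundary explicitly), but the windowing idea is the same.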
Example #8
 def save(self, dirname=None, refresh=0, refresh_index=True, emit_message=True):
     """Saves entry in the given directory. Data products will be copied over if not
     residing in that directory.
     'refresh' is a timestamp, passed to renderIndex(), causing all data products OLDER than the specified time to be regenerated.
     'refresh_index', if true, causes index files to be re-rendered unconditionally
     """
     if not refresh and not self.updated:
         return
     timestr = time.strftime("%Y%m%d-%H%M%S", time.localtime(self.timestamp))
     Purr.progressMessage("Rendering entry for %s" % timestr)
     if dirname:
         self.pathname = pathname = os.path.join(dirname, "%s-%s" %
                                                 (("ignore" if self.ignore else "entry"), timestr))
     elif not self.pathname:
         raise ValueError("Cannot save entry: pathname not specified")
     else:
         pathname = self.pathname
     # set timestamp
     if not self.timestamp:
         self.timestamp = int(time.time())
     # get canonized path to output directory
     pathname = Purr.canonizePath(pathname)
     if not os.path.exists(pathname):
         os.mkdir(pathname)
     # now save content
     # get device of pathname -- need to know whether we move or copy
     devnum = os.stat(pathname).st_dev
     # copy data products as needed
     dprintf(2, "saving entry %s, %d data products\n", pathname, len(self.dps))
     dps = []
     for dp in self.dps:
         # if archived, this indicates a previously saved data product, so ignore it
         # if ignored, no need to save the DP -- but keep it in list
         if dp.archived or dp.ignored:
             dprintf(3, "dp %s is archived or ignored, skipping\n", dp.sourcepath)
             dps.append(dp)
             continue
         # file missing for some reason (perhaps it got removed on us?) skip data product entirely
         if not os.path.exists(dp.sourcepath):
             dprintf(2, "data product %s missing, ignoring\n", dp.sourcepath)
             continue
         Purr.progressMessage("archiving %s" % dp.filename, sub=True)
         # get normalized source and destination paths
         dprintf(2, "data product: %s, rename %s, policy %s\n", dp.sourcepath, dp.filename, dp.policy)
         sourcepath = Purr.canonizePath(dp.sourcepath)
         destname = dp.fullpath = os.path.join(pathname, dp.filename)
         dprintf(2, "data product: %s -> %s\n", sourcepath, destname)
         # does the destination product already exist? skip if same file, else remove
         if os.path.exists(destname):
             if os.path.samefile(destname, sourcepath):
                 dprintf(2, "same file, skipping\n")
                 dp.timestamp = os.path.getmtime(destname)
                 dps.append(dp)
                 continue
             if os.system("/bin/rm -fr '%s'" % destname):
                 print("Error removing %s, which is in the way of %s" % (destname, sourcepath))
                 print("This data product is not saved.")
                 continue
         # for directories, compress with tar
         if os.path.isdir(sourcepath):
             sourcepath = sourcepath.rstrip('/')
             if dp.policy == "copy" or dp.policy.startswith("move"):
                 dprintf(2, "archiving to tgz\n")
                 if os.system("tar zcf '%s' -C '%s' '%s'" % (destname,
                                                             os.path.dirname(sourcepath),
                                                             os.path.basename(sourcepath))):
                     print("Error archiving %s to %s" % (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
                 if dp.policy.startswith("move"):
                     os.system("/bin/rm -fr '%s'" % sourcepath)
         # else just a file
         else:
             # now copy/move it over
             if dp.policy == "copy":
                 dprintf(2, "copying\n")
                 if _copy_update(sourcepath, destname):
                     print("Error copying %s to %s" % (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
             elif dp.policy.startswith('move'):
                 if _move_update(sourcepath, destname):
                     print("Error moving %s to %s" % (sourcepath, destname))
                     print("This data product is not saved.")
                     continue
         # success, set timestamp and append
         dp.timestamp = os.path.getmtime(destname)
         dp.archived = True
         dps.append(dp)
     # reset list of data products
     self.dps = dps
     # now write out content
     self.cached_include = os.path.join(pathname, 'index.include.html')
     self.cached_include_valid = False
     self.index_file = os.path.join(pathname, "index.html")
     self.generateIndex(refresh=refresh, refresh_index=refresh_index and time.time())
     self.updated = False
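_copy_update and _move_update are not shown in this excerpt; judging from how their return values are tested, a non-zero (truthy) result signals failure, mirroring os.system. A hypothetical minimal stand-in with that contract (illustrative only, not the real helpers):

    import shutil

    def _copy_update(sourcepath, destname):
        """Copy sourcepath to destname; return non-zero on error (hypothetical stand-in)."""
        try:
            shutil.copy2(sourcepath, destname)
            return 0
        except (OSError, shutil.Error):
            return 1

    def _move_update(sourcepath, destname):
        """Move sourcepath to destname; return non-zero on error (hypothetical stand-in)."""
        try:
            shutil.move(sourcepath, destname)
            return 0
        except (OSError, shutil.Error):
            return 1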
Example #9
      # need to read in data at last
      if not images:
        dprint(3,"reading data"); t0 = time.time();
        fitsdata = fitsfile[0].data;
        dprint(3,"reading data took",time.time()-t0,"secs"); t0 = time.time();
        fitsfile = None;
        images = fitsdata_to_images(fitsdata);
        dprint(3,"converting to images took",time.time()-t0,"secs"); t0 = time.time();
        fitsdata = None;

      data = images[num_image];

      title = self.dp.filename;
      if nplanes > 1:
        title += ", plane #%d"%num_image;
      Purr.progressMessage("rendering %s"%title,sub=True);

      # min/max data values
      dprint(3,"rendering plane",num_image); t0 = time.time();
      datamask = ~numpy.isfinite(data);
      dprint(3,"making mask took",time.time()-t0,"secs"); t0 = time.time();
      datamin,datamax = scipy.ndimage.measurements.extrema(data,datamask,False)[:2];
      dprint(3,"computing min/max took",time.time()-t0,"secs"); t0 = time.time();
      rec.datamin,rec.datamax = datamin,datamax;
      # mean and sigma
      rec.datamean = scipy.ndimage.measurements.mean(data,datamask,False);
      dprint(3,"computing mean took",time.time()-t0,"secs"); t0 = time.time();
      rec.datastd = scipy.ndimage.measurements.standard_deviation(data,datamask,False);
      dprint(3,"computing std took",time.time()-t0,"secs"); t0 = time.time();
      # thumbnail files will be "" if images are small enough to be inlined.
      # these will be None if no histogram clipping is applied
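The scipy.ndimage.measurements calls above pass the NaN/Inf mask as labels and False as index, which in effect restricts each statistic to pixels where the mask is 0, i.e. the finite ones. A plain-numpy sketch of the same masked statistics (illustrative, not the Purr code):

    import numpy

    def masked_stats(data):
        """Min, max, mean and standard deviation over the finite pixels only."""
        finite = data[numpy.isfinite(data)]
        return finite.min(), finite.max(), finite.mean(), finite.std()

    data = numpy.array([[1.0, 2.0], [numpy.nan, 4.0]])
    print(masked_stats(data))   # statistics over the three finite values 1, 2 and 4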