    def analyze(self, threshold_multiplier, parallel=False):
        exp_name = self.config['experiment']['name']
        tr = Tracker(exp_name, self.outdir, threshold_multiplier, self.magnification, self.microscope, self.binning)
        gen = self.readin_stacks()
        multiprocess.freeze_support()
        pool = multiprocess.Pool()
        #This may need to be a function of memory
        offset = 10 
        self.resultdir = join(os.path.dirname(self.outdir), 'results')
        #Make directory in case it doesn't exist
        fileutils.mkdir(self.resultdir)
        self.exporter.prep_csv_file(self.resultdir, self.surv_fname)

        def output(well, neurons, crop_val):
            self.exporter.export(well, neurons, crop_val)

        if not parallel:
            for data in gen:
                well, neurons = tr.track(data)
                output(well, neurons, 20)

        else:
            stacks_left = True
            while stacks_left:
                try:
                    #Chain one eagerly-fetched stack with up to offset - 1 lazy
                    #ones so at most `offset` stacks are in flight per round
                    it = itertools.chain([next(gen)], itertools.islice(gen, offset - 1))
                    for well, neurons in pool.imap_unordered(func=tr.track, iterable=it):
                        output(well, neurons, 20)
                except StopIteration:
                    stacks_left = False
            pool.close()
            pool.join()
        group_labels = self.config['experiment']['imaging']['group_labels']
        group_control_label = self.config['experiment']['imaging']['group_control_label']
        run_cox_analysis(self.config, self.outdir)
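The parallel branch above pulls stacks from the generator in bounded chunks so at most `offset` items are materialized at once. A minimal, standalone sketch of the same pattern, using the stdlib multiprocessing (the code above uses the API-compatible multiprocess fork):

import itertools
import multiprocessing

def work(x):
    #Stand-in for tr.track
    return x * x

def chunked_imap(pool, func, gen, offset=10):
    #Fetch one item eagerly; chain it with up to offset - 1 lazy ones so the
    #pool never has more than `offset` inputs in flight per round.
    while True:
        try:
            chunk = itertools.chain([next(gen)], itertools.islice(gen, offset - 1))
        except StopIteration:
            return
        for result in pool.imap_unordered(func, chunk):
            yield result

if __name__ == '__main__':
    with multiprocessing.Pool() as pool:
        for r in chunked_imap(pool, work, iter(range(25)), offset=10):
            print(r)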
Example #2
    def get(self, dest, options=None):
        if not options:
            options = {}

        if self.source == "@":
            logging.info("Creating directory {}".format(dest))
            fileutils.mkdir(dest, overwrite=options.get("overwrite", False))
            return True

        method = options.get("method", "copy")
        if method == "copy":
            logging.info("Copying {} to {}".format(self.source, dest))
            fileutils.copy(self.source, dest)
        elif method == "rsync":
            logging.info("Rsyncing {} to {}".format(self.source, dest))
            fileutils.rsync(self.source, dest)
        elif method == "link":
            logging.info("Creating symbolic link {} to {}".format(
                dest, self.source))
            fileutils.link(self.source,
                           dest,
                           overwrite=options.get("overwrite", True))
        elif method == "hardlink":
            logging.info("Creating hard link {} to {}".format(
                dest, self.source))
            fileutils.link(self.source, dest, symbolic=False)

        return True
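The options dict drives the dispatch above; a sketch of the shapes it recognizes, using only keys and values taken from the branches:

#Option dicts recognized by get(), per the branches above:
opts_copy = {}                                      # default method is "copy"
opts_rsync = {"method": "rsync"}
opts_link = {"method": "link", "overwrite": False}  # symlink without clobbering
opts_hardlink = {"method": "hardlink"}
opts_mkdir = {"overwrite": True}                    # used when source == "@"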
def scaleImage(filename, filigrane=None):
    """Common processing for one image : 
    - create a subfolder "scaled" and "thumb"
    - populate it
    
    @param filename: path to the file
    @param filigrane: None or a Signature instance (see imagizer.photo.Signature) 
     """
    rootdir = os.path.dirname(filename)
    scaledir = os.path.join(rootdir, config.ScaledImages["Suffix"])
    thumbdir = os.path.join(rootdir, config.Thumbnails["Suffix"])
    fileutils.mkdir(scaledir)
    fileutils.mkdir(thumbdir)
    photo = Photo(filename, dontCache=True)
    param = config.ScaledImages.copy()
    param.pop("Suffix")
    #splitext instead of [:-4] so extensions other than ".jpg" are handled
    param["strThumbFile"] = os.path.splitext(os.path.join(scaledir, os.path.basename(filename)))[0] + "--%s.jpg" % config.ScaledImages["Suffix"]
    photo.saveThumb(**param)
    param = config.Thumbnails.copy()
    param.pop("Suffix")
    param["strThumbFile"] = os.path.splitext(os.path.join(thumbdir, os.path.basename(filename)))[0] + "--%s.jpg" % config.Thumbnails["Suffix"]
    photo.saveThumb(**param)
    if filigrane is not None:
        filigrane.substract(photo.pil).save(filename, quality=config.FiligraneQuality, optimize=config.FiligraneOptimize, progressive=config.FiligraneOptimize)
        try:
            os.chmod(filename, config.DefaultFileMode)
        except OSError:
            logger.warning("in scaleImage: Unable to chmod %s" % filename)
def export_ij_rois(outpath, well, neurons, crop_val=0):
    '''This export method adheres to the ROI format used in the lab.'''
    ij_roi_dir = tempfile.mkdtemp('IJ_roi_series')
    tp_to_rois = defaultdict(list)
    for neuron in neurons:
        contours = [roi.contour + crop_val for roi in neuron.roi_series]
        for timepoint, contour in enumerate(contours):
            tp_to_rois[timepoint].append(
                (neuron.ID, encode(contour, name=str(neuron.ID))))
    ij_rois_outpath = os.path.join(outpath, 'IJ_rois')
    fileutils.mkdir(ij_rois_outpath)
    for timepoint, ij_rois in tp_to_rois.items():
        roi_paths = []
        for ij_roi in ij_rois:
            roi_path = os.path.join(
                ij_roi_dir,
                well + '_' + str(timepoint) + '_' + str(ij_roi[0]) + '.roi')
            with open(roi_path, 'wb') as f:
                f.write(ij_roi[1])
            roi_paths.append(roi_path)
        with zipfile.ZipFile(
                os.path.join(ij_rois_outpath,
                             well + '-T' + str(timepoint + 1) + '_RoiSet.zip'),
                'w') as roi_zip:
            for roi_path in roi_paths:
                #Expected roi_path is *_[0-9]+.roi
                roi_ID = int(roi_path.split('_')[-1].split('.')[0]) + 1
                roi_zip.write(roi_path, str(roi_ID) + '.roi')
    shutil.rmtree(ij_roi_dir)
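A quick standalone sketch for inspecting one of the RoiSet zips written above:

import zipfile

def list_roi_entries(zip_path):
    #Entries are named '<ID>.roi', one per neuron present at the timepoint
    with zipfile.ZipFile(zip_path) as z:
        return sorted(z.namelist())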
def make_output_dirs(workdir):
    j = os.path.join
    fileutils.mkdir(j(workdir, 'results'))
    fileutils.mkdir(j(workdir, 'results', 'nuclear_fractionation'))
    fileutils.mkdir(j(workdir, 'results', 'nuclear_fractionation', 'annotated_stacks'))
    fileutils.mkdir(j(workdir, 'results', 'nuclear_fractionation', 'nuclear_ROIs'))
    fileutils.mkdir(j(workdir, 'results', 'nuclear_fractionation', 'nuclear_ROIs', 'rois'))
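If fileutils.mkdir tolerates existing directories (as the chained calls suggest), the same tree can be built with the standard library alone; a minimal sketch:

import os

def make_output_dirs(workdir):
    #os.makedirs creates all intermediate directories in one call;
    #exist_ok=True keeps it idempotent like the chained mkdir calls above.
    base = os.path.join(workdir, 'results', 'nuclear_fractionation')
    os.makedirs(os.path.join(base, 'annotated_stacks'), exist_ok=True)
    os.makedirs(os.path.join(base, 'nuclear_ROIs', 'rois'), exist_ok=True)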
Example #8
    def _export_rois(self, well, neurons, crop_val):
        outpath = join(self.outdir, 'rois')
        fileutils.mkdir(outpath)
        fname = join(outpath, str(well) + '.p')
        ID_to_data = {}
        for neuron in neurons:
            ID_to_data[neuron.ID] = neuron.roi_data_as_dict(crop_val)
        #Dump once, after the mapping is complete, so the file is written a
        #single time and closed promptly
        with open(fname, 'wb') as f:
            pickle.dump(ID_to_data, f)
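A matching read-back sketch for the pickle written above, assuming the same '<well>.p' naming:

import pickle
from os.path import join

def load_rois(outdir, well):
    #Returns the {neuron ID: ROI data} mapping written by _export_rois
    with open(join(outdir, 'rois', str(well) + '.p'), 'rb') as f:
        return pickle.load(f)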
Example #9
def jar(path,
        manifest,
        sourcedir,
        options,
        preserveManifestFormatting=False,
        update=False,
        outputHandler=None):
    """ Create a jar file containing a manifest and some other files

	@param path: jar file to create. Typically this file does not already exist, but if it does 
	then the specified files or manifest will be merged into it. 
	
	@param manifest: path to the manifest.mf file (or None to disable manifest entirely)

	@param sourcedir: the directory to pack everything from (this method may add extra files to this dir)

	@param options: options map. jar.options is a list of additional arguments

	@param preserveManifestFormatting: an advanced option that prevents that jar executable from 
	reformatting the specified manifest file to comply with Java conventions 
	(also prevents manifest merging if jar already exists)
	"""
    # work out if we need to create a parent directory
    dir = os.path.dirname(path)
    if dir and not os.path.exists(dir): mkdir(dir)
    # location of jar
    if options['java.home']:
        binary = os.path.join(options['java.home'], "bin/jar")
    else:
        binary = "jar"
    # build up arguments
    args = [binary]
    args.extend(options['jar.options'])

    if update:
        mode = '-u'
    else:
        mode = '-c'

    if not manifest:
        args.extend([mode + "fM", path])
    elif preserveManifestFormatting:
        mkdir(sourcedir + '/META-INF')
        srcf = normLongPath(sourcedir + '/META-INF/manifest.mf')

        with open(manifest, 'rb') as s:
            with openForWrite(srcf, 'wb') as d:
                d.write(s.read())
        args.extend([mode + "f", path])
    else:
        args.extend([mode + "fm", path, manifest])

    if sourcedir:
        args.extend(["-C", sourcedir, "."])

    # actually call jar
    call(args, outputHandler=outputHandler, timeout=options['process.timeout'])
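The options map is supplied by the surrounding build system; a hypothetical minimal map covering only the keys this function reads:

options = {
    'java.home': '',        # empty string: pick up `jar` from the PATH
    'jar.options': [],      # extra command-line arguments for jar
    'process.timeout': 600, # seconds before call() aborts the process
}
# jar('out/app.jar', 'build/MANIFEST.MF', 'build/classes', options)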
Example #11
    def __init__(self, workdir):
        fileutils.mkdir(join(workdir, 'analysis'))
        self.outdir = join(workdir, 'analysis')
        self.config = mfileutils.makeconfig.mfile_to_config(workdir, self.outdir) 
        self.microscope = self.config['experiment']['imaging']['microscope']
        self.magnification = self.config['experiment']['imaging']['magnification']
        self.binning = self.config['experiment']['imaging']['binning']
        #Identify directory containing images
        primary_channel = self.config['experiment']['imaging']['primary_channel']
        self.imgdir = join(workdir, 'processed_imgs', 'stacked', primary_channel) 

        self.exporter = Exporter(self.config)
        self.exp_name = self.config['experiment']['name']
        self.surv_fname = self.exp_name + '_surv_data.csv'
Example #12
def javadoc(path, sources, classpath, options, outputHandler):
    """ Create javadoc from sources and a set of options

	@param path: The directory under which to create the javadoc

	@param sources: a list of source files

	@param classpath: a list of jars for the classpath

	@param options: the current set of options to use

	@param outputHandler: the output handler (optional)
	"""
    deleteDir(path)
    mkdir(path)
    # location of javadoc
    if options['java.home']:
        binary = os.path.join(options['java.home'], "bin/javadoc")
    else:
        binary = "javadoc"

    # store the list of files in a temporary file, then build from that.
    mkdir(options['tmpdir'])
    inputlistfile = os.path.join(options['tmpdir'], "javadoc.inputs")
    with openForWrite(inputlistfile, 'wb') as f:
        f.writelines(
            map(lambda x: '"' + x.replace('\\', '\\\\') + '"' + os.linesep,
                sources))

    # build up arguments
    args = [binary]
    args.extend(options['javadoc.options'])
    if options['javadoc.ignoreSourceFilesFromClasspath']:
        args.extend(['-sourcepath', path + '/xpybuild_fake_sourcepath'])
    args.extend([
        "-d", path, "-classpath", classpath, "-windowtitle",
        options['javadoc.title'], "-doctitle", options['javadoc.title'],
        "-%s" % options['javadoc.access'],
        "@%s" % inputlistfile
    ])
    # actually call javadoc
    call(args, outputHandler=outputHandler, timeout=options['process.timeout'])
Example #14
def record(game, nextGame):
    filename = makePath(
        expandPath(AudioDirectory)
      , '.'.join([game['filename'], Encoder])
    )
    latest = makePath(expandPath(AudioDirectory), 'latest.ogg')
    # build the commands
    recorder = ' '.join([
        'arecord'                        # audio recorder
      , '-q'                             # quiet
      , '-d {duration}'                  # recording time
      , '--max-file-time {duration}'     # recording time before switching files (must be >= recording time)
      , '-c 2'                           # input stream is 2 channels
      , '-f S16'                         # input stream is 16 bit signed
      , '-r 44100'                       # rate of input stream is 44.1kHz
      , '-D {device}'                    # audio generator
      , '-t raw'                         # output format is raw (don't use .wav, it cuts out after 3 hours and 22 minutes because of a size limit on .wav files)
    ]).format(
        duration = 3600*RecordingDuration, device = SharkAudioAddr
    )

    if Encoder == 'ogg':
        encoder = ' '.join([
            'oggenc'                     # Ogg encoder
          , '-Q'                         # quiet
          , '-r'                         # input format is raw
          , '--resample 8000'            # sample rate (8000 and 11025 are suitable choices for AM radio)
          , '--downmix'                  # convert from stereo to mono
          , '-q 0'                       # quality level (range is -1 to 10 with 10 being highest)
          , '--ignorelength'             # Allow input stream to exceed 4GB
          , '-o "{filename}"'            # output file name
          , '--title "{title} ({date})"' # title
          , '--album "{title}"'          # album
          , '--artist "{artist}"'        # artist
          , '--date "{date}"'            # date
          , '-'                          # read from standard input
        ]).format(
            filename = filename
          , title = game['desc']
          , artist = 'The 49ers'
          , date = game['date']
        )
    elif Encoder == 'mp3':
        # Still not happy with the lame options. The ones below provide a
        # reasonable filesize, but the recording sounds very tinny; removing
        # the resample results in a nice sounding recording, but the files
        # are a factor of two too large.
        encoder = ' '.join([
            'lame'
          , '--quiet'               # quiet
          , '--resample 8'          # resample to rate
          , '-V3'                   # variable bit rate, quality level 3
          , '--vbr-new'             # use the newer VBR routine
          , '-q0'                   # best (slowest) encoding algorithm
          , '-B16'                  # maximum bit rate
          , '--lowpass 15.4'        # apply lowpass filter
          , '--athaa-sensitivity 1' # ATH auto-adjust sensitivity
          , '--tt "{title}"'        # title
          , '--ta "{artist}"'       # artist
          , '-'                     # read from standard input
          , '{filename}'            # write to filename
        ]).format(
            filename = filename
          , title = game['desc']
          , artist = 'The 49ers'
          , date = game['date']
        )
    elif Encoder == 'spx':
        # This generates files that sound a little better than the ogg files but
        # are much larger (odd because it is based on ogg and it tailored for
        # the spoken word, perhaps it is because I cannot get the -vbr option to
        # work). I am using the wideband option because it sounded
        # better and took less space than the narrowband option.
        encoder = ' '.join([
            'speexenc'
          , '-w'                    # wideband
          #, '--16bit'              # 16 bit raw input stream
          #, '--le'                 # little endian input stream
          #, '--stereo'             # stereo input stream
          , '--title "{title}"'     # title
          , '--author "{artist}"'   # artist
          , '-'                     # read from standard input
          , '{filename}'            # write to filename
        ]).format(
            filename = filename
          , title = game['desc']
          , artist = 'The 49ers'
          , date = game['date']
        )
    else:
        raise AssertionError, "%s: Unknown encoder" % encoder

    pipeline = '{recorder} | {encoder}'.format(
        recorder=recorder, encoder=encoder
    )

    # assure destination directory exists
    mkdir(expandPath(AudioDirectory))

    # create a symbolic link to the latest game
    remove(latest)
    try:
        os.symlink(filename, latest)
    except (IOError, OSError) as err:
        sys.exit("%s: %s." % (err.filename, err.strerror))
Example #15
    def avendesora_archive(self):
        """
        Avendesora Archive

        Save all account information to Avendesora files.
        """
        from binascii import b2a_base64, Error as BinasciiError
        self.logger.log("Archive secrets.")
        source_files = set()
        dest_files = {}
        gpg_ids = {}
        avendesora_dir = make_path(self.settings_dir, 'avendesora')
        mkdir(avendesora_dir)
        header = dedent('''\
            # Translated Abraxas Accounts file (%s)
            # vim: filetype=python sw=4 sts=4 et ai ff=unix fileencoding='utf8' :
            #
            # It is recommended that you not modify this file directly. Instead,
            # if you wish to modify an account, copy it to an account file not
            # associated with Abraxas and modify it there. Then, to avoid
            # conflicts, add the account name to ~/.config/abraxas/do-not-export
            # and re-export the accounts using 'abraxas --export'.

            from avendesora import Account, Hidden, Question, RecognizeURL, RecognizeTitle

        ''')

        # read do-not-export file
        try:
            with open(make_path(self.settings_dir, 'do-not-export')) as f:
                do_not_export = set(f.read().split())
        except IOError as err:
            do_not_export = set([])

        def make_camel_case(text):
            text = text.translate(maketrans('@.-', '   '))
            text = ''.join([e.title() for e in text.split()])
            if text[0] in '0123456789':
                text = '_' + text
            return text

        def make_identifier(text):
            text = text.translate(maketrans('@.- ', '____'))
            if text[0] in '0123456789':
                text = '_' + text
            return text

        # Loop through accounts saving passwords and questions
        all_secrets = {}
        for account_id in self.all_accounts():
            account = self.get_account(account_id, quiet=True)
            data = account.__dict__['data']
            ID = account.__dict__['ID']
            #aliases = data.get('aliases', [])
            #if set([ID] + aliases) & do_not_export:
            if ID in do_not_export:
                print('skipping', ID)
                continue
            class_name = make_camel_case(ID)
            output = [
                'class %s(Account): # %s' % (class_name, '{''{''{1')
            ]
            # TODO -- must make ID a valid class name: convert xxx-xxx to camelcase
            self.logger.debug("    Saving %s account." % ID)

            try:
                source_filepath = data['_source_file_']
                dest_filepath = make_path(
                    avendesora_dir, rel_path(source_filepath, self.settings_dir)
                )
                if source_filepath not in source_files:
                    source_files.add(source_filepath)

                    # get recipient ids from existing file
                    if get_extension(source_filepath) in ['gpg', 'asc']:
                        try:
                            gpg = Execute(
                                ['gpg', '--list-packets', source_filepath],
                                stdout=True, wait=True
                            )
                            gpg_ids[dest_filepath] = []
                            for line in gpg.stdout.split('\n'):
                                if line.startswith(':pubkey enc packet:'):
                                    words = line.split()
                                    assert words[7] == 'keyid'
                                    gpg_ids[dest_filepath].append(words[8])
                        except ExecuteError as err:
                            print(str(err))
                    else:
                        gpg_ids[dest_filepath] = None
                    dest_files[dest_filepath] = {None: header % source_filepath}
            except KeyError:
                raise AssertionError('%s: SOURCE FILE MISSING.' % ID)
            except IOError as err:
                self.logger.error('%s: %s.' % (err.filename, err.strerror))

            output.append("    NAME = %r" % ID)
            password = self.generate_password(account)
            output.append("    passcode = Hidden(%r)" % b2a_base64(
                password.encode('ascii')).strip().decode('ascii')
            )
            questions = []
            for question in account.get_security_questions():
                # convert the result to a list rather than leaving it a tuple
                # because tuples are formatted oddly in yaml
                questions += [list(self.generate_answer(question, account))]
                self.logger.debug(
                    "    Saving question (%s) and its answer." % question)
            if questions:
                output.append("    questions = [")
                for question, answer in questions:
                    output.append("        Question(%r, answer=Hidden(%r))," % (
                        question,
                        b2a_base64(answer.encode('ascii')).strip().decode('ascii')
                    ))
                output.append("    ]")
            if 'autotype' in data:
                autotype = data['autotype'].replace('{password}', '{passcode}')
            else:
                if 'username' in data:
                    autotype = '{username}{tab}{passcode}{return}'
                else:
                    autotype = '{email}{tab}{passcode}{return}'
            discovery = []
            if 'url' in data:
                urls = [data['url']] if type(data['url']) == str else data['url']
                discovery.append('RecognizeURL(%s, script=%r)' % (
                    ', '.join([repr(e) for e in urls]), autotype
                ))
            if 'window' in data:
                windows = [data['window']] if type(data['window']) == str else data['window']
                discovery.append('RecognizeTitle(%s, script=%r)' % (
                    ', '.join([repr(e) for e in windows]), autotype
                ))
            if discovery:
                output.append("    discovery = [")
                for each in discovery:
                    output.append("        %s," % each)
                output.append("    ]")

            for k, v in data.items():
                if k in [
                    'password',
                    'security questions',
                    '_source_file_',
                    'password-type',
                    'master',
                    'num-words',
                    'num-chars',
                    'alphabet',
                    'template',
                    'url',
                    'version',
                    'autotype',
                    'window',
                ]:
                    continue
                key = make_identifier(k)
                if type(v) == str and '\n' in v:
                    output.append('    %s = """' % key)
                    for line in dedent(v.strip('\n')).split('\n'):
                        if line:
                            output.append('        %s' % line.rstrip())
                        else:
                            output.append('')
                    output.append('    """')
                else:
                    output.append("    %s = %r" % (key, v))

            output.append('')
            output.append('')
            dest_files[dest_filepath][ID] = '\n'.join(output)


        # This version uses default gpg id to encrypt files.
        # Could also take gpg ids from actual files.
        # The gpg ids are gathered from files above, but code to use them is
        # currently commented out.
        for filepath, accounts in dest_files.items():
            try:
                header = accounts.pop(None)
                contents = '\n'.join(
                    [header] + [accounts[k] for k in sorted(accounts)]
                )
                mkdir(get_head(filepath))
                os.chmod(get_head(filepath), 0o700)
                print('%s: writing.' % filepath)
                # encrypt all files with default gpg ID
                #if gpg_ids[filepath]:
                #    gpg_id = gpg_ids[filepath]
                if True:
                    if get_extension(filepath) not in ['gpg', 'asc']:
                        filepath += '.gpg'
                    gpg_id = self.accounts.get_gpg_id()
                    encrypted = self.gpg.encrypt(
                        contents, gpg_id, always_trust=True, armor=True
                    )
                    if not encrypted.ok:
                        self.logger.error(
                            "%s: unable to encrypt.\n%s" % (
                                filepath, encrypted.stderr))
                    contents = str(encrypted)
                with open(filepath, 'w') as f:
                    f.write(contents)
                    os.chmod(filepath, 0o600)
            except IOError as err:
                self.logger.error('%s: %s.' % (err.filename, err.strerror))
Example #16
    def _create_initial_settings_files(self, gpg_id):
        """
        Create initial version of settings files for the user (PRIVATE)

        Will create initial versions of the master password file and the
        accounts file, but only if they do not already exist. The master
        password file is encrypted with the GPG ID given on the command line,
        which should be the user's.

        Arguments:
        Requires user's GPG ID (string) as the only argument.
        """

        def create_file(filename, contents, encrypt=False):
            if encrypt:
                encrypted = self.gpg.encrypt(
                    contents, gpg_id, always_trust=True, armor=True
                )
                if not encrypted.ok:
                    self.logger.error(
                        "%s: unable to encrypt.\n%s" % (
                            filename, encrypted.stderr))
                contents = str(encrypted)
            if is_file(filename):
                self.logger.display("%s: already exists." % filename)
            else:
                try:
                    with open(filename, 'w') as file:
                        file.write(contents)
                    os.chmod(filename, 0o600)
                    self.logger.display("%s: created." % filename)
                except IOError as err:
                    self.logger.error('%s: %s.' % (err.filename, err.strerror))

        def generate_random_string():
            # Generate a random long string to act as the default password

            from string import ascii_letters, digits, punctuation
            import random
            # Create alphabet from letters, digits, and punctuation, but 
            # replace double quote with a space so password can be safely 
            # represented as a double-quoted string.
            alphabet = (ascii_letters + digits + punctuation).replace('"', ' ')

            rand = random.SystemRandom()
            password = ''
            for i in range(64):
                password += rand.choice(alphabet)
            return password

        mkdir(self.settings_dir)
        default_password = generate_random_string()
        if self.settings_dir != expand_path(DEFAULT_SETTINGS_DIR):
            # If settings_dir is not the DEFAULT_SETTINGS_DIR, then this is
            # probably a test, in which case we do not want to use a
            # random password as it would cause the test results to vary.
            # Still want to generate the random string so that code gets
            # tested. It has been the source of trouble in the past.
            default_password = '******'
        create_file(
            self.master_password_path,
            MASTER_PASSWORD_FILE_INITIAL_CONTENTS % (
                self.dictionary.hash, SECRETS_SHA1, CHARSETS_SHA1,
                DEFAULT_ACCOUNTS_FILENAME, default_password),
            encrypt=True)
        create_file(
            self.accounts_path,
            ACCOUNTS_FILE_INITIAL_CONTENTS % (
                make_path(self.settings_dir, DEFAULT_LOG_FILENAME),
                make_path(self.settings_dir, DEFAULT_ARCHIVE_FILENAME),
                gpg_id),
            encrypt=(get_extension(self.accounts_path) in ['gpg', 'asc']))
    def start(self, lstFiles):
        """ 
        Lance les calculs pour "processSelected"
        i.e. 
        
        @param lstFiles: list of files to process
        """

        def splitIntoPages(pathday, globalCount):
            """Split a directory (pathday) into pages of config.NbrPerPage images (typically 20)

            @param pathday:
            @param globalCount:
            @return: the number of images for current page
            """
            logger.debug("In splitIntoPages %s %s", pathday, globalCount)
            files = []
            for i in os.listdir(pathday):
                if os.path.splitext(i)[1] in config.Extensions:
                    files.append(i)
            files.sort()
            if len(files) > config.NbrPerPage:
                #Integer division so `pages` can be fed to range()
                pages = 1 + (len(files) - 1) // config.NbrPerPage
                for i in range(1, pages + 1):
                    folder = os.path.join(pathday, config.PagePrefix + str(i))
                    fileutils.mkdir(folder)
                for j in range(len(files)):
                    i = 1 + j // config.NbrPerPage
                    filename = os.path.join(pathday, config.PagePrefix + str(i), files[j])
                    self.refreshSignal.emit(globalCount, files[j])
                    globalCount += 1
                    shutil.move(os.path.join(pathday, files[j]), filename)
                    scaleImage(filename, filigrane)
            else:
                for j in files:
                    self.refreshSignal.emit(globalCount, j)
                    globalCount += 1
                    scaleImage(os.path.join(pathday, j), filigrane)
            return globalCount

        def arrangeOneFile(dirname, filename):
            """
            @param dirname:
            @param filename:
            """
            try:
                timetuple = time.strptime(filename[:19], "%Y-%m-%d_%Hh%Mm%S")
                suffix = filename[19:]
            except ValueError:
                try:
                    timetuple = time.strptime(filename[:11], "%Y-%m-%d_")
                    suffix = filename[11:]
                except ValueError:
                    logger.warning("Unable to handle such file: %s" % filename)
                    return
            daydir = os.path.join(SelectedDir, time.strftime("%Y-%m-%d", timetuple))
            if not os.path.isdir(daydir):
                os.mkdir(daydir)
            shutil.move(os.path.join(dirname, filename), os.path.join(daydir, time.strftime("%Hh%Mm%S", timetuple) + suffix))

        logger.debug("In Process Selected" + " ".join(lstFiles))
        self.startSignal.emit(self.__label, max(1, len(lstFiles)))
        if config.Filigrane:
            filigrane = Signature(config.FiligraneSource)
        else:
            filigrane = None

        SelectedDir = os.path.join(config.DefaultRepository, config.SelectedDirectory)
        self.refreshSignal.emit(-1, "copie des fichiers existants")
        if not os.path.isdir(SelectedDir):
            fileutils.mkdir(SelectedDir)
#####first of all : copy the subfolders into the day folder to help mixing the files
        AlsoProcess = 0
        for day in os.listdir(SelectedDir):
#if SingleDir : revert to a foldered structure
            DayOrFile = os.path.join(SelectedDir, day)
            if os.path.isfile(DayOrFile):
                arrangeOneFile(SelectedDir, day)
                AlsoProcess += 1
#end SingleDir normalization
            elif os.path.isdir(DayOrFile):
                if day in [config.ScaledImages["Suffix"], config.Thumbnails["Suffix"]]:
                    fileutils.recursive_delete(DayOrFile)
                elif day.find(config.PagePrefix) == 0: #subpages in SingleDir mode that need to be flattened
                    for File in os.listdir(DayOrFile):
                        if os.path.isfile(os.path.join(DayOrFile, File)):
                            arrangeOneFile(DayOrFile, File)
                            AlsoProcess += 1
#                        elif os.path.isdir(os.path.join(DayOrFile,File)) and File in [config.ScaledImages["Suffix"],config.Thumbnails["Suffix"]]:
#                            recursive_delete(os.path.join(DayOrFile,File))
                    fileutils.recursive_delete(DayOrFile)
                else:
                    for File in os.listdir(DayOrFile):
                        if File.find(config.PagePrefix) == 0:
                            if os.path.isdir(os.path.join(SelectedDir, day, File)):
                                for strImageFile in os.listdir(os.path.join(SelectedDir, day, File)):
                                    src = os.path.join(SelectedDir, day, File, strImageFile)
                                    dst = os.path.join(SelectedDir, day, strImageFile)
                                    if os.path.isfile(src) and not os.path.exists(dst):
                                        shutil.move(src, dst)
                                        AlsoProcess += 1
                                    if (os.path.isdir(src)) and (os.path.split(src)[1] in [config.ScaledImages["Suffix"], config.Thumbnails["Suffix"]]):
                                        shutil.rmtree(src)
                        else:
                            if os.path.splitext(File)[1] in config.Extensions:
                                AlsoProcess += 1

#######then copy the selected files to their folders###########################        
        for File in lstFiles:
            dest = os.path.join(SelectedDir, File)
            src = os.path.join(config.DefaultRepository, File)
            destdir = os.path.dirname(dest)
            if not os.path.isdir(destdir):
                fileutils.makedir(destdir)
            if not os.path.exists(dest):
                logger.info("copie de %s " % File)
                shutil.copy(src, dest)
                try:
                    os.chmod(dest, config.DefaultFileMode)
                except OSError:
                    logger.warning("Unable to chmod %s" % dest)
                AlsoProcess += 1
            else:
                logger.warning("%s already exists", dest)
        if AlsoProcess > 0:
            self.NbrJobsSignal.emit(AlsoProcess)
######copy the comments of the directory to the Selected directory 
        AlreadyDone = []
        for File in lstFiles:
            directory = os.path.split(File)[0]
            if directory in AlreadyDone:
                continue
            else:
                AlreadyDone.append(directory)
                dst = os.path.join(SelectedDir, directory, config.CommentFile)
                src = os.path.join(config.DefaultRepository, directory, config.CommentFile)
                if os.path.isfile(src):
                    shutil.copy(src, dst)

########finally recreate the structure with pages or make a single page ########################
        logger.debug("in ModelProcessSelected, SelectedDir= %s", SelectedDir)
        dirs = [ i for i in os.listdir(SelectedDir) if os.path.isdir(os.path.join(SelectedDir, i))]
        dirs.sort()
        if config.ExportSingleDir: #SingleDir
            #first move all files to the root
            for day in dirs:
                daydir = os.path.join(SelectedDir, day)
                for filename in os.listdir(daydir):
                    try:
                        timetuple = time.strptime(day[:10] + "_" + filename[:8], "%Y-%m-%d_%Hh%Mm%S")
                        suffix = filename[8:]
                    except ValueError:
                        try:
                            timetuple = time.strptime(day[:10], "%Y-%m-%d")
                            suffix = filename
                        except ValueError:
                            logger.info("Unable to handle dir: %s\t file: %s" , day, filename)
                            continue
                    src = os.path.join(daydir, filename)
                    dst = os.path.join(SelectedDir, time.strftime("%Y-%m-%d_%Hh%Mm%S", timetuple) + suffix)
                    shutil.move(src, dst)
                fileutils.recursive_delete(daydir)
            splitIntoPages(SelectedDir, 0)
        else: #Multidir
            logger.debug("in Multidir, dirs= " + " ".join(dirs))
            globalCount = 0
            for day in dirs:
                globalCount = splitIntoPages(os.path.join(SelectedDir, day), globalCount)

        self.finishSignal.emit()
    def start(self, lstFiles):
        """ 
        Lance les calculs
        
        @param lstFiles: list of files to process
        """
        self.startSignal.emit(self.__label, max(1, len(lstFiles)))
        if config.Filigrane:
            filigrane = Signature(config.FiligraneSource)
        else:
            filigrane = None

        SelectedDir = os.path.join(config.DefaultRepository, config.SelectedDirectory)
        self.refreshSignal.emit(-1, "copie des fichiers existants")
        if not os.path.isdir(SelectedDir):     fileutils.mkdir(SelectedDir)
#####first of all : copy the subfolders into the day folder to help mixing the files
        for day in os.listdir(SelectedDir):
            for File in os.listdir(os.path.join(SelectedDir, day)):
                if File.find(config.PagePrefix) == 0:
                    if os.path.isdir(os.path.join(SelectedDir, day, File)):
                        for strImageFile in os.listdir(os.path.join(SelectedDir, day, File)):
                            src = os.path.join(SelectedDir, day, File, strImageFile)
                            dst = os.path.join(SelectedDir, day, strImageFile)
                            if os.path.isfile(src) and not os.path.exists(dst):
                                shutil.move(src, dst)
                            if (os.path.isdir(src)) and (os.path.split(src)[1] in [config.ScaledImages["Suffix"], config.Thumbnails["Suffix"]]):
                                shutil.rmtree(src)

#######then copy the selected files to their folders###########################        
        globalCount = 0
        for File in lstFiles:
            dest = os.path.join(SelectedDir, File)
            src = os.path.join(config.DefaultRepository, File)
            destdir = os.path.dirname(dest)
            self.refreshSignal.emit(globalCount, File)
            globalCount += 1
            if not os.path.isdir(destdir):
                fileutils.makedir(destdir)
            if not os.path.exists(dest):
                if filigrane:
                    image = Image.open(src)
                    filigrane.substract(image).save(dest, quality=config.FiligraneQuality, optimize=config.FiligraneOptimize, progressive=config.FiligraneOptimize)
                else:
                    shutil.copy(src, dest)
                try:
                    os.chmod(dest, config.DefaultFileMode)
                except OSError:
                    logger.warning("In ModelCopySelected: unable to chmod %s", dest)
            else:
                logger.info("In ModelCopySelected: %s already exists", dest)
######copy the comments of the directory to the Selected directory 
        AlreadyDone = []
        for File in lstFiles:
            directory = os.path.split(File)[0]
            if directory in AlreadyDone:
                continue
            else:
                AlreadyDone.append(directory)
                dst = os.path.join(SelectedDir, directory, config.CommentFile)
                src = os.path.join(config.DefaultRepository, directory, config.CommentFile)
                if os.path.isfile(src):
                    shutil.copy(src, dst)
        self.finishSignal.emit()
Example #19
def javac(output, inputs, classpath, options, logbasename, targetname):
	""" Compile some java files to class files.

	Will raise BuildException if compilation fails.

	@param output: path to a directory in which to put the class files (will be created)

	@param inputs: list of paths (.java files) to be compiled

	@param classpath: classpath to compile with, as a string

	@param options: options map. javac.options is a list of additional arguments, javac.source is the source version, 
	javac.target is the target version

	@param logbasename: absolute, expanded, path to a directory and filename prefix 
		to use for files such as .err, .out, etc files

	@param targetname: to log appropriate error messages

	"""

	assert logbasename and '$' not in logbasename
	logbasename = os.path.normpath(logbasename)
	# make the output directory
	if not os.path.exists(output): mkdir(output)
	# location of javac
	if options['java.home']:
		javacpath = os.path.join(options['java.home'], "bin/javac")
	else:
		javacpath = "javac" # just get it from the path
	# store the list of files in a temporary file, then build from that.
	mkdir(options['tmpdir'])
	
	argsfile = os.path.join(options['tmpdir'], "javac_args.txt")
	
	# build up the arguments
	args = ["-d", output]
	if options["javac.source"]: args.extend(["-source", options["javac.source"]])
	if options["javac.target"]: args.extend(["-source", options["javac.target"]])
	if options["javac.encoding"]: args.extend(["-encoding", options["javac.encoding"]])
	if options["javac.debug"]:
		args.append('-g')
	if options['javac.warningsAsErrors']:
		args.append('-Werror')
	# TODO: should add -Xlint options here I think
		
	args.extend(getStringList(options['javac.options']))
	if classpath: args.extend(['-cp', classpath])
	args.extend([x for x in inputs if x.endswith('.java')]) # automatically filter out non-java files

	with openForWrite(argsfile, 'wb') as f:
		for a in args:
			a = '"%s"' % a.replace('\\', '\\\\')
			f.write(a + '\n')

	success = False
	try:

		log.info('Executing javac for %s, writing output to %s: %s', targetname, logbasename+'.out', ''.join(['\n\t"%s"'%x for x in [javacpath]+args]))
		
		# make sure we have no old ones hanging around still
		try:
			deleteFile(logbasename+'-errors.txt', allowRetry=True)
			deleteFile(logbasename+'-warnings.txt', allowRetry=True)
			deleteFile(logbasename+'.out', allowRetry=True)
		except Exception as e:
			log.info('Cleaning up file failed: %s' % e)
		
		outputHandler = JavacProcessOutputHandler(targetname, options=options)
		outputHandler.setJavacLogBasename(logbasename)
		
		call([javacpath, "@%s" % argsfile], outputHandler=outputHandler, outputEncoding='UTF-8', cwd=output, timeout=options['process.timeout'])
		
		if (not os.listdir(output)): # unlikely, but useful failsafe
			raise EnvironmentError('javac command failed to create any target files (but returned no error code); see output at "%s"'%(logbasename+'.out'))
		success = True
	finally:
		if not success and classpath:
			log.info('Classpath for failed javac was: \n   %s', '\n   '.join(classpath.split(os.pathsep)))
    def start(self, rootDir):
        """ Lance les calculs
        
        @param rootDir: top level directory to start processing
        @return: 2tuple containing the list of all images and the start-index
        @rtype: (list,integer)
        """
        config.DefaultRepository = rootDir
        AllJpegs = fileutils.findFiles(rootDir)
        AllFilesToProcess = []
        AllreadyDone = []
        NewFiles = []
        uid = os.getuid()
        gid = os.getgid()
        for i in AllJpegs:
            if i.find(config.TrashDirectory) == 0: continue
            if i.find(config.SelectedDirectory) == 0: continue
            try:
                a = int(i[:4])
                m = int(i[5:7])
                j = int(i[8:10])
                if (a >= 0000) and (m <= 12) and (j <= 31) and (i[4] in ["-", "_", "."]) and (i[7] in ["-", "_"]):
                    AllreadyDone.append(i)
                else:
                    AllFilesToProcess.append(i)
            except ValueError:
                AllFilesToProcess.append(i)
        AllFilesToProcess.sort()
        NumFiles = len(AllFilesToProcess)
        self.startSignal.emit(self.__label, NumFiles)
        for h in range(NumFiles):
            i = AllFilesToProcess[h]
            self.refreshSignal.emit(h, i)
            myPhoto = Photo(i, dontCache=True)
            data = myPhoto.readExif()
            try:
                datei, heurei = data["Heure"].split()
                date = re.sub(":", "-", datei)
                heurej = re.sub(":", "h", heurei, 1)
                model = data["Modele"].split(",")[-1]
                heure = unicode2ascii("%s-%s.jpg" % (re.sub(":", "m", heurej, 1), re.sub("/", "", re.sub(" ", "_", model))))
            except ValueError:
                date = time.strftime("%Y-%m-%d", time.gmtime(os.path.getctime(os.path.join(rootDir, i))))
                heure = unicode2ascii("%s-%s.jpg" % (time.strftime("%Hh%Mm%S", time.gmtime(os.path.getctime(os.path.join(rootDir, i)))), re.sub("/", "-", re.sub(" ", "_", os.path.splitext(i)[0]))))
            if not (os.path.isdir(os.path.join(rootDir, date))) :
                fileutils.mkdir(os.path.join(rootDir, date))
#            strImageFile = os.path.join(rootDir, date, heure)
            ToProcess = os.path.join(date, heure)
            bSkipFile = False
            for strImageFile in fileutils.list_files_in_named_dir(rootDir, date, heure):
                logger.warning("%s -x-> %s", i, strImageFile)
                existing = Photo(strImageFile, dontCache=True)
                try:
                    existing.readExif()
                    originalName = existing.exif["Exif.Photo.UserComment"]
                except Exception:
                    logger.error("in ModelRangeTout: reading Exif for %s", i)
                else:
                    if "human_value" in dir(originalName):
                        originalName = originalName.human_value
                    if os.path.basename(originalName) == os.path.basename(i):
                        logger.info("File already in repository, leaving as it is")
                        bSkipFile = True
                        continue #to next file, i.e. leave the existing one
            if bSkipFile:
                continue
            else:
                strImageFile = os.path.join(rootDir, date, heure)
            if os.path.isfile(strImageFile):
                s = 0
                for j in os.listdir(os.path.join(rootDir, date)):
                    if j.find(heure[:-4]) == 0:
                        s += 1
                ToProcess = os.path.join(date, heure[:-4] + "-%s.jpg" % s)
                strImageFile = os.path.join(rootDir, ToProcess)
            shutil.move(os.path.join(rootDir, i), strImageFile)
            try:
                os.chown(strImageFile, uid, gid)
                os.chmod(strImageFile, config.DefaultFileMode)
            except OSError:
                logger.warning("in ModelRangeTout: unable to chown ot chmod  %s" , strImageFile)
            myPhoto = Photo(strImageFile, dontCache=True)
#            Save the old image name in exif tag
            myPhoto.storeOriginalName(i)

            if config.AutoRotate:
                myPhoto.autorotate()
            AllreadyDone.append(ToProcess)
            NewFiles.append(ToProcess)
        AllreadyDone.sort()
        self.finishSignal.emit()

        if len(NewFiles) > 0:
            FirstImage = min(NewFiles)
            return AllreadyDone, AllreadyDone.index(FirstImage)
        else:
            return AllreadyDone, 0