Example #1
    def _persist_symlink(self, abspath):
        """Persist symbolic link and bind mount it back to its current location
        """
        persisted_path = self._config_path(abspath)
        current_target = os.readlink(abspath)
        if os.path.exists(persisted_path):
            stored_target = os.readlink(persisted_path)
            if stored_target == current_target:
                self._logger.warn('Symlink "%s" had already been persisted',
                                  abspath)
                return
            else:
                # Write the new symlink to an alternate location and atomically
                # rename
                self._prepare_dir(abspath, persisted_path)
                tmp_path = persisted_path + '.ovirtnode.atom'
                os.symlink(current_target, tmp_path)
                os.rename(tmp_path, persisted_path)
        else:
            self._prepare_dir(abspath, persisted_path)
            os.symlink(current_target, persisted_path)

        self.copy_attributes(abspath, persisted_path)
        self._logger.info('Symbolic link "%s" successfully persisted', abspath)
        self._add_path_entry(abspath)
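
The comment in the branch above names the core trick: create the replacement under a temporary name, then let os.rename swap it in atomically. A minimal standalone sketch of that pattern for symlinks (the function name and paths are illustrative):

import os

def replace_symlink(target, link_path):
    tmp_path = link_path + '.tmp'
    os.symlink(target, tmp_path)    # build the new link under a temporary name
    os.rename(tmp_path, link_path)  # atomic on POSIX: readers never see a half-updated link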
  def __WritePickled(self, obj, filename):
    """Pickles the object and writes it to the given file.
    """
    if not filename or filename == '/dev/null' or not obj:
      return

    descriptor, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename))
    tmpfile = os.fdopen(descriptor, 'wb')

    pickler = pickle.Pickler(tmpfile, protocol=1)
    pickler.fast = True
    pickler.dump(obj)

    tmpfile.close()

    self.__file_lock.acquire()
    try:
      try:
        # Atomically replace the old file with the new one.
        os.rename(tmp_filename, filename)
      except OSError:
        # On Windows, os.rename fails if the destination exists;
        # remove it and retry.
        try:
          os.remove(filename)
        except OSError:
          pass
        os.rename(tmp_filename, filename)
    finally:
      self.__file_lock.release()
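
On POSIX, os.rename atomically replaces an existing destination, but on Windows it raises OSError when the target exists, which is exactly what the remove-and-retry fallback above works around. On Python 3.3+, os.replace provides overwrite semantics on both platforms, so the fallback collapses to one call; a minimal sketch of the same write-pickle-atomically idea (the helper name write_pickled is illustrative):

import os
import pickle
import tempfile

def write_pickled(obj, filename):
    # Write to a temp file in the destination directory, then swap it in.
    fd, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename) or '.')
    with os.fdopen(fd, 'wb') as tmp:
        pickle.dump(obj, tmp, protocol=1)
    os.replace(tmp_name, filename)  # overwrites atomically on POSIX and Windows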
def splitFasta (seqFile, segments):
    # Split the sequence file into as many fragments as appropriate,
    # depending on the size of original_fasta.
    #
    # Clean up any segment files from previous runs before creating new ones.
    # Note: relies on the module-level names original_fasta and temp_directory;
    # the seqFile argument is not used in the body.
    #
    for i in segments:
        os.unlink(i)
    current_file_index = 0
    #current_size = 0
    for line in open(original_fasta):
        #
        # start a new file for each accession line
        #
        if line[0] == '>':
            #current_size +=1
            current_file_index += 1
            #if (current_size >= pack_size)
            file_name = "%d.segment" % current_file_index
            file_path = os.path.join(temp_directory, file_name)
            current_file = open(file_path, "w")
            #current_size = 0
        if current_file_index:
            current_file.write(line)
    end_file = open(os.path.join(temp_directory, "end_tmp"), 'w')
    end_file.write(str(current_file_index))
    end_file.close()
    os.rename(os.path.join(temp_directory, "end_tmp"), os.path.join(temp_directory, "end"))
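
A usage sketch for splitFasta, under the assumption that the surrounding pipeline defines the module-level names the function relies on (original_fasta, temp_directory); the segments argument is the list of leftover files from a previous run:

import glob
import os

original_fasta = 'sequences.fasta'       # assumed module-level input file
temp_directory = '/tmp/fasta_segments'   # assumed module-level output directory
os.makedirs(temp_directory, exist_ok=True)
splitFasta(original_fasta, glob.glob(os.path.join(temp_directory, '*.segment')))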
Example #4
    def doRollover(self):
        # the following code is a 1:1 copy from /usr/lib/python2.5/logging/handlers.py...
        self.stream.close()
        # get the time that this sequence started at and make it a TimeTuple
        t = self.rolloverAt - self.interval
        timeTuple = time.localtime(t)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            # find the oldest log file and delete it
            s = glob.glob(self.baseFilename + ".20*")
            if len(s) > self.backupCount:
                s.sort()
                os.remove(s[0])
        if self.encoding:
            self.stream = codecs.open(self.baseFilename, 'w', self.encoding)
        else:
            self.stream = open(self.baseFilename, 'w')
        self.rolloverAt = self.rolloverAt + self.interval
        # ...copy of code ends here
        # compress the rotated log into a zip archive, then drop the uncompressed file
        if os.path.exists(dfn + ".zip"):
            os.remove(dfn + ".zip")
        zf = zipfile.ZipFile(dfn + ".zip", "w")
        zf.write(dfn, os.path.basename(dfn), zipfile.ZIP_DEFLATED)
        zf.close()
        os.remove(dfn)
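
A sketch of how a handler with this doRollover might be wired up, assuming the method overrides logging.handlers.TimedRotatingFileHandler (the class name ZippingTimedRotatingFileHandler is hypothetical):

import logging
import logging.handlers

class ZippingTimedRotatingFileHandler(logging.handlers.TimedRotatingFileHandler):
    def doRollover(self):
        ...  # the override shown above: rotate, then zip the rotated file

logger = logging.getLogger('app')
logger.addHandler(ZippingTimedRotatingFileHandler('app.log', when='midnight', backupCount=7))
logger.warning('logging with zipped rollover')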
Example #5
def slicethread(fname, oname, wname, cfg, jobid):
  retcode = "fail"
  try:
    con = sqlite3.connect('db.sqlite')
    con.row_factory = sqlite3.Row

    cfg = "config.ini" if cfg is None else cfg

    proc = subprocess.Popen(["slic3r",
      "--load", cfg,
      fname, "-o", wname+'.gcode'])
    con.execute('insert into journal(cmd, pid, action, status, timestamp) values(?,?,?,?,DateTime(\'now\'))',
      ('slice {} -c {}'.format(os.path.basename(fname),
                               os.path.basename(cfg)), proc.pid, 'start',
        0 if proc.returncode is None else 1 ))
    con.commit()
    retcode = proc.wait()
    con.execute('insert into journal(cmd, pid, action, status, timestamp) values(?,?,?,?,DateTime(\'now\'))',
      ('slice {} -c {}'.format(os.path.basename(fname),
                               os.path.basename(cfg)), proc.pid, 'stop',
        proc.returncode))
    con.commit()
    try:
      os.unlink(oname+'.gcode')
    except OSError:
      pass
    finally:
      try:
        os.rename(wname+'.gcode', oname+'.gcode')
      except Exception:
        logging.info(wname+'.gcode')
        logging.info(oname+'.gcode')
  finally:
    _work_done(jobid, val=retcode)
Example #6
 def write(self, cr, uid, ids, vals, context=None):
     if not isinstance(ids, list):
         ids = [ids]
     if vals.get("filename") and not vals.get("extension"):
         vals["filename"], vals["extension"] = os.path.splitext(vals["filename"])
     upd_ids = ids[:]
     if vals.get("filename") or vals.get("extension"):
         images = self.browse(cr, uid, upd_ids, context=context)
         for image in images:
             old_full_path = self._image_path(cr, uid, image, context=context)
             if not old_full_path:
                 continue
             # all the stuff below is there to manage the files on the filesystem
             if (
                 vals.get("filename")
                 and (image.name != vals["filename"])
                 or vals.get("extension")
                 and (image.extension != vals["extension"])
             ):
                 super(product_images, self).write(cr, uid, image.id, vals, context=context)
                 upd_ids.remove(image.id)
                 if "file" in vals:
                     # a new image has been loaded; we should remove the old one
                     # TODO: it looks like there is something wrong with the function
                     # field in OpenERP; indeed, the preview is always added in the write :(
                     if os.path.isfile(old_full_path):
                         os.remove(old_full_path)
                 else:
                     new_image = self.browse(cr, uid, image.id, context=context)
                     new_full_path = self._image_path(cr, uid, new_image, context=context)
                     # we have to rename the image on the file system
                     if os.path.isfile(old_full_path):
                         os.rename(old_full_path, new_full_path)
     return super(product_images, self).write(cr, uid, upd_ids, vals, context=context)
def populate():
	files = {}
	folders = {}
	new_folders = {}
	path = os.getcwd()
	#read all the files and get all the extensions
	all_dirs = os.listdir(path)
	for folders_file in all_dirs:
		if isfile(join(path , folders_file)):
			#it is a file
			#get the extension of the files
			temp = folders_file.split('.')
			extn = temp[-1]
			files[folders_file] = extn
			print(extn)
			new_folders[extn] = True
		else:
			#it is a directory
			#print folders_file
			folders[folders_file] = True

	#create all the necessary folders
	for f in new_folders:
		if f not in folders:
			#create a new folder
			os.makedirs(path+'/'+f)

	#move the files
	for f in files:
		# move the file into its extension folder
		os.rename(path+'/'+f , path+'/' + files[f]+'/'+f)
Example #8
    def test_moved_file(self):
        """
        Move a file, then create an error that references a definition in it.
        Check that the new file name is displayed in the error.
        """

        self.write_load_config(
            'foo_1.php', 'foo_2.php', 'bar_2.php',
        )

        os.rename(
            os.path.join(self.repo_dir, 'foo_2.php'),
            os.path.join(self.repo_dir, 'bar_2.php'),
        )

        with open(os.path.join(self.repo_dir, 'foo_1.php'), 'w') as f:
            f.write("""
            <?hh
            function f(): string {
                return g();
            }
            """)

        self.check_cmd([
            '{root}foo_1.php:4:24,26: Invalid return type (Typing[4110])',
            '  {root}foo_1.php:3:27,32: This is a string',
            '  {root}bar_2.php:3:23,25: It is incompatible with an int',
        ])
@contextlib.contextmanager  # requires "import contextlib"; a yield-based helper needs this decorator
def open_tempfile_with_atomic_write_to(path, **kwargs):
    """
    Open a temporary file object that atomically moves to the specified
    path upon exiting the context manager.

    Supports the same function signature as `open`.

    The parent directory must exist and be user-writable.

    WARNING: This is just like 'mv', it will clobber files!
    """
    parent_directory = os.path.dirname(path)
    _tempfile = tempfile.NamedTemporaryFile(delete=False, dir=parent_directory)
    _tempfile.close()
    tempfile_path = _tempfile.name
    try:
        with open(tempfile_path, **kwargs) as file:
            yield file
            file.flush()
            os.fsync(file.fileno())
        os.rename(tempfile_path, path)
    finally:
        try:
            os.remove(tempfile_path)
        except OSError as e:
            # ENOENT means the temp file was already renamed away; ignore it.
            if e.errno != errno.ENOENT:
                raise
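
A quick usage sketch for the context manager above; the file at path is either fully replaced or left untouched, never half-written (the path and contents are illustrative):

with open_tempfile_with_atomic_write_to('settings.json', mode='w') as f:
    f.write('{"ok": true}')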
Example #10
 def try_rename(self, old_filename, new_filename):
     try:
         if old_filename == new_filename:
             return
         os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
     except (IOError, OSError) as err:
         self.report_error('unable to rename file: %s' % error_to_compat_str(err))
Example #11
	def store(self):
		"""
		Store data for next runs, set the attributes listed in :py:const:`waflib.Build.SAVED_ATTRS`. Uses a temporary
		file to avoid problems on ctrl+c.
		"""
		data = {}
		for x in SAVED_ATTRS:
			data[x] = getattr(self, x)
		db = os.path.join(self.variant_dir, Context.DBFILE)

		try:
			Node.pickle_lock.acquire()
			Node.Nod3 = self.node_class
			x = cPickle.dumps(data, PROTOCOL)
		finally:
			Node.pickle_lock.release()

		Utils.writef(db + '.tmp', x, m='wb')

		try:
			st = os.stat(db)
			os.remove(db)
			if not Utils.is_win32: # win32 has no chown but we're paranoid
				os.chown(db + '.tmp', st.st_uid, st.st_gid)
		except (AttributeError, OSError):
			pass

		# do not use shutil.move (copy is not thread-safe)
		os.rename(db + '.tmp', db)
  def run (self):
    # load mseed file
    print "tmbo: loading mseed: %s.." % self.mseedf
    self.st = read (self.mseedf)

    # byte swap
    self.byteswap ()

    if self.plot:
      self.setupplot ()

    # figure out new name
    root = os.path.dirname (self.mseedf)
    base = os.path.basename (self.mseedf)
    self.name = base

    if not self.replace:
      self.name += '.new'

    if not self.nowrite:
      if self.replace and self.backup:
        print "tmbo: backing up existing file to: %s.." % (self.mseedf + '.bs.bak')
        os.rename (self.mseedf, self.mseedf + '.bs.bak')


      # writing
      print "tmbo: writing new file: %s.." % os.path.join (root, self.name)
      self.newst.write (os.path.join (root, self.name), format = 'MSEED', encoding = 'INT32', byteorder = 1, flush = 1, verbose = 0)

    else:
      if self.replace and self.backup:
        print "tmbo: backing up existing file to: %s (disabled).." % (self.mseedf + '.bs.bak')

      print "tmbo: writing new file: %s (disabled).." % os.path.join (root, self.name)
Example #13
 def save(self, delta):
     if self.deltas:
       # save data when converting to new format
       #print("Full save:", self.filename)
       # rename old file, do not close it to leave it locked
       old_f = self.f
       self.deltas[delta[0]] = delta[1:] # add current values
       self.compress()
       self.f = self.open(self.filename+'.tmp', "wb")
       if self.header_format:
         self.f.write(self.header())
       for t in sorted(self.deltas, reverse=True):
         self.f.write(self.data_format
           % tuple([t]+[self.store_value(x) for x in self.deltas[t]])
         )
       self.f.close()
       if BACKUP:
         os.rename(self.filename, self.filename+"~")
       os.rename(self.filename+'.tmp', self.filename)
       old_f.close() # close old file after rename
     else:
       # Avoid changing the length of the header here.
       # Update the header only when it keeps the same length, or on a full save!
       if self.header_format:
         header = self.header()
         if len(header)==self.header_length:
           self.f.seek(0)
           self.f.write(header)
       self.f.seek(0, 2) # EOF
       self.f.write(self.data_format % delta)
       self.f.close()
Example #14
    def _configure_efi_bootloader(self, isodir):
        """Set up the configuration for an EFI bootloader"""
        fs_related.makedirs(isodir + "/EFI/boot")

        if not self.__copy_efi_files(isodir):
            shutil.rmtree(isodir + "/EFI")
            return

        for f in os.listdir(isodir + "/isolinux"):
            os.link("%s/isolinux/%s" %(isodir, f),
                    "%s/EFI/boot/%s" %(isodir, f))


        cfg = self.__get_basic_efi_config(name = self.name,
                                          timeout = self._timeout)
        cfg += self.__get_efi_image_stanzas(isodir, self.name)

        cfgf = open(isodir + "/EFI/boot/grub.conf", "w")
        cfgf.write(cfg)
        cfgf.close()

        # first gen mactel machines get the bootloader name wrong apparently
        if rpmmisc.getBaseArch() == "i386":
            os.link(isodir + "/EFI/boot/grub.efi",
                    isodir + "/EFI/boot/boot.efi")
            os.link(isodir + "/EFI/boot/grub.conf",
                    isodir + "/EFI/boot/boot.conf")

        # for most things, we want them named boot$efiarch
        efiarch = {"i386": "ia32", "x86_64": "x64"}
        efiname = efiarch[rpmmisc.getBaseArch()]
        os.rename(isodir + "/EFI/boot/grub.efi",
                  isodir + "/EFI/boot/boot%s.efi" %(efiname,))
        os.link(isodir + "/EFI/boot/grub.conf",
                isodir + "/EFI/boot/boot%s.conf" %(efiname,))
Example #15
    def export_data(self):
        if not self._path:
            return
        temp_path = self._path + '_%s.tmp' % uuid.uuid4().hex
        try:
            data = self._data.copy()
            timers = self._timers.keys()
            commit_log = copy.copy(self._commit_log)

            with open(temp_path, 'w') as db_file:
                os.chmod(temp_path, 0600)
                export_data = []

                for key in data:
                    key_ttl = data[key]['ttl']
                    key_val = data[key]['val']
                    key_type = type(key_val).__name__
                    if key_type == 'set' or key_type == 'deque':
                        key_val = list(key_val)
                    export_data.append((key, key_type, key_ttl, key_val))

                db_file.write(json.dumps({
                    'ver': 1,
                    'data': export_data,
                    'timers': timers,
                    'commit_log': commit_log,
                }))
            os.rename(temp_path, self._path)
        except:
            try:
                os.remove(temp_path)
            except OSError:
                pass
            raise
Example #16
 def put_files_cache(self):
     if getattr(self, "cached", None):
         return None
     sig = self.signature()
     ssig = Utils.to_hex(self.uid()) + Utils.to_hex(sig)
     dname = os.path.join(self.generator.bld.cache_global, ssig)
     tmpdir = tempfile.mkdtemp(prefix=self.generator.bld.cache_global + os.sep + "waf")
     try:
         shutil.rmtree(dname)
     except:
         pass
     try:
         for node in self.outputs:
             dest = os.path.join(tmpdir, node.name)
             shutil.copy2(node.abspath(), dest)
     except (OSError, IOError):
         try:
             shutil.rmtree(tmpdir)
         except:
             pass
     else:
         try:
             os.rename(tmpdir, dname)
         except OSError:
             try:
                 shutil.rmtree(tmpdir)
             except:
                 pass
         else:
             try:
                 os.chmod(dname, Utils.O755)
             except:
                 pass
Example #17
 def __restore_file(self,path):
     try:
         os.unlink(path)
     except OSError:
         pass
     if os.path.exists(path + '.rpmnew'):
         os.rename(path + '.rpmnew', path)
    def run_once(self, test_name):
        if test_name == 'setup':
            return
        #
        # We need to be sure we run this on the right target machines
        # as this is really quite destructive!
        #
        if not os.uname()[1] in self.valid_clients:
            return

        date_start = time.strftime("%Y-%m-%d")
        time_start = time.strftime("%H%M")

        output = ''
        #
        # Test 3 different I/O schedulers:
        #
        for iosched in ['cfq', 'deadline', 'noop']:
            #
            # Test 5 different file systems, across 20+ tests..
            #
            os.chdir(self.fio_tests_dir)
            cmd = './test.sh'
            cmd += ' -d ' + self.dev + '1 -m 8G -S -s ' + iosched + ' -f ext2,ext3,ext4,xfs,btrfs'
            cmd += ' -D ' + date_start + ' -T ' + time_start
            output += utils.system_output(cmd, retain_output=True)

        #
        # Move the results from the src tree into the autotest results tree where it will automatically
        # get picked up and copied over to the jenkins server.
        #
        os.rename(os.path.join(self.srcdir, 'fs-test-proto'), os.path.join(self.resultsdir, 'fs-test-proto'))
Example #19
def dump():
    assert isinstance(application.options, OptionsCore), 'Invalid application options %s' % application.options
    if not application.options.writeConfigurations: return
    if not __debug__:
        print('Cannot dump configuration file if python is run with "-O" or "-OO" option', file=sys.stderr)
        sys.exit(1)
    configFile = application.options.configurationPath
    try:
        if os.path.isfile(configFile):
            with open(configFile, 'r') as f: config = load(f)
        else: config = {}
        
        assembly = application.assembly = ioc.open(aop.modulesIn('__setup__.**'), config=config)
        assert isinstance(assembly, Assembly), 'Invalid assembly %s' % assembly
        try:
            if os.path.isfile(configFile): os.rename(configFile, configFile + '.bak')
            for config in assembly.configurations: assembly.processForName(config)
            # Forcing the processing of all configurations
            with open(configFile, 'w') as f: save(assembly.trimmedConfigurations(), f)
            print('Created "%s" configuration file' % configFile)
        finally: ioc.deactivate()
    except SystemExit: raise
    except:
        print('-' * 150, file=sys.stderr)
        print('A problem occurred while dumping configurations', file=sys.stderr)
        traceback.print_exc(file=sys.stderr)
        print('-' * 150, file=sys.stderr)
Example #20
    def getEventRanges(self):
        if len(self.__eventRanges.keys()) > 5:
            return
        outputFiles = self.getUnstagedOutputFiles()
        for file in outputFiles:
            if len(self.__eventRanges.keys()) > 5:
                return
            self.__startWait = None
            self.__eventRanges[file] = {}
            self.__eventRanges_staged[file] = []
            self.__eventRanges_faileStaged[file] = []

            filepath = os.path.join(self.__workDir, file)
            handle = open(filepath)
            for line in handle:
                if len(line.strip()) == 0:
                    continue
                line = line.replace("  ", " ")
                jobId, eventRange, status, output = line.split(" ")
                output = output.split(",")[0]
                self.__eventRanges[file][eventRange] = {'retry':0, 'event': (jobId, eventRange, status, output)}
                self.__threadpool.add_task(self.stageOutEvent, (file, jobId, eventRange, status, output))
                if jobId not in self.__processedJobs:
                    self.__processedJobs.append(jobId)
            handle.close()
            os.rename(filepath, filepath + ".staging")
Example #21
def move_desktop_file(root, target_data, prefix):
    # The desktop file is rightly installed into install_data.  But it should
    # always really be installed into prefix, because while we can install
    # normal data files anywhere we want, the desktop file needs to exist in
    # the main system to be found.  Only actually useful for /opt installs.

    old_desktop_path = os.path.normpath(root + target_data +
                                        '/share/applications')
    old_desktop_file = old_desktop_path + '/remarkable.desktop'
    desktop_path = os.path.normpath(root + prefix + '/share/applications')
    desktop_file = desktop_path + '/remarkable.desktop'

    if not os.path.exists(old_desktop_file):
        print ("ERROR: Can't find", old_desktop_file)
        sys.exit(1)
    elif target_data != prefix + '/':
        # This is an /opt install, so rename desktop file to use extras-
        desktop_file = desktop_path + '/extras-remarkable.desktop'
        try:
            os.makedirs(desktop_path)
            os.rename(old_desktop_file, desktop_file)
            os.rmdir(old_desktop_path)
        except OSError as e:
            print ("ERROR: Can't rename", old_desktop_file, ":", e)
            sys.exit(1)

    return desktop_file
def convertqcptowav(qcpfile, wavfile):
    pvconv = shortfilename(gethelperbinary('pvconv'))
    q_name = shortfilename(qcpfile)
    w_name = common.stripext(q_name) + '.wav'
    try:
        os.remove(w_name)
    except OSError:
        pass
    run(pvconv, q_name)
    try:
        os.remove(wavfile)
    except OSError:
        pass
    os.rename(w_name, wavfile)
Example #23
 def getFile(self, tryDownload=True):
     if self.archive and os.path.isfile(self.archive):
         if self.archive.lower().endswith(".gz"):
             return self.archive, gzip.open, None
         elif self.archive.lower().endswith(".zip"):
             z = ZipFile(self.archive)
             return self.specifiedMeshName, z.open, z.close
         else:
             raise IOError("Unsupported archive type")
     if os.path.isfile(self.meshName):
         return self.meshName, open, None
     if os.path.isfile(self.meshName + ".gz"):
         return self.meshName + ".gz", gzip.open, None
     if tryDownload and (self.url or self.urlgz):
         self.message("Downloading mesh")
         urlzip = False
         if self.urlgz:
             url = self.urlgz
             outName = self.meshName + ".gz"
         elif self.url:
             url = self.url
             if self.archive:
                 outName = self.archive
             else:
                 outName = self.meshName
         content = urllib.request.urlopen(url).read()
         with open(outName + ".tempDownload","wb") as f:
             f.write(content)
         os.rename(outName + ".tempDownload", outName)
         self.message("Downloaded")
         return self.getFile()
     else:
         raise IOError("File not found")
Example #24
    def get_wallet_path(self):
        """Set the path of the wallet."""

        # command line -w option
        path = self.get('wallet_path')
        if path:
            return path

        # path in config file
        path = self.get('default_wallet_path')
        if path and os.path.exists(path):
            return path

        # default path
        dirpath = os.path.join(self.path, "wallets")
        if not os.path.exists(dirpath):
            os.mkdir(dirpath)

        new_path = os.path.join(self.path, "wallets", "default_wallet")

        # default path in pre 1.9 versions
        old_path = os.path.join(self.path, "electrum.dat")
        if os.path.exists(old_path) and not os.path.exists(new_path):
            os.rename(old_path, new_path)

        return new_path
Example #25
 def dump_file(self, content):
     counter = 0
     while True:
         while True:
             try:
                 fl = open(self.PATH+'.tmp', 'wb')
                 pickle.dump(content, fl)
                 fl.close()
                 fl = open(self.PATH+'.tmp','rb')
                 h2 = pickle.load(fl)
                 fl.close()
                 assert h2 == content
                 break
             except:
                 #print '\nThere was an error dumping the history!\n'\
                 #'This happened %d times so far, trying again...'%(counter)
                 counter+=1
         try:
             if os.path.exists(self.PATH):
                 os.remove(self.PATH)
             os.rename(self.PATH+'.tmp',self.PATH)
             fl = open(self.PATH,'rb')
             h2 = pickle.load(fl)
             fl.close()
             assert h2 == content
             break
         except:
             pass
Example #26
def main(max_stations=0, folder='.'):
    try:
        makedirs(output_folder+'/'+folder)
    except OSError:
        pass

    all_files = [ f for f in listdir(data_folder) if isfile(join(data_folder,f)) and f.endswith('.gz') ]
    
    for ndf in all_files:
        string = '_%dstations' % max_stations
        new_name=ndf[:-7]+string+ndf[-7:]
        rename(data_folder+'/'+ndf, data_folder+'/'+new_name)
        
    all_files = [ f for f in listdir(data_folder) if isfile(join(data_folder,f)) and f.endswith('.gz') ]
    
    for a_f in all_files:
        move(data_folder+'/'+a_f, output_folder+'/'+folder+'/'+a_f)
        print "Moved:", a_f[0:-3]
        
    data_files = [ f for f in listdir(output_folder+'/'+folder) if isfile(join(output_folder+'/'+folder,f)) and f.endswith('.dat.gz') ]

    print "\n"

    for d_f in data_files:
        fin = gzip.open(output_folder+'/'+folder+'/'+d_f, 'rb')
        data = fin.read()
        fin.close()

        with open(output_folder+'/'+folder+'/'+d_f[0:-3],'w') as fout:
            fout.write(data)

        print "Unzipped:", d_f[0:-3]
def deal_with_log(log_file_name_with_location, monkey_duration):
    # analyze with log:
    logging.info("deal_with_log")
    f_full_log = open(log_file_name_with_location + '.txt', 'r')
    full_log = f_full_log.readlines()
    f_full_log.close()
    full_log_lines_number = len(full_log)
    anr = '// NOT RESPONDING: ' + package_name + ' '
    exception = '// CRASH: ' + package_name + ' '
    mail_content = ''
    for i in xrange(full_log_lines_number):
        if (exception in full_log[i]) or (anr in full_log[i]):
            f_crash_log = open(log_file_name_with_location + '.txt', 'r')
            f_crash_log.close()
            for j in range(i, full_log_lines_number):
                mail_content = mail_content + full_log[j] + '\r'
                # f_crash_log = open(log_file_name_with_location + '.txt', 'a+')
                # f_crash_log.writelines(full_log[j])
                # f_crash_log.close()
            break
    if mail_content == "":
        return mail_content
    else:
        # rename log file
        log_file_name_location_final = log_file_name_with_location + ' ' + monkey_duration + "hour"
        tmp = log_file_name_with_location.split('/')
        # logging.info(tmp)
        log_file_name = tmp[-1]
        mail_content = log_file_name + '_' + monkey_duration + "hour" + '\r\r' + mail_content
        os.rename(log_file_name_with_location + '.txt', log_file_name_location_final + '.txt')
        return mail_content
def convertwavtoqcp(wavfile, qcpfile, optimization=None):
    pvconv = shortfilename(gethelperbinary('pvconv'))
    w_name = shortfilename(wavfile)
    q_name = common.stripext(w_name) + '.qcp'
    try:
        os.remove(q_name)
    except OSError:
        pass
    if optimization is None:
        run(pvconv, w_name)
    else:
        run(pvconv, '-r', _qcp_optimization_params[optimization], w_name)
    try:
        os.remove(qcpfile)
    except OSError:
        pass
    os.rename(q_name, qcpfile)
Example #29
 def clone_from(self, source_url):
     '''Initialize a repo as a clone of another'''
     self._repo.set_status('cloning')
     log.info('Initialize %r as a clone of %s',
              self._repo, source_url)
     try:
         fullname = self._setup_paths(create_repo_dir=False)
         if os.path.exists(fullname):
             shutil.rmtree(fullname)
         if self.can_hotcopy(source_url):
             shutil.copytree(source_url, fullname)
             post_receive = os.path.join(
                 self._repo.full_fs_path, 'hooks', 'post-receive')
             if os.path.exists(post_receive):
                 os.rename(post_receive, post_receive + '-user')
             repo = git.Repo(fullname)
         else:
             repo = git.Repo.clone_from(
                 source_url,
                 to_path=fullname,
                 bare=True)
         self.__dict__['_git'] = repo
         self._setup_special_files(source_url)
     except:
         self._repo.set_status('ready')
         raise
Example #30
def do_rename(old_path, new_path):
    try:
        os.rename(old_path, new_path)
    except Exception as err:
        logging.exception("Rename failed on %s to %s  err: %s",
                          old_path, new_path, str(err))
        raise
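
A usage sketch for the wrapper above; logging.exception records the traceback before the error propagates to the caller (the paths are illustrative):

import logging
logging.basicConfig(level=logging.INFO)
try:
    do_rename('/data/upload.part', '/data/upload.bin')
except Exception:
    pass  # failure already logged inside do_rename; decide here whether to retry or abort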
Example #31
def convert_pupil_mobile_recording_to_v094(rec_dir):
    logger.info("Converting Pupil Mobile recording to v0.9.4 format")
    # convert time files and rename corresponding videos
    time_pattern = os.path.join(rec_dir, '*.time')
    for time_loc in glob.glob(time_pattern):
        time_file_name = os.path.split(time_loc)[1]
        time_name, time_ext = os.path.splitext(time_file_name)

        potential_locs = [
            os.path.join(rec_dir, time_name + ext)
            for ext in ('.mjpeg', '.mp4', '.m4a')
        ]
        existing_locs = [loc for loc in potential_locs if os.path.exists(loc)]
        if not existing_locs:
            continue
        else:
            video_loc = existing_locs[0]

        if time_name in ('Pupil Cam1 ID0', 'Pupil Cam1 ID1', 'Pupil Cam2 ID0',
                         'Pupil Cam2 ID1'):
            time_name = 'eye' + time_name[-1]  # rename eye files
        elif time_name in ('Pupil Cam1 ID2', 'Logitech Webcam C930e'):
            video = av.open(video_loc, 'r')
            frame_size = video.streams.video[
                0].format.width, video.streams.video[0].format.height
            del video
            intrinsics = load_intrinsics(rec_dir, time_name, frame_size)
            intrinsics.save(rec_dir, 'world')

            time_name = 'world'  # assume world file
        elif time_name.startswith('audio_'):
            time_name = 'audio'

        timestamps = np.fromfile(time_loc, dtype='>f8')
        timestamp_loc = os.path.join(rec_dir,
                                     '{}_timestamps.npy'.format(time_name))
        logger.info('Creating "{}"'.format(os.path.split(timestamp_loc)[1]))
        np.save(timestamp_loc, timestamps)

        if time_name == 'audio':
            video_dst = os.path.join(rec_dir, time_name) + '.mp4'
            logger.info('Renaming "{}" to "{}"'.format(
                os.path.split(video_loc)[1],
                os.path.split(video_dst)[1]))
            os.rename(video_loc, video_dst)
        else:
            video_dst = os.path.join(
                rec_dir, time_name) + os.path.splitext(video_loc)[1]
            logger.info('Renaming "{}" to "{}"'.format(
                os.path.split(video_loc)[1],
                os.path.split(video_dst)[1]))
            os.rename(video_loc, video_dst)

    pupil_data_loc = os.path.join(rec_dir, 'pupil_data')
    if not os.path.exists(pupil_data_loc):
        logger.info('Creating "pupil_data"')
        save_object(
            {
                'pupil_positions': [],
                'gaze_positions': [],
                'notifications': []
            }, pupil_data_loc)
# -*- coding: UTF-8 -*-
import os
path = "/home/leo/PythonProjects/CHINESE-OCR/CHINESE-OCR_workspace/ctpn_data/VOCdevkit/VOC2007/JPEGImages"
filelist = os.listdir(path)  # all entries in this folder (including directories)
count = 0
for file in filelist:
    print(file)
for file in filelist:   # iterate over all entries
    Olddir = os.path.join(path, file)   # original file path
    if os.path.isdir(Olddir):   # skip directories
        continue
    filename = os.path.splitext(file)[0]   # file name
    filetype = os.path.splitext(file)[1]   # file extension
    Newdir = os.path.join(path, str(count).zfill(6) + filetype)  # zfill pads with zeros to the required width
    os.rename(Olddir, Newdir)  # rename
    count += 1
if __name__ == "__main__":

    # Download the full data from the models
    print ("Downloading the coil models checkpoints  500 MB")
    file_id = '1ynh2V6FMpC7NLXX2kbuxSHb3ymu7fp-r'
    destination_pack = 'coil_view_models.tar.gz'

    download_file_from_google_drive(file_id, destination_pack)
    destination_final = '_logs/'
    if not os.path.exists(destination_final):
        os.makedirs(destination_final)

    tf = tarfile.open("coil_view_models.tar.gz")
    tf.extractall(destination_final)
    # Remove the tarball after extracting it.
    os.remove("coil_view_models.tar.gz")

    # Now move the two models into their respective folders.
    # The 320000.pth model is from the town01/02 model
    destination_town02 = '_logs/nocrash/resnet34imnet10/checkpoints/'
    if not os.path.exists(destination_town02):
        os.makedirs(destination_town02)
    os.rename("_logs/320000.pth", destination_town02 + '320000.pth')

    # The 200000.pth  is from the
    destination_town03 = '_logs/town03/resnet34imnet/checkpoints/'
    if not os.path.exists(destination_town03):
        os.makedirs(destination_town03)
    os.rename("_logs/200000.pth", destination_town03 + '200000.pth')

Example #34
def preProcessDataFolders(rawDataDir, stimDir):
    """Automatically move the stimulus output files into the T-series folders.
       Finds the images and stimuli according to the fly IDs, sorts them
       by time, and matches them up.

       Naming scheme:
       Experiment folder name: has to include the string "fly"
       Stimulus folder name: has to include the experiment folder name

    Parameters
    ==========
    rawDataDir : str
        Path of the folder where the raw data is located.

    stimDir : str
        Path of the folder where the stimuli are located.

    Returns
    =======
    None
    """
    print('Pre-processing the data folders...\n')

    all_data_folders = os.listdir(rawDataDir)
    all_stim_folders = os.listdir(stimDir)
    for data_folder_name in all_data_folders:
        # Enter if it is an image data folder determined by fly ID
        if 'fly' in data_folder_name.lower():  # Has to include 'fly' in its ID
            images_path = os.path.join(rawDataDir, data_folder_name)
            current_exp_ID = data_folder_name.lower()

            # Finding the T-series
            t_series_names = [file_n for file_n in os.listdir(images_path)\
                             if 'tseries' in file_n.lower() or \
                             't-series' in file_n.lower()]
            # Ordering the T-series according to their timing
            t_series_names.sort(key=lambda t_series_file:\
                                os.path.getmtime(os.path.join(images_path,
                                                                 t_series_file)))

            # Searching for the stimulus file
            stim_found = False
            for stim_folder_name in all_stim_folders:
                if 'stimuli' in stim_folder_name.lower():
                    # Searching for the correct stimulus folder
                    if current_exp_ID in stim_folder_name.lower():
                        stimulus_path = os.path.join(stimDir, stim_folder_name)
                        stimuli_output_names = sortStimulusFiles(stimulus_path)
                        stim_found = True
                        break
            if not stim_found:

                warn_string = "!!!!Stimulus folder not found for %s...\n" % \
                      (current_exp_ID)
                warnings.warn(warn_string)
                continue
            # Copying the output files to the corresponding T-series folders
            if len(stimuli_output_names) == len(t_series_names):
                print("Image and stimuli numbers match for  %s...\n" % \
                      (current_exp_ID))
                for i, stimuli_output_file in enumerate(stimuli_output_names):
                    os.rename(
                        os.path.join(stimulus_path, stimuli_output_file),
                        os.path.join(images_path, t_series_names[i],
                                     stimuli_output_file))

                print("Folder processing of %s completed...\n" % \
                      (current_exp_ID))
            else:
                warn_string = "!!!!Image and stimuli numbers DO NOT match for  %s...\n" % \
                      (current_exp_ID)
                warnings.warn(warn_string)
    return None
Example #35
def train():
    # added by chenww
    wdir = opt.weight_save_path
    last = wdir + 'last.pt'
    best = wdir + 'best.pt'
    results_file = wdir + 'results.txt'
    # added by chenww

    cfg = opt.cfg
    data = opt.data
    epochs = opt.epochs  # 500200 batches at bs 64, 117263 images = 273 epochs
    batch_size = opt.batch_size
    accumulate = max(round(64 / batch_size), 1)  # accumulate n times before optimizer update (bs 64)
    weights = opt.weights  # initial training weights
    imgsz_min, imgsz_max, imgsz_test = opt.img_size  # img sizes (min, max, test)

    # Image Sizes
    gs = 64  # (pixels) grid size
    assert math.fmod(imgsz_min, gs) == 0, '--img-size %g must be a %g-multiple' % (imgsz_min, gs)
    opt.multi_scale |= imgsz_min != imgsz_max  # multi if different (min, max)
    if opt.multi_scale:
        if imgsz_min == imgsz_max:
            imgsz_min //= 1.5
            imgsz_max //= 0.667
        grid_min, grid_max = imgsz_min // gs, imgsz_max // gs
        imgsz_min, imgsz_max = int(grid_min * gs), int(grid_max * gs)
    img_size = imgsz_max  # initialize with max size

    # Configure run
    init_seeds()
    data_dict = parse_data_cfg(data)
    train_path = data_dict['train']
    test_path = data_dict['valid']
    nc = 1 if opt.single_cls else int(data_dict['classes'])  # number of classes
    hyp['cls'] *= nc / 80  # update coco-tuned hyp['cls'] to current dataset

    # Remove previous results
    for f in glob.glob('*_batch*.jpg') + glob.glob(results_file):
        os.remove(f)

    # Initialize model
    model = Darknet(cfg).to(device)

    # Optimizer
    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in dict(model.named_parameters()).items():
        if '.bias' in k:
            pg2 += [v]  # biases
        elif 'Conv2d.weight' in k:
            pg1 += [v]  # apply weight_decay
        else:
            pg0 += [v]  # all else

    if opt.adam:
        # hyp['lr0'] *= 0.1  # reduce lr (i.e. SGD=5E-3, Adam=5E-4)
        optimizer = optim.Adam(pg0, lr=hyp['lr0'])
        # optimizer = AdaBound(pg0, lr=hyp['lr0'], final_lr=0.1)
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    print('Optimizer groups: %g .bias, %g Conv2d.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    start_epoch = 0
    best_fitness = 0.0
    attempt_download(weights)
    if weights.endswith('.pt'):  # pytorch format
        # possible weights are '*.pt', 'yolov3-spp.pt', 'yolov3-tiny.pt' etc.
        chkpt = torch.load(weights, map_location=device)

        # load model
        try:
            chkpt['model'] = {k: v for k, v in chkpt['model'].items() if model.state_dict()[k].numel() == v.numel()}
            model.load_state_dict(chkpt['model'], strict=False)
        except KeyError as e:
            s = "%s is not compatible with %s. Specify --weights '' or specify a --cfg compatible with %s. " \
                "See https://github.com/ultralytics/yolov3/issues/657" % (opt.weights, opt.cfg, opt.weights)
            raise KeyError(s) from e

        # load optimizer
        if chkpt['optimizer'] is not None:
            optimizer.load_state_dict(chkpt['optimizer'])
            best_fitness = chkpt['best_fitness']

        # load results
        if chkpt.get('training_results') is not None:
            with open(results_file, 'w') as file:
                file.write(chkpt['training_results'])  # write results.txt

        start_epoch = chkpt['epoch'] + 1
        del chkpt

    elif len(weights) > 0:  # darknet format
        # possible weights are '*.weights', 'yolov3-tiny.conv.15',  'darknet53.conv.74' etc.
        load_darknet_weights(model, weights)

    # Mixed precision training https://github.com/NVIDIA/apex
    if mixed_precision:
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.95 + 0.05  # cosine
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    scheduler.last_epoch = start_epoch - 1  # see link below
    # https://discuss.pytorch.org/t/a-problem-occured-when-resuming-an-optimizer/28822

    # Plot lr schedule
    # y = []
    # for _ in range(epochs):
    #     scheduler.step()
    #     y.append(optimizer.param_groups[0]['lr'])
    # plt.plot(y, '.-', label='LambdaLR')
    # plt.xlabel('epoch')
    # plt.ylabel('LR')
    # plt.tight_layout()
    # plt.savefig('LR.png', dpi=300)

    # Initialize distributed training
    if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():
        dist.init_process_group(backend='nccl',  # 'distributed backend'
                                init_method='tcp://127.0.0.1:9999',  # distributed training init method
                                world_size=1,  # number of nodes for distributed training
                                rank=0)  # distributed training node rank
        model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
        model.yolo_layers = model.module.yolo_layers  # move yolo layer indices to top level

    # Dataset
    dataset = LoadImagesAndLabels(train_path, img_size, batch_size,
                                  augment=True,
                                  hyp=hyp,  # augmentation hyperparameters
                                  rect=opt.rect,  # rectangular training
                                  cache_images=opt.cache_images,
                                  single_cls=opt.single_cls)

    # Dataloader
    batch_size = min(batch_size, len(dataset))
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             num_workers=nw,
                                             shuffle=not opt.rect,  # Shuffle=True unless rectangular training is used
                                             pin_memory=True,
                                             collate_fn=dataset.collate_fn)

    # Testloader
    testloader = torch.utils.data.DataLoader(LoadImagesAndLabels(test_path, imgsz_test, batch_size,
                                                                 hyp=hyp,
                                                                 rect=True,
                                                                 cache_images=opt.cache_images,
                                                                 single_cls=opt.single_cls),
                                             batch_size=batch_size,
                                             num_workers=nw,
                                             pin_memory=True,
                                             collate_fn=dataset.collate_fn)

    # Model parameters
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # giou loss ratio (obj_loss = 1.0 or giou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights

    # Model EMA
    ema = torch_utils.ModelEMA(model)

    # Start training
    nb = len(dataloader)  # number of batches
    n_burn = max(3 * nb, 500)  # burn-in iterations, max(3 epochs, 500 iterations)
    maps = np.zeros(nc)  # mAP per class
    # torch.autograd.set_detect_anomaly(True)
    results = (0, 0, 0, 0, 0, 0, 0)  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
    t0 = time.time()
    print('Image sizes %g - %g train, %g test' % (imgsz_min, imgsz_max, imgsz_test))
    print('Using %g dataloader workers' % nw)
    print('Starting training for %g epochs...' % epochs)
    for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
        model.train()

        # Update image weights (optional)
        if dataset.image_weights:
            w = model.class_weights.cpu().numpy() * (1 - maps) ** 2  # class weights
            image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)
            dataset.indices = random.choices(range(dataset.n), weights=image_weights, k=dataset.n)  # rand weighted idx

        mloss = torch.zeros(4).to(device)  # mean losses
        print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
        pbar = tqdm(enumerate(dataloader), total=nb)  # progress bar
        for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
            targets = targets.to(device)

            # Burn-in
            if ni <= n_burn * 2:
                model.gr = np.interp(ni, [0, n_burn * 2], [0.0, 1.0])  # giou loss ratio (obj_loss = 1.0 or giou)
                if ni == n_burn:  # burnin complete
                    print_model_biases(model)

                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x['lr'] = np.interp(ni, [0, n_burn], [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                    if 'momentum' in x:
                        x['momentum'] = np.interp(ni, [0, n_burn], [0.9, hyp['momentum']])

            # Multi-Scale
            if opt.multi_scale:
                if ni / accumulate % 1 == 0:  #  adjust img_size (67% - 150%) every 1 batch
                    img_size = random.randrange(grid_min, grid_max + 1) * gs
                sf = img_size / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to 32-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

            # Forward
            pred = model(imgs)

            # Loss
            loss, loss_items = compute_loss(pred, targets, model)
            if not torch.isfinite(loss):
                print('WARNING: non-finite loss, ending training ', loss_items)
                return results

            # Backward
            loss *= batch_size / 64  # scale loss
            if mixed_precision:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            # Optimize
            if ni % accumulate == 0:
                optimizer.step()
                optimizer.zero_grad()
                ema.update(model)

            # Print
            mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
            mem = '%.3gG' % (torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
            s = ('%10s' * 2 + '%10.3g' * 6) % ('%g/%g' % (epoch, epochs - 1), mem, *mloss, len(targets), img_size)
            pbar.set_description(s)

            # Plot
            if ni < 1:
                f = 'train_batch%g.jpg' % i  # filename
                res = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
                if tb_writer:
                    tb_writer.add_image(f, res, dataformats='HWC', global_step=epoch)
                    # tb_writer.add_graph(model, imgs)  # add model to tensorboard

            # end batch ------------------------------------------------------------------------------------------------

        # Update scheduler
        scheduler.step()

        # Process epoch results
        ema.update_attr(model)
        final_epoch = epoch + 1 == epochs
        if not opt.notest or final_epoch:  # Calculate mAP
            is_coco = any([x in data for x in ['coco.data', 'coco2014.data', 'coco2017.data']]) and model.nc == 80
            results, maps = test.test(cfg,
                                      data,
                                      batch_size=batch_size,
                                      img_size=imgsz_test,
                                      model=ema.ema,
                                      save_json=final_epoch and is_coco,
                                      single_cls=opt.single_cls,
                                      dataloader=testloader)

        # Write
        with open(results_file, 'a') as f:
            # result.txt : ep/total_ep  gpu_mem  m_GIoU_loss, m_obj_loss, m_cls_loss, m_total_loss, bbox_targets, img_size,  P, R, mAP, F1, test_losses=(GIoU, obj, cls)
            f.write(s + '%10.3g' * 7 % results + '\n')  # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
        if len(opt.name) and opt.bucket:
            os.system('gsutil cp results.txt gs://%s/results/results%s.txt' % (opt.bucket, opt.name))

        # Tensorboard
        if tb_writer:
            tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',
                    'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/F1',
                    'val/giou_loss', 'val/obj_loss', 'val/cls_loss']
            for x, tag in zip(list(mloss[:-1]) + list(results), tags):
                tb_writer.add_scalar(tag, x, epoch)

        # Update best mAP
        fi = fitness(np.array(results).reshape(1, -1))  # fitness_i = weighted combination of [P, R, mAP, F1]
        if fi > best_fitness:
            best_fitness = fi

        # Save model
        save = (not opt.nosave) or (final_epoch and not opt.evolve)
        if save:
            with open(results_file, 'r') as f:  # create checkpoint
                chkpt = {'epoch': epoch,
                         'best_fitness': best_fitness,
                         'training_results': f.read(),
                         'model': ema.ema.module.state_dict() if hasattr(model, 'module') else ema.ema.state_dict(),
                         'optimizer': None if final_epoch else optimizer.state_dict()}

            # Save last, best and delete

            torch.save(chkpt, last)
            if (best_fitness == fi) and not final_epoch:
                torch.save(chkpt, best)
            del chkpt

        # end epoch ----------------------------------------------------------------------------------------------------
    # end training

    n = opt.name
    if len(n):
        n = '_' + n if not n.isnumeric() else n
        fresults, flast, fbest = 'results%s.txt' % n, wdir + 'last%s.pt' % n, wdir + 'best%s.pt' % n
        for f1, f2 in zip([wdir + 'last.pt', wdir + 'best.pt', 'results.txt'], [flast, fbest, fresults]):
            if os.path.exists(f1):
                os.rename(f1, f2)  # rename
                ispt = f2.endswith('.pt')  # is *.pt
                strip_optimizer(f2) if ispt else None  # strip optimizer
                os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket and ispt else None  # upload

    if not opt.evolve:
        plot_results()  # save as results.png
    print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
    dist.destroy_process_group() if torch.cuda.device_count() > 1 else None
    torch.cuda.empty_cache()
    return results
# remove_mature.py - script to remove MAME roms with games for adults.
#
# It reads the gamelist.xml index and identifies adult games by checking whether the genre contains "*Mature*".
# Rom files are renamed by adding an .adult extension. The corresponding game screenshot is removed from the images directory (if it exists).
#
# This script must be invoked inside the /home/pi/RetroPie/roms/mame-libretro directory.
#

import xmltodict
import os
with open ("gamelist.xml") as fd:
    doc = xmltodict.parse(fd.read())

mature_games= [game['path'] for game in doc['gameList']['game'] if game['genre'] and "*Mature*" in game['genre']]

for game in mature_games:
    print(game)
    os.rename( game, game+".adult")
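
A hedged companion sketch for undoing the rename above, stripping the .adult suffix again (run from the same roms directory):

import os

for name in os.listdir('.'):
    if name.endswith('.adult'):
        os.rename(name, name[:-len('.adult')])  # restore the original rom name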
Example #37
import os

folder = "C:/Users/Yash/Desktop/B.E Project/landsatxplore-master"
for src in os.listdir(folder):
    # strip the 'LC08_L1TP_' prefix and the '_01_T1' suffix in one pass;
    # join with the folder so the rename works from any working directory
    dst = src.replace('LC08_L1TP_', '').replace('_01_T1', '')
    if dst != src:
        os.rename(os.path.join(folder, src), os.path.join(folder, dst))
 def save_state( ):
     os.rename( Sensor.state_fullname, Sensor.backup_fullname( ) )
     with open( Sensor.state_fullname, 'wb' ) as state_file:  # pickled, despite the .json-ish naming elsewhere
         pickle.dump( Sensor.managed_list, state_file )
def cleanUpGLIFFiles(defaultContentsFilePath, glyphDirPath, doWarning=True):
    changed = 0
    contentsFilePath = os.path.join(glyphDirPath, kContentsName)
    # maps glyph names to files.

    with open(contentsFilePath, 'r', encoding='utf-8') as fp:
        contentsDict = plistlib.load(fp)

    # First, delete glyph files that are not in the contents.plist file in
    # the glyphDirPath. In some UFOfont files, we end up with case errors,
    # so we need to check for a lower-case version of the file name.
    fileDict = {}
    for glyphName, fileName in contentsDict.items():
        fileDict[fileName] = glyphName
        lcFileName = fileName.lower()
        if lcFileName != fileName:
            fileDict[lcFileName + kAdobeLCALtSuffix] = glyphName

    fileList = os.listdir(glyphDirPath)
    for fileName in fileList:
        if not fileName.endswith(".glif"):
            continue
        if fileName in fileDict:
            continue
        lcFileName = fileName.lower()
        if (lcFileName + kAdobeLCALtSuffix) in fileDict:
            # glif file exists which has a case-insensitive match to file name
            # entry in the contents.plist file; assume latter is intended, and
            # change the file name to match.
            glyphFilePathOld = os.path.join(glyphDirPath, fileName)
            glyphFilePathNew = os.path.join(glyphDirPath, lcFileName)
            os.rename(glyphFilePathOld, glyphFilePathNew)
            continue

        glyphFilePath = os.path.join(glyphDirPath, fileName)
        os.remove(glyphFilePath)
        if doWarning:
            print("Removing glif file %s that was not in the contents.plist "
                  "file: %s" % (glyphDirPath, contentsFilePath))
        changed = 1

    if defaultContentsFilePath == contentsFilePath:
        return changed

    # Now remove glyphs that are not referenced in the defaultContentsFilePath.
    # Since the processed glyph layer is written with the defcon module,
    # and the default layer may be written by anything, the actual glyph file
    # names may be different for the same UFO glyph. We need to compare by UFO
    # glyph name, not file name.

    with open(defaultContentsFilePath, 'r', encoding='utf-8') as fp:
        defaultContentsDict = plistlib.load(fp)

    fileList = os.listdir(glyphDirPath)
    for fileName in fileList:
        if not fileName.endswith(".glif"):
            continue
        try:
            glyphName = fileDict[fileName]
            if glyphName not in defaultContentsDict:
                glyphFilePath = os.path.join(glyphDirPath, fileName)
                os.remove(glyphFilePath)
                if doWarning:
                    print("Removing glif %s that was not in the "
                          "contents.plist file: %s" % (
                              glyphName, defaultContentsFilePath))
                changed = 1

        except KeyError:
            print("Shouldn't happen: glif file %s has no entry in the file "
                  "map for %s" % (fileName, defaultContentsFilePath))

    return changed
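
# Hypothetical usage (names assumed, not taken from the original module):
# clean a processed glyph layer against the default layer's contents.plist.
#
# default_plist = os.path.join(ufo_path, "glyphs", kContentsName)
# processed_dir = os.path.join(ufo_path, "glyphs.processed")
# changed = cleanUpGLIFFiles(default_plist, processed_dir, doWarning=True)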
Beispiel #40
def setup_experiment(debug=True, verbose=False, app=None, exp_config=None):
    """Check the app and, if compatible with Dallinger, freeze its state."""
    print_header()

    # Verify that the package is usable.
    log("Verifying that directory is compatible with Dallinger...")
    if not verify_package(verbose=verbose):
        raise AssertionError(
            "This is not a valid Dallinger app. " +
            "Fix the errors and then try running 'dallinger verify'.")

    # Verify that the Postgres server is running.
    try:
        psycopg2.connect(database="x", user="******", password="******")
    except psycopg2.OperationalError as e:
        if "could not connect to server" in str(e):
            raise RuntimeError("The Postgres server isn't running.")

    # Load configuration.
    config = get_config()
    if not config.ready:
        config.load_config()

    # Check that the demo-specific requirements are satisfied.
    try:
        with open("requirements.txt", "r") as f:
            dependencies = [r for r in f.readlines() if r[:3] != "-e "]
    except IOError:
        dependencies = []

    pkg_resources.require(dependencies)

    # Generate a unique id for this experiment.
    generated_uid = public_id = str(uuid.uuid4())

    # If the user provided an app name, use it everywhere that's user-facing.
    if app:
        public_id = str(app)

    log("Experiment id is " + public_id + "")

    # Copy this directory into a temporary folder, ignoring .git
    dst = os.path.join(tempfile.mkdtemp(), public_id)
    to_ignore = shutil.ignore_patterns(os.path.join(".git", "*"), "*.db",
                                       "snapshots", "data", "server.log")
    shutil.copytree(os.getcwd(), dst, ignore=to_ignore)

    click.echo(dst)

    # Save the experiment id
    with open(os.path.join(dst, "experiment_id.txt"), "w") as file:
        file.write(generated_uid)

    # Change directory to the temporary folder.
    cwd = os.getcwd()
    os.chdir(dst)

    # Write the custom config
    if exp_config:
        config.extend(exp_config)

    config.write_config(filter_sensitive=True)

    # Zip up the temporary directory and place it in the cwd.
    if not debug:
        log("Freezing the experiment package...")
        shutil.make_archive(
            os.path.join(cwd, "snapshots", public_id + "-code"), "zip", dst)

    # Check directories.
    if not os.path.exists(os.path.join("static", "scripts")):
        os.makedirs(os.path.join("static", "scripts"))
    if not os.path.exists("templates"):
        os.makedirs("templates")
    if not os.path.exists(os.path.join("static", "css")):
        os.makedirs(os.path.join("static", "css"))

    # Rename experiment.py for backwards compatibility.
    os.rename(os.path.join(dst, "experiment.py"),
              os.path.join(dst, "dallinger_experiment.py"))

    # Get dallinger package location.
    from pkg_resources import get_distribution
    dist = get_distribution('dallinger')
    src_base = os.path.join(dist.location, dist.project_name)

    heroku_files = [
        "Procfile",
        "launch.py",
        "worker.py",
        "clock.py",
    ]

    for filename in heroku_files:
        src = os.path.join(src_base, "heroku", filename)
        shutil.copy(src, os.path.join(dst, filename))

    clock_on = config.get('clock_on', False)

    # If the clock process has been disabled, overwrite the Procfile.
    if not clock_on:
        src = os.path.join(src_base, "heroku", "Procfile_no_clock")
        shutil.copy(src, os.path.join(dst, "Procfile"))

    frontend_files = [
        os.path.join("static", "css", "dallinger.css"),
        os.path.join("static", "scripts", "dallinger.js"),
        os.path.join("static", "scripts", "reqwest.min.js"),
        os.path.join("templates", "error.html"),
        os.path.join("templates", "launch.html"),
        os.path.join("templates", "complete.html"),
        os.path.join("templates", "thanks.html"),
        os.path.join("static", "robots.txt")
    ]

    for filename in frontend_files:
        src = os.path.join(src_base, "frontend", filename)
        shutil.copy(src, os.path.join(dst, filename))

    time.sleep(0.25)

    os.chdir(cwd)

    return (public_id, dst)
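
# Editor's note (a sketch): shutil.make_archive writes the zip file itself but
# not its parent directory, so the freeze step above assumes cwd already
# contains a snapshots/ folder; a guard like the following avoids a
# FileNotFoundError on a fresh checkout.
#
# snapshots_dir = os.path.join(cwd, "snapshots")
# if not os.path.exists(snapshots_dir):
#     os.makedirs(snapshots_dir)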
Beispiel #41

if __name__ == '__main__':
    if len(sys.argv) > 1:
        checkip(sys.argv[1])
    else:
        files = os.listdir(g_filedir)
        files.sort()
        i = j = 0
        n = len(files)
        for item in files:
            j += 1
            if "googleip-" in item:
                i = re.findall(r'([0-9]+)', item)[0]
                if os.path.exists("googleip-%s.txt" % i):
                    g_googleipfile = os.path.join(g_filedir, "googleip-%s.txt" % i)
                    evt_ipramdomend.clear()
                    print "\n", "=" * 80, "\nbegin check googleip-%s.txt" % i
                    list_ping()
                    if not os.path.exists(os.path.join(g_filedir, "tmp")): os.mkdir(os.path.join(g_filedir, "tmp"))
                    if os.path.exists("ip_tmperror.txt"): os.rename("ip_tmperror.txt", "ip_tmperror-%s.txt" % i)
                    if os.path.exists("ip_tmpno.txt"): os.rename("ip_tmpno.txt", "ip_tmpno-%s.txt" % i)
                    if os.path.exists("ip_tmpok.txt"): os.rename("ip_tmpok.txt", "ip_tmpok-%s.txt" % i)
                    if os.path.exists("ip_tmperror-%s.txt" % i): move_over("ip_tmperror-%s.txt" % i, "tmp/")
                    if os.path.exists("ip_tmpno-%s.txt" % i): move_over("ip_tmpno-%s.txt" % i, "tmp/")
                    if os.path.exists("ip_tmpok-%s.txt" % i): move_over("ip_tmpok-%s.txt" % i, "tmp/")
                    if os.path.exists("googleip-%s.txt" % i): os.remove("googleip-%s.txt" % i)
            elif j == n and i == 0:
                sort_tmpokfile(0)
                list_ping()
Beispiel #42
#coding=utf-8
'''
Created on 2017-01-04

@author: quqiao
'''

#a=(1,2,3,4,5)
#print type(a)
#print list(a)
#print type(list(a))

import sys,os
if len(sys.argv)<=4:
    print "usage:./file_replace.py  old_text  new_text filename"
old_text,new_text = sys.argv[1],sys.argv[2]
file_name = sys.argv[3]

f= file(file_name,'rb')
new_file = file('.%s.bak'% file_name,'wb')
for line in f.xreadlines():
    new_file.write(line.replace(old_text,new_text))
f.close()
new_file.close()
    
if '--bak' in sys.argv:
    os.rename(file_name,'.%s.bak2'%file_name)
    os.rename('.%s.bak'%file_name,file_name) 
else:
    os.rename('.%s.bak'%file_name,file_name)
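
# For reference, the standard library covers this rewrite-with-backup pattern
# in a few lines (a sketch, not part of the original script): fileinput with
# inplace=True redirects stdout back into the file and keeps a .bak copy.
#
# import fileinput, sys
# for line in fileinput.input(files=[file_name], inplace=True, backup='.bak'):
#     sys.stdout.write(line.replace(old_text, new_text))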
    
Beispiel #43
    def print_resr_length(self):
        print "model:", self.model, "duration:", self.duration, "num_ps:", self.num_ps, "num_worker:", self.num_worker, \
            "ps_cpu:", self.ps_cpu, "worker_cpu:", self.worker_gpu, "worker_gpu:", self.worker_gpu


jobs = []
num_trace_files = 0
for trace_file in sorted(os.listdir(trace_dir)):
    fn = trace_dir + trace_file
    if "job_trace" in trace_file and os.path.isfile(fn):
        pos = fn.index("2018")-1
        if fn[pos] == '.':
            print "Error: some file wrong format", trace_file
            new_fn = fn[:pos] + "_" + fn[pos+1:]
            os.rename(fn, new_fn)
        else:
            num_trace_files += 1
            with open(fn, 'r') as f:
                for line in f:
                    line = line.replace('\n', '')
                    if not line:
                        continue
                    items = line.split(',')
                    id = items[0]
                    model = items[1]
                    start = int(items[2])
                    end = int(items[3])

                    if end == 0: # some jobs have no end time (with or without ClusterInfo)
                        continue
Beispiel #44
import os

#rootdir = 'C:\\Users\\34322\\Desktop\\out'
rootdir = '/home/jiangyl/pics'
entries = os.listdir(rootdir)  # list all files and directories in the folder

for i in range(0, len(entries)):
    path = os.path.join(rootdir, entries[i])
    #print(path)
    if path.find('RGB') != (-1):
        # Replace only the extension, since 'jpg' could also occur elsewhere
        # in the path.
        path1 = path.replace('.jpg', '.png')
        print(path)
        print(path1)
        os.rename(path, path1)


'''for i in range(0, len(entries)):
    path = os.path.join(rootdir, entries[i])
    #print(path)
    if path.find('txt?') != (-1):
        path1 = path.replace('txt?', 'txt')
        print(path)
        print(path1)
        os.rename(path, path1)'''
Beispiel #45
def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False):
    """Package the output of the build process.
    """
    outfiles = []
    tmp_build_dir = create_temp_dir()
    logging.debug("Packaging for build output: {}".format(build_output))
    logging.info("Using temporary directory: {}".format(tmp_build_dir))
    try:
        for platform in build_output:
            # Create top-level folder displaying which platform (linux, etc)
            os.makedirs(os.path.join(tmp_build_dir, platform))
            for arch in build_output[platform]:
                logging.info("Creating packages for {}/{}".format(platform, arch))
                # Create second-level directory displaying the architecture (amd64, etc)
                current_location = build_output[platform][arch]

                # Create directory tree to mimic file system of package
                build_root = os.path.join(tmp_build_dir,
                                          platform,
                                          arch,
                                          '{}-{}-{}'.format(PACKAGE_NAME, version, iteration))
                os.makedirs(build_root)

                # Copy packaging scripts to build directory
                if platform == "windows":
                    # For windows and static builds, just copy
                    # binaries to root of package (no other scripts or
                    # directories)
                    package_scripts(build_root, config_only=True, windows=True)
                elif static or "static_" in arch:
                    package_scripts(build_root, config_only=True)
                else:
                    create_package_fs(build_root)
                    package_scripts(build_root)

                if platform != "windows":
                    package_man_files(build_root)

                for binary in targets:
                    # Copy newly-built binaries to packaging directory
                    if platform == 'windows':
                        binary = binary + '.exe'
                    if platform == 'windows' or static or "static_" in arch:
                        # Where the binary should go in the package filesystem
                        to = os.path.join(build_root, binary)
                        # Where the binary currently is located
                        fr = os.path.join(current_location, binary)
                    else:
                        # Where the binary currently is located
                        fr = os.path.join(current_location, binary)
                        # Where the binary should go in the package filesystem
                        to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary)
                    shutil.copy(fr, to)

                for package_type in supported_packages[platform]:
                    # Package the directory structure for each package type for the platform
                    logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type))
                    name = pkg_name
                    # Reset version, iteration, and current location on each run
                    # since they may be modified below.
                    package_version = version
                    package_iteration = iteration
                    if "static_" in arch:
                        # Remove the "static_" from the displayed arch on the package
                        package_arch = arch.replace("static_", "")
                    else:
                        package_arch = arch
                    if not release and not nightly:
                        # For non-release builds, just use the commit hash as the version
                        package_version = "{}~{}".format(version,
                                                         get_current_commit(short=True))
                        package_iteration = "0"
                    package_build_root = build_root
                    current_location = build_output[platform][arch]

                    if package_type in ['zip', 'tar']:
                        # For tars and zips, start the packaging one folder above
                        # the build root (to include the package name)
                        package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1]))
                        if nightly:
                            if static or "static_" in arch:
                                name = '{}-static-nightly_{}_{}'.format(name,
                                                                        platform,
                                                                        package_arch)
                            else:
                                name = '{}-nightly_{}_{}'.format(name,
                                                                 platform,
                                                                 package_arch)
                        else:
                            if static or "static_" in arch:
                                name = '{}-{}-static_{}_{}'.format(name,
                                                                   package_version,
                                                                   platform,
                                                                   package_arch)
                            else:
                                name = '{}-{}_{}_{}'.format(name,
                                                            package_version,
                                                            platform,
                                                            package_arch)
                        current_location = os.path.join(os.getcwd(), current_location)
                        if package_type == 'tar':
                            tar_command = "cd {} && tar -cvzf {}.tar.gz --owner=root ./*".format(package_build_root, name)
                            run(tar_command, shell=True)
                            run("mv {}.tar.gz {}".format(os.path.join(package_build_root, name), current_location), shell=True)
                            outfile = os.path.join(current_location, name + ".tar.gz")
                            outfiles.append(outfile)
                        elif package_type == 'zip':
                            zip_command = "cd {} && zip -r {}.zip ./*".format(package_build_root, name)
                            run(zip_command, shell=True)
                            run("mv {}.zip {}".format(os.path.join(package_build_root, name), current_location), shell=True)
                            outfile = os.path.join(current_location, name + ".zip")
                            outfiles.append(outfile)
                    elif package_type not in ['zip', 'tar'] and (static or "static_" in arch):
                        logging.info("Skipping package type '{}' for static builds.".format(package_type))
                    else:
                        fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format(
                            fpm_common_args,
                            name,
                            package_arch,
                            package_type,
                            package_version,
                            package_iteration,
                            package_build_root,
                            current_location)
                        if package_type == "rpm":
                            fpm_command += "--depends coreutils --depends shadow-utils --rpm-posttrans {}".format(POSTINST_SCRIPT)
                        out = run(fpm_command, shell=True)
                        matches = re.search(':path=>"(.*)"', out)
                        outfile = None
                        if matches is not None:
                            outfile = matches.groups()[0]
                        if outfile is None:
                            logging.warn("Could not determine output from packaging output!")
                        else:
                            if nightly:
                                # Strip nightly version from package name
                                new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), "nightly")
                                os.rename(outfile, new_outfile)
                                outfile = new_outfile
                            else:
                                if package_type == 'rpm':
                                    # rpm's convert any dashes to underscores
                                    package_version = package_version.replace("-", "_")
                                new_outfile = outfile.replace("{}-{}".format(package_version, package_iteration), package_version)
                                os.rename(outfile, new_outfile)
                                outfile = new_outfile
                            outfiles.append(os.path.join(os.getcwd(), outfile))
        logging.debug("Produced package files: {}".format(outfiles))
        return outfiles
    finally:
        # Cleanup
        shutil.rmtree(tmp_build_dir)
Beispiel #46
def get_cluster_dump_archive(cluster_config_file: Optional[str] = None,
                             host: Optional[str] = None,
                             ssh_user: Optional[str] = None,
                             ssh_key: Optional[str] = None,
                             docker: Optional[str] = None,
                             local: Optional[bool] = None,
                             output: Optional[str] = None,
                             logs: bool = True,
                             debug_state: bool = True,
                             pip: bool = True,
                             processes: bool = True,
                             processes_verbose: bool = False) -> Optional[str]:

    # Inform the user what kind of logs are collected (before actually
    # collecting, so they can abort)
    content_str = ""
    if logs:
        content_str += \
            "  - The logfiles of your Ray session\n" \
            "    This usually includes Python outputs (stdout/stderr)\n"

    if debug_state:
        content_str += \
            "  - Debug state information on your Ray cluster \n" \
            "    e.g. number of workers, drivers, objects, etc.\n"

    if pip:
        content_str += "  - Your installed Python packages (`pip freeze`)\n"

    if processes:
        content_str += \
            "  - Information on your running Ray processes\n" \
            "    This includes command line arguments\n"

    cli_logger.warning(
        "You are about to create a cluster dump. This will collect data from "
        "cluster nodes.\n\n"
        "The dump will contain this information:\n\n"
        f"{content_str}\n"
        f"If you are concerned about leaking private information, extract "
        f"the archive and inspect its contents before sharing it with "
        f"anyone.")

    # Parse arguments (e.g. fetch info from cluster config)
    cluster_config_file, hosts, ssh_user, ssh_key, docker, cluster_name = \
        _info_from_params(cluster_config_file, host, ssh_user, ssh_key, docker)

    nodes = [
        Node(
            host=h,
            ssh_user=ssh_user,
            ssh_key=ssh_key,
            docker_container=docker) for h in hosts
    ]

    if not nodes:
        cli_logger.error(
            f"No nodes found. Specify with `--host` or by passing a ray "
            f"cluster config to `--cluster`.")
        return None

    if cluster_config_file:
        nodes[0].is_head = True

    if local is None:
        # If called with a cluster config, this was probably started
        # from a laptop
        local = not bool(cluster_config_file)

    parameters = GetParameters(
        logs=logs,
        debug_state=debug_state,
        pip=pip,
        processes=processes,
        processes_verbose=processes_verbose)

    with Archive() as archive:
        if local:
            create_archive_for_local_and_remote_nodes(
                archive, remote_nodes=nodes, parameters=parameters)
        else:
            create_archive_for_remote_nodes(
                archive, remote_nodes=nodes, parameters=parameters)

    if not output:
        if cluster_name:
            filename = f"{cluster_name}_" \
                       f"{datetime.datetime.now():%Y-%m-%d_%H-%M-%S}.tar.gz"
        else:
            filename = f"collected_logs_" \
                       f"{datetime.datetime.now():%Y-%m-%d_%H-%M-%S}.tar.gz"
        output = os.path.join(os.getcwd(), filename)
    else:
        output = os.path.expanduser(output)

    os.rename(archive.file, output)
    return output
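
# Editor's sketch: os.rename, as used above, raises OSError (EXDEV) when the
# archive's temporary file and the output path live on different filesystems;
# shutil.move handles that case by falling back to copy-and-delete.
import os
import shutil

def move_archive(src, dst):
    try:
        os.rename(src, dst)
    except OSError:
        shutil.move(src, dst)  # copies across devices, then removes the source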
import os

os.chdir(r'C:\Users\student\TIL\startCamp\02_day\file_handling'
         )  # move into the folder holding the 500 application files

filenames = os.listdir('.')
for filename in filenames:
    # Only rename files whose extension is .txt.
    extension = os.path.splitext(filename)[-1]  # split off just the extension

    if extension == '.txt':
        os.rename(filename, filename.replace(
            'SAMSUNG_SAMSUNG_',
            'SSAFY_'))  # rename from the old name (first argument) to the new one (second argument)
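
# The same rename expressed with pathlib (a sketch; Path.rename wraps
# os.rename and Path.with_name builds the new sibling name):
from pathlib import Path

for p in Path('.').glob('*SAMSUNG_SAMSUNG_*.txt'):
    p.rename(p.with_name(p.name.replace('SAMSUNG_SAMSUNG_', 'SSAFY_')))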
def convertVideo(FileName, destinationFormat):
    print "* convertVideo: ", destinationFormat
    newFilename          = "_p_" + FileName[:-3] + destinationFormat
    completeFileName     = os.path.join(UNCONVERTED_VIDS[0], FileName).replace(" ", "\ ")
    completeNewFileName  = os.path.join(UNCONVERTED_VIDS[0], newFilename).replace(" ", "\ ")
    completeDestination  = os.path.join(VIDEOS_LOCATION[0], newFilename).replace(" ", "\ ")
    convertedDestination = os.path.join(CONVERTED_VIDS[0], FileName).replace(" ", "\ ")
    time.sleep(1)
    if(os.path.exists(completeDestination)):
        print " ^^ File Already Exists - ",completeDestination
        return True
    else:
        
        if(destinationFormat == "mp4"):
            video_duration = findVideoDuration(completeFileName)[1]
            #  cmd = "ffmpeg -i "+completeFileName+" -b 1500k -vcodec libx264 -preset slow -preset baseline -g 30 "+completeNewFileName
            if(video_duration > 0):
                print "^ video_duration: ",video_duration
                #cmd = "ffmpeg -i "+completeFileName+" -vcodec libx264 -preset fast -maxrate 500k -bufsize 1000k -threads 0 -acodec libfdk_aac -b:a 128k -t "+ str(video_duration-1)+" "+completeNewFileName
                cmd = "ffmpeg -i "+completeFileName+" -threads 0  -t "+ str(video_duration-1)+" "+completeNewFileName
                #cmd = "ffmpeg -i "+completeFileName+" -vcodec libx264 -preset fast -threads 0 -acodec libfdk_aac -t "+ str(video_duration-1)+" -vf scale=480:ih*480/iw -strict -2 -r 30 -pix_fmt yuv420p  "+completeNewFileName
                #cmd = "ffmpeg -i "+completeFileName+" -b 1500k -vcodec libx264 -preset slow -g 30 -t "+ str(video_duration-1)+" "+completeNewFileName
            else:
                cmd = "ffmpeg -i "+completeFileName+" -threads 0 "+completeNewFileName
                #cmd = "ffmpeg -i "+completeFileName+" -vcodec libx264 -preset fast -threads 0 -acodec libfdk_aac -vf scale=480:ih*480/iw -strict -2 -r 30 -pix_fmt yuv420p "+completeNewFileName
                #cmd = "ffmpeg -i "+completeFileName+" -vcodec libx264 -preset fast -maxrate 500k -bufsize 1000k -threads 0 -acodec libfdk_aac -b:a 128k "+completeNewFileName
                #cmd = "ffmpeg -i "+completeFileName+" -b 1500k -vcodec libx264 -preset slow -g 30 "+completeNewFileName
        if(destinationFormat == "webm"):
            video_duration = findVideoDuration(convertedDestination)[1]
            # fmpeg -i Video-2013_4_29_23_8_13.mov -b 1500k -vcodec libvpx -ab 160000 -strict -2 -f webm -g 30 _p_Video-2013_4_29_23_8_13.webm
            if(video_duration):
                print "^ video_duration: ",video_duration
                #cmd = "ffmpeg -i "+convertedDestination+" -vcodec libvpx -ab 160000 -strict -2 -f webm -t "+ str(video_duration-1)+" "+completeNewFileName
                #cmd = "ffmpeg -i "+convertedDestination+" -vcodec libvpx -ab 160000 -strict -2 -f webm -g 0 -t "+ str(video_duration-1)+" "+completeNewFileName
                cmd = "ffmpeg -i "+convertedDestination+" -codec:v libvpx -quality good -cpu-used 0 -b:v 500k -qmin 10 -qmax 42 -maxrate 500k -bufsize 1000k -threads 4 -t "+ str(video_duration-1)+" -vf scale=-1:480 -codec:a libvorbis -b:a 128k -r 30  "+completeNewFileName
            else:
                cmd = "ffmpeg -i "+convertedDestination+" -codec:v libvpx -quality good -cpu-used 0 -b:v 500k -qmin 10 -qmax 42 -maxrate 500k -bufsize 1000k -threads 4 -vf scale=-1:480 -codec:a libvorbis -b:a 128k -r 30 "+completeNewFileName
                #cmd = "ffmpeg -i "+convertedDestination+" -vcodec libvpx -ab 160000 -strict -2 -f webm -g 0 "+completeNewFileName
        print "     # ConvertVideo: ",cmd
        ## pp = subprocess.call(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,shell=True)
        if(destinationFormat == "mp4"): 
            checkDestination = completeFileName
        if(destinationFormat == "webm"):
            checkDestination = convertedDestination
        if(os.path.exists(checkDestination)):
            try:
                #p = pexpect.spawn(cmd)
                #indexes = ['Overwrite ?', 'No such file or directory', pexpect.EOF, pexpect.TIMEOUT]
                #index = p.expect(indexes)
                #print "^ Exit Status: ",p.exitstatus
                if(os.path.exists(completeNewFileName) is False):
                    # Make sure this file doesn't already exist, to prevent an overwrite prompt...
                    args = shlex.split(cmd)
                    pp = subprocess.call(args)
                else:
                    print " ! File Already Exists..."
                os.rename(completeNewFileName,completeDestination)
                if(destinationFormat == "mp4"):
                    os.rename(completeFileName,convertedDestination)
                writeToLogFile(str(FileName + " - Video conversion successful"))
                print " ^ video conversion done"
                return True
            except:
                writeToLogFile(str("ERROR: " + FileName + " - Couldn't convert video"))
                print " ^ video conversion failed"
                print "     Failed: Filename: ",Filename
                return False
        else: 
            print "^ video file doesn't exists"
            return False
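
# Editor's sketch: passing subprocess an argument list sidesteps the manual
# '\ ' space-escaping used above, so paths with spaces need no quoting
# (the ffmpeg flags shown are illustrative, mirroring the commands built above).
import subprocess

def run_ffmpeg(source_path, dest_path, extra_args=()):
    cmd = ["ffmpeg", "-i", source_path] + list(extra_args) + [dest_path]
    return subprocess.call(cmd)  # returns ffmpeg's exit status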
Beispiel #49
    def __init__(self, landmarks_type, network_size=NetworkSize.LARGE,
                 enable_cuda=True, enable_cudnn=True, flip_input=False,
                 use_cnn_face_detector=False):
        self.enable_cuda = enable_cuda
        self.use_cnn_face_detector = use_cnn_face_detector
        self.flip_input = flip_input
        self.landmarks_type = landmarks_type
        base_path = os.path.join(appdata_dir('face_alignment'), "data")

        if not os.path.exists(base_path):
            os.makedirs(base_path)

        if enable_cudnn and self.enable_cuda:
            torch.backends.cudnn.benchmark = True

        # Initialise the face detector
        if self.use_cnn_face_detector:
            path_to_detector = os.path.join(
                base_path, "mmod_human_face_detector.dat")
            if not os.path.isfile(path_to_detector):
                print("Downloading the face detection CNN. Please wait...")

                path_to_temp_detector = os.path.join(
                    base_path, "mmod_human_face_detector.dat.download")

                if os.path.isfile(path_to_temp_detector):
                    os.remove(path_to_temp_detector)

                request_file.urlretrieve(
                    "https://www.adrianbulat.com/downloads/dlib/mmod_human_face_detector.dat",
                    path_to_temp_detector)

                os.rename(path_to_temp_detector, path_to_detector)

            self.face_detector = dlib.cnn_face_detection_model_v1(
                path_to_detector)

        else:
            self.face_detector = dlib.get_frontal_face_detector()

        # Initialise the face alignment networks
        self.face_alignment_net = FAN(int(network_size))
        if landmarks_type == LandmarksType._2D:
            network_name = '2DFAN-' + str(int(network_size)) + '.pth.tar'
        else:
            network_name = '3DFAN-' + str(int(network_size)) + '.pth.tar'
        fan_path = os.path.join(base_path, network_name)

        if not os.path.isfile(fan_path):
            print("Downloading the Face Alignment Network(FAN). Please wait...")

            fan_temp_path = os.path.join(base_path, network_name + '.download')

            if os.path.isfile(fan_temp_path):
                os.remove(fan_temp_path)

            request_file.urlretrieve(
                "https://www.adrianbulat.com/downloads/python-fan/" +
                network_name, fan_temp_path)

            os.rename(fan_temp_path, fan_path)

        fan_weights = torch.load(
            fan_path,
            map_location=lambda storage,
            loc: storage)

        self.face_alignment_net.load_state_dict(fan_weights)

        if self.enable_cuda:
            self.face_alignment_net.cuda()
        self.face_alignment_net.eval()

        # Initialise the depth prediction network
        if landmarks_type == LandmarksType._3D:
            self.depth_prediciton_net = ResNetDepth()
            depth_model_path = os.path.join(base_path, 'depth.pth.tar')
            if not os.path.isfile(depth_model_path):
                print(
                    "Downloading the Face Alignment depth Network (FAN-D). Please wait...")

                depth_model_temp_path = os.path.join(base_path, 'depth.pth.tar.download')

                if os.path.isfile(depth_model_temp_path):
                    os.remove(depth_model_temp_path)

                request_file.urlretrieve(
                    "https://www.adrianbulat.com/downloads/python-fan/depth.pth.tar",
                    depth_model_temp_path)

                os.rename(depth_model_temp_path, depth_model_path)

            depth_weights = torch.load(
                depth_model_path,
                map_location=lambda storage,
                loc: storage)
            depth_dict = {
                k.replace('module.', ''): v for k,
                v in depth_weights['state_dict'].items()}
            self.depth_prediciton_net.load_state_dict(depth_dict)

            if self.enable_cuda:
                self.depth_prediciton_net.cuda()
            self.depth_prediciton_net.eval()
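
# The three download blocks above repeat the same pattern: fetch to a
# '.download' temp file, then rename into place so that only complete files
# are ever published under their final name. A sketch of the shared helper
# they could use (name hypothetical, request_file as imported by this module):
def fetch_atomically(url, dest_path):
    tmp_path = dest_path + '.download'
    if os.path.isfile(tmp_path):
        os.remove(tmp_path)  # discard any stale partial download
    request_file.urlretrieve(url, tmp_path)
    os.rename(tmp_path, dest_path)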
# Created by Gotcha on 2017/10/13.
# !/usr/bin/env python
# -*- coding: utf-8 -*-

import os

for root, _, files in os.walk('./MBA_管理百科指定子类'):
    for file_name in files:
        old_name = os.path.join(root, file_name)
        index = file_name.find('.')
        new_name = os.path.join(root, file_name[:index] + '.html')
        os.rename(old_name, new_name)
        print(file_name + ' done')
Beispiel #51
def postprocess_translations(reduce_diff_hacks=False):
    print('Checking and postprocessing...')

    if reduce_diff_hacks:
        global _orig_escape_cdata
        _orig_escape_cdata = ET._escape_cdata
        ET._escape_cdata = escape_cdata

    for (filename, filepath) in all_ts_files():
        os.rename(filepath, filepath + '.orig')

    have_errors = False
    for (filename, filepath) in all_ts_files('.orig'):
        # pre-fixups to cope with transifex output
        parser = ET.XMLParser(
            encoding='utf-8'
        )  # must spell the encoding 'utf-8'; plain 'utf8' is not understood here
        with open(filepath + '.orig', 'rb') as f:
            data = f.read()
        # remove control characters; this must be done over the entire file otherwise the XML parser will fail
        data = remove_invalid_characters(data)
        tree = ET.parse(io.BytesIO(data), parser=parser)

        # iterate over all messages in file
        root = tree.getroot()
        for context in root.findall('context'):
            for message in context.findall('message'):
                numerus = message.get('numerus') == 'yes'
                source = message.find('source').text
                translation_node = message.find('translation')
                # pick all numerusforms
                if numerus:
                    translations = [
                        i.text for i in translation_node.findall('numerusform')
                    ]
                else:
                    translations = [translation_node.text]

                for translation in translations:
                    if translation is None:
                        continue
                    errors = []
                    valid = check_format_specifiers(source, translation,
                                                    errors, numerus)

                    for error in errors:
                        print('%s: %s' % (filename, error))

                    if not valid:  # set type to unfinished and clear string if invalid
                        translation_node.clear()
                        translation_node.set('type', 'unfinished')
                        have_errors = True

                # Remove location tags
                for location in message.findall('location'):
                    message.remove(location)

                # Remove entire message if it is an unfinished translation
                if translation_node.get('type') == 'unfinished':
                    context.remove(message)

        # check if document is (virtually) empty, and remove it if so
        num_messages = 0
        for context in root.findall('context'):
            for message in context.findall('message'):
                num_messages += 1
        if num_messages < MIN_NUM_MESSAGES:
            print('Removing %s, as it contains only %i messages' %
                  (filepath, num_messages))
            continue

        # write fixed-up tree
        # if diff reduction requested, replace some XML to 'sanitize' to qt formatting
        if reduce_diff_hacks:
            out = io.BytesIO()
            tree.write(out, encoding='utf-8')
            out = out.getvalue()
            out = out.replace(b' />', b'/>')
            with open(filepath, 'wb') as f:
                f.write(out)
        else:
            tree.write(filepath, encoding='utf-8')
    return have_errors
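
# Editor's note: the '.orig' copies created by the rename at the top of this
# function are left on disk; a cleanup pass (a sketch, reusing all_ts_files
# as it is used above) could remove them once processing succeeds.
#
# for (filename, filepath) in all_ts_files('.orig'):
#     os.remove(filepath + '.orig')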
Beispiel #52
        # passrate=100-len(TestResult.failures)/TestResult.testsRun*100
        #
        # logging.info("fail the test: "+str(TestResult.failures))
        # logging.info("total fail: "+str(len(TestResult.failures)))
        # logging.info("total run: "+str(TestResult.testsRun))
        # logging.info("TestCases Pass Rate: "+str(passrate)+"%")

#
if __name__ == "__main__":
    if not os.path.exists("./picture"):
        os.mkdir("./picture")

    if not os.path.exists("./data"):
        os.mkdir("./data")
    else:
        os.rename("data","data"+time.strftime("%Y%m%d%H%M%S", time.localtime()))
        os.mkdir("./data")
    # Define the test cases that make up a test scenario.
    # testscene1=['testcase1','testcase2']
    #testscene1=['testcase3']
    # testscene2=['testcase2','testcase3']
    # testpath="E:\python_space/xingneng/test_login1.py"
    # testpath=['testlogin']
    # Run the tests concurrently with threads; the three parameters are the
    # test scenario, the concurrency count, and the duration (None means a
    # single run).
    thread1 = testThread(10)
    # thread2 = testThread(testscene2,100,100)

    monitor()
    thread1.start()
    thread1.join()
    teardownps()
import os

# Make sure the destination class folders exist before moving files into them
# (assumed layout: data/rock, data/scissors, data/paper).
for d in ("rock", "scissors", "paper"):
    os.makedirs(os.path.join("data", d), exist_ok=True)

imgs = [name for name in os.listdir("data") if name.endswith(".jpeg")]

for img in imgs:
    if img.startswith('R'):
        os.rename(f"data/{img}", f"data/rock/{img}")
    elif img.startswith('S'):
        os.rename(f"data/{img}", f"data/scissors/{img}")
    elif img.startswith('P'):
        os.rename(f"data/{img}", f"data/paper/{img}")
Beispiel #54
def infer_on_stream(args, client):
    """
    Initialize the inference network, stream video to network,
    and output stats and video.

    :param args: Command line arguments parsed by `build_argparser()`
    :param client: MQTT client
    :return: None
    """
    # Initialise some counters:
    # total_count -> total number of people counted so far
    # current_people -> number of people present in the current frames
    # last_n_counters -> used to manage recognition errors: the same number of
    # people has to be recognized in several consecutive frames.
    # Frame counters of the people currently present are kept in order to
    # calculate durations.

    single_image_mode = False

    total_count = 0
    current_people = 0
    last_n_counters = [0 for _ in range(SECUENCIAL_SAFE_FRAMES)]
    frame_counter = 0

    # Queues used to calculate each person's duration.
    # It is assumed that when several people are in the video they follow
    # first-in-first-out behaviour. This does not have to be true, but it
    # does not affect the sum of all people's durations.

    init_frames = queue.Queue()
    durations = queue.Queue()

    # Initialise the class
    infer_network = Network()
    # Set Probability threshold for detections
    prob_threshold = args.prob_threshold

    # Load the model through `infer_network`
    infer_network.load_model(args.model, args.device, args.cpu_extension)

    # Handle the input stream ###
    net_input_shape = infer_network.get_input_shape()

    # Check if input is a web-CAM, an image, or a video.
    if args.input == 'CAM':
        cap = cv2.VideoCapture(0)
        cap.open(0)
    else:
        resources_file_name = os.path.splitext(args.input)[0]
        resources_file_ext = os.path.splitext(args.input)[1]

        if resources_file_ext in ['.png', '.jpg', '.jpeg']:
            # It is an image: temporarily rename it so OpenCV can read it as a
            # one-frame image sequence, then restore the original name.
            single_image_mode = True
            new_name = resources_file_name + '01' + resources_file_ext
            os.rename(args.input, new_name)
            cap = cv2.VideoCapture(new_name, cv2.CAP_IMAGES)
            cap.open(new_name, cv2.CAP_IMAGES)
            os.rename(new_name, args.input)

        else:
            cap = cv2.VideoCapture(args.input)
            cap.open(args.input)

    # The video capture is initialized at this point.

    # Check frames per second of video or cam
    # in order to calculate time of person in frame.
    fps = cap.get(cv2.CAP_PROP_FPS)

    # Get width and height of the video to calculate box positions.
    width = int(cap.get(3))
    height = int(cap.get(4))

    # Loop until stream is over

    while cap.isOpened():

        ### Read from the video capture ###

        flag, frame = cap.read()

        if not flag:
            break
        key_pressed = cv2.waitKey(60)

        ### Pre-process the image as needed ###

        #p_frame = cv2.resize(frame, (net_input_shape[3], net_input_shape[2]))
        #p_frame = p_frame - 127.5
        #p_frame = p_frame * 0.007843
        #p_frame = p_frame.transpose((2,0,1))
        #p_frame = p_frame.reshape(1, *p_frame.shape)

        p_frame = infer_network.preproces_input(frame)

        # Increase frame_counter in each frame.
        frame_counter += 1

        ### Start asynchronous inference for specified request ###
        infer_network.exec_net(p_frame)

        ### Wait for the result ###

        if infer_network.wait() == 0:

            ### Get the results of the inference request ###

            result, infer_time = infer_network.get_output()
            infer_text = "inference time: " + str(round(infer_time, 3)) + " ms"
            font = cv2.FONT_HERSHEY_SIMPLEX
            fontScale = 0.4
            color = (255, 0, 0)
            org = (15, 15)

            frame = cv2.putText(frame, infer_text, org, font, fontScale, color,
                                1)

            current_count = 0
            safe_counter = 0

            for boxes in result[0][0]:
                if (boxes[1] == infer_network.get_person_classId()
                        and boxes[2] > prob_threshold):

                    x_1 = int(width * boxes[3])
                    y_1 = int(height * boxes[4])
                    x_2 = int(width * boxes[5])
                    y_2 = int(height * boxes[6])

                    frame = cv2.rectangle(frame, (x_1, y_1), (x_2, y_2),
                                          (255, 0, 0), 2)
                    current_count += 1

            # Safety control to minimize recognition errors: a count is only
            # considered valid when it repeats across SECUENCIAL_SAFE_FRAMES
            # consecutive frames.

            if all([l == current_count for l in last_n_counters]):
                safe_counter = current_count
            else:
                safe_counter = current_people

            for i in range(SECUENCIAL_SAFE_FRAMES - 1, 0, -1):
                last_n_counters[i] = last_n_counters[i - 1]

            last_n_counters[0] = current_count

            delta_people = safe_counter - current_people
            if delta_people > 0:
                for e in range(delta_people):
                    init_frames.put(frame_counter)

                total_count += delta_people
                current_people = safe_counter

            elif delta_people < 0:
                frames_duration = frame_counter - init_frames.get()
                durations.put(frames_duration / fps)
                current_people = safe_counter

            # Extract any desired stats from the results

            # Calculate and send relevant information on
            # current_count, total_count and duration to the MQTT server
            client.publish(
                "person",
                json.dumps({
                    "count": safe_counter,
                    "total": total_count
                }))
            # Topic "person": keys of "count" and "total"

            # Topic "person/duration": key of "duration"
            while not durations.empty():
                client.publish("person/duration",
                               json.dumps({"duration": durations.get()}))

        # Send the frame to the FFMPEG server

        if not single_image_mode:
            sys.stdout.buffer.write(frame)
            sys.stdout.flush()

        # Write an output image if `single_image_mode`

        if single_image_mode:
            resources_file_name = os.path.splitext(args.input)[0]
            resources_file_ext = os.path.splitext(args.input)[1]
            cv2.imwrite(
                resources_file_name + "_proccesed_" + resources_file_ext,
                frame)
Beispiel #55
    except:
        SWIG_SUPPORT = False
        sys.stderr.write(
            '\033[1;91mSWIG program is not available. Using existing wrapper code, which might be problematic.\033[0m\n'
        )
    else:
        SWIG_SUPPORT = True
    #
    if SWIG_SUPPORT and (not os.path.isfile(WRAPPER_PY) or not os.path.isfile(WRAPPER_CPP) or \
        os.path.getmtime(WRAPPER_CPP) < max([os.path.getmtime(x) for x in [WRAPPER_I] + HEADER + CPP])):
        ret = subprocess.call(['swig'] + SWIG_OPTS +
                              ['-o', WRAPPER_CPP, WRAPPER_I],
                              shell=False)
        if ret != 0:
            sys.exit('Failed to generate cstatgen C++ extension.')
        os.rename('cstatgen.py', WRAPPER_PY)
except OSError as e:
    sys.exit('Failed to generate wrapper file: {0}'.format(e))

# Under linux/gcc, lib stdc++ is needed for C++ based extension.
libs = ['stdc++'] if sys.platform == 'linux2' else []
link_args = ["-lm", "-lz", "-lgsl", "-lgslcblas"]
#

compile_args_umich = [
    "-O3", "-shared", "-std=c++11", "-D_FILE_OFFSET_BITS=64",
    "-D__ZLIB_AVAILABLE__"
]  #, "-o","umichlib.so","-fPIC"]
# "-static", "-static-libgcc", "-static-libstdc++", "-fPIC"]
UMICH_FILES = getfn([
    "clusters/*.cpp", "libsrc/*.cpp", "merlin/*.cpp", "regression/*.cpp",
Beispiel #56
    def _run_symbiotic(self):
        restart_counting_time()

        # disable these optimizations, since LLVM 3.7 does
        # not have them
        self.options.disabled_optimizations = [
            '-aa',
            '-demanded-bits',  # not in 3.7
            '-globals-aa',
            '-forceattrs',  # not in 3.7
            '-inferattrs',
            '-rpo-functionattrs',  # not in 3.7
            '-tti',
            '-bdce',
            '-elim-avail-extern',  # not in 3.6
            '-float2int',
            '-loop-accesses'  # not in 3.6
        ]

        # compile all sources if the file is not given
        # as a .bc file
        if self.options.source_is_bc:
            self.llvmfile = self.sources[0]
        else:
            self._compile_sources()

        if not self.check_llvmfile(self.llvmfile, '-check-concurr'):
            dbg('Unsupported call (probably pthread API)')
            return report_results('unsupported call')

        # link the files that we got on the command line
        # and that we are required to link in on any circumstances
        self.link_unconditional()

        # remove definitions of __VERIFIER_* that are not created by us
        # and syntactically infinite loops
        passes = ['-prepare', '-remove-infinite-loops']

        memsafety = 'VALID-DEREF' in self.options.prp or \
             'VALID-FREE' in self.options.prp or \
             'VALID-MEMTRACK' in self.options.prp or \
             'MEMSAFETY' in self.options.prp
        if memsafety:
            # remove error calls, we'll put there our own
            passes.append('-remove-error-calls')
        elif 'UNDEF-BEHAVIOR' in self.options.prp or\
             'SIGNED-OVERFLOW' in self.options.prp:
            # remove the original calls to __VERIFIER_error and put there
            # new on places where the code exhibits an undefined behavior
            passes += ['-remove-error-calls', '-replace-ubsan']

        self.run_opt(passes=passes)

        # we want to link these functions before instrumentation,
        # because in those we need to check for invalid dereferences
        if memsafety:
            self.link_undefined()
            self.link_undefined()

        # now instrument the code according to properties
        self.instrument()

        passes = self._tool.prepare()
        if passes:
            self.run_opt(passes)

        # link with the rest of libraries if needed (klee-libc)
        self.link()

        # link undefined (no-op when prepare is turned off)
        # (this still can have an effect even in memsafety, since we
        # can link __VERIFIER_malloc0.c or similar)
        self.link_undefined()

        # slice the code
        if not self.options.noslice:
            self.perform_slicing()
        else:
            print_elapsed_time('INFO: Compilation, preparation and '\
                               'instrumentation time')

        # start a new time era
        restart_counting_time()

        # optimize the code after slicing and
        # before verification
        opt = get_optlist_after(self.options.optlevel)
        if opt:
            self.optimize(passes=opt)

        #FIXME: make this KLEE specific
        if not self.check_llvmfile(self.llvmfile):
            dbg('Unsupported call (probably floating handling)')
            return report_results('unsupported call')

        # there may have been created new loops
        passes = ['-remove-infinite-loops']
        passes += self._tool.prepare_after()
        self.run_opt(passes)

        # delete-undefined may insert __VERIFIER_make_symbolic
        # and also other funs like __errno_location may be included
        self.link_undefined()

        if self._linked_functions:
            print('Linked our definitions to these undefined functions:')
            for f in self._linked_functions:
                print_stdout('  ', print_nl=False)
                print_stdout(f)

        # XXX: we could optimize the code again here...
        print_elapsed_time(
            'INFO: After-slicing optimizations and preparation time')

        # tool's specific preprocessing steps
        self.preprocess_llvm()

        if self.options.final_output is not None:
            # copy the file to final_output
            try:
                os.rename(self.llvmfile, self.options.final_output)
                self.llvmfile = self.options.final_output
            except OSError as e:
                msg = 'Cannot create {0}: {1}'.format(
                    self.options.final_output, str(e))
                raise SymbioticException(msg)

        if not self.options.no_verification:
            print('INFO: Starting verification')
            found = self.run_verification()
        else:
            found = 'Did not run verification'

        return report_results(found)
Beispiel #57
def prefix_files(images_path, prefix):
    # Prepend prefix to every file in images_path; os.path.join keeps the
    # paths portable instead of hand-building them with "/".
    for image_name in os.listdir(images_path):
        image_path = os.path.join(images_path, image_name)
        prefixed_image_path = os.path.join(images_path, prefix + "_" + image_name)
        os.rename(image_path, prefixed_image_path)
    resultlayer = plt.imshow(results_grid, alpha = 0.75, cmap = 'coolwarm',  origin = "upper") #vmax = col_lim, vmin = -col_lim,

    cbar = fig.colorbar(resultlayer, ticks=[np.nanmin(results_grid), np.nanmax(results_grid)])
    cbar.ax.set_yticklabels(['Low', 'High'])

    plt.title(pathway + " - " + path_name)
    plt.axis("off")
    plt.savefig(output_image_file, dpi=600,  bbox_inches='tight')

    return output_image_file

# show ensembl data
#file_name= data_path + "Rep1_MOB_count_matrix-1.tsv"
#file_df=read_file(file_name, ensembl = True, clean = True)
#print(output)

#show reactome data
#file_name= data_path + "Ensembl2Reactome_All_Levels.txt"
#output= read_reactome(file_name, gene_name_start = "ENSG0")
#print(output)

#show plot

output = plot_results("R-HSA-1430728", output_image_file = "output5.png", interpolation_method = 'nearest', normalization = False)
os.rename("output5.png", data_path + "output5.png")


# show scaling data
#output= process(file_df,"R-HSA-1430728", return_metrics = False, pathway_generator = pd.DataFrame())
#print(output)
def extract_zips(unzip_dir, files):

    # Handles de-duping; if there are multiple files with the same data,
    # we will use only one of the equivalent set. If there is an entry in
    # the filename dictionary, that takes precedence.
    md5sums = [md5(fn) for fn in files]
    mdict = defaultdict(list)
    for md, fn in zip(md5sums, files):
        mdict[md].append(fn)

    # Try to parse out at least the kit number by trying a series of regular
    # expressions. Adding regular expressions at the end of this list is safer
    # than at the beginning: order is important, and rules at the top are
    # matched first (a worked example follows the name_re list below).

    # constants used in filename regular expressions
    # groupings (?:xxx) are ignored
    ws = r'^[_]?'
    nam1 = r"[a-z]{0,20}|O\&#39;[a-z]{3,20}|O['][a-z]{3,20}"
    cname = r'([\w]{1,20})'  #matches unicode chars; also matches digits though
    pnam = r'\(' + nam1 + r'\)'
    nam2 = r'(?:' + nam1 + '|' + pnam + r')'
    ndate = r'(?:(201[1-8][\d]{4}|201[1-8]-\d\d-\d\d|\d{4}201[1-8]))'
    sep = r'[\-\s\._]'
    seps = r'[\-\s\._]?'
    sepp = r'[\-\s\._]+'
    sepp = r'_'
    sept = r'[\-\s\._]{3}'
    bigy = r'(?:big' + seps + r'y(?:data)?|ydna)'
    rslt = r'(?:results|data|rawdata|vcfdata|raw data|csvexport|raw_data|raw|bigyrawdata)'
    name = r'((?:' + nam2 + seps + '){1,3})'
    kit = r'(?:(?:kit|ftdna)?[ #]?)?([enhb1-9][0-9]{3,6})'
    rzip = r'zip(?:.zip)?'
    snps = r'(?:[\-_]{,3}(?:r[\-_]?)?(?:cts\d{3,6}|fcg\d{4,5}|fgc\d{3,5}x?|p312|z\d{3,5}|df\d{2,3}x?|l\d{2,3}x?|u152|rs\d{4}|d27|sry\d{4}|m222|l\d{4}|s\d{4,6}|mc14|a\d{3,5}|zz\d{2}|zp\d{2}|z\d{2,3}|s\d{3}|pf\d{4}|by\d{3,5}|u106|l2|y\d{4,5}|yp\d{4,5})){1,3}'
    plac = r'(?:Germany|England|UnknownOrigin|Sweden|France|United_?Kingdom|Scotland|Ireland|Netherlands|Europe|Luxembour?g|Wales|Poland|Italy|CzechRepublic|Russia|Puerto-Rico|Switzerland|Algeria|Denmark|Slovakia|US|USA)?'
    name_re = [
        #0 e.g. bigy-Treece-N4826.zip
        (re.compile(ws + sepp.join([bigy, name, kit, rzip]),
                    re.I), 'name', 'kit'),
        #1 e.g. bigy-N4826-Treece.zip
        (re.compile(ws + sepp.join([bigy, kit, name, rzip]),
                    re.I), 'kit', 'name'),
        #2 e.g. N4826-bigy-Treece.zip
        (re.compile(ws + sepp.join([kit, bigy, name, rzip]),
                    re.I), 'kit', 'name'),
        #3 e.g. Treece - N4826 - bigy.zip
        (re.compile(ws + name + sept + kit + sept + bigy + sep + r'?\.zip',
                    re.I), 'name', 'kit'),
        #4 e.g. Treece N4826 bigy.zip
        (re.compile(ws + sepp.join([name, kit, bigy, rzip]),
                    re.I), 'name', 'kit'),
        #5 e.g. Treece N4826 bigy results 20140808.zip
        (re.compile(ws + sepp.join([name, kit, bigy, rslt, ndate, rzip]),
                    re.I), 'name', 'kit'),
        #6 e.g. bigy-Treece-N4826-FGC1233.zip
        (re.compile(ws + sepp.join([bigy, name, kit, snps, rzip]),
                    re.I), 'name', 'kit'),
        #7 e.g. FGC1234-N4826-Treece-England-bigy-rawdata-20140708.zip
        (re.compile(
            ws + sepp.join([snps, kit, name, plac, bigy, rslt, ndate, rzip]),
            re.I), 'kit', 'name'),
        #8 e.g. FGC1234-N4826-Treece-bigy-rawdata-20140708.zip
        (re.compile(ws + sepp.join([snps, kit, name, bigy, rslt, ndate, rzip]),
                    re.I), 'kit', 'name'),
        #9 e.g. FGC1234-Treece-N4826-bigy-rawdata-20140708.zip
        (re.compile(ws + sepp.join([snps, name, kit, bigy, rslt, ndate, rzip]),
                    re.I), 'name', 'kit'),
        #10 e.g. FGC1234-N4826-Treece-bigy-rawdata.zip
        (re.compile(ws + sepp.join([snps, kit, name, bigy, rslt, rzip]),
                    re.I), 'kit', 'name'),
        #11 e.g. FGC1234-N4826-Treece-bigy-rawdata.zip
        (re.compile(ws + sepp.join([snps, kit, name, bigy, rzip]),
                    re.I), 'kit', 'name'),
        #12 e.g. FGC1234-N4826-Treece-England-bigy-rawdata.zip
        (re.compile(ws + sepp.join([snps, kit, name, plac, rslt, ndate, rzip]),
                    re.I), 'kit', 'name'),
        #13 e.g. N4826_Treece_US_BigY_RawData_2018-01-03.zip
        (re.compile(
            ws + sepp.join([kit, name, plac, bigy, rslt, ndate]) + '.zip',
            re.I), 'kit', 'name'),
        #14 e.g. FGC1234-N4826-Treece-bigy-rawdata.zip
        (re.compile(ws + sepp.join([snps, kit, name, rslt, ndate, rzip]),
                    re.I), 'kit', 'name'),
        #15 e.g. FGC1234-N4826-Treece-bigy-20140708.zip
        (re.compile(ws + sepp.join([snps, kit, name, bigy, ndate, rzip]),
                    re.I), 'kit', 'name'),
        #16 e.g. FGC1234-N4826-Treece-20140708.zip
        (re.compile(ws + sepp.join([snps, kit, name, ndate, rzip]),
                    re.I), 'kit', 'name'),
        #17 e.g. N4826-Treece-bigy-data-20140708.zip
        (re.compile(ws + sepp.join([kit, name, bigy, rslt, ndate, rzip]),
                    re.I), 'kit', 'name'),
        #18 e.g. N4826-bigy-Treece-20140708.zip
        (re.compile(ws + sepp.join([kit, bigy, name, ndate, rzip]),
                    re.I), 'kit', 'name'),
        #19 e.g. FGC1234-Treece-N4826.zip
        (re.compile(ws + sepp.join([snps, name, kit, rzip]),
                    re.I), 'name', 'kit'),
        #20 e.g. FGC1234-Treece-N4826-bigy-rawdata.zip
        (re.compile(ws + sepp.join([snps, name, kit, bigy, rslt, rzip]),
                    re.I), 'name', 'kit'),
        #21 e.g. bigy-Lindström-548872.zip
        (re.compile(ws + sepp.join([bigy, cname, kit, rzip]),
                    re.I), 'name', 'kit'),
    ]
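
    # Worked example (editor's note): 'bigy-Treece-N4826.zip' is tried against
    # these rules top-down and first hits rule 0, yielding name='Treece' and
    # kit='N4826'; files matching no rule are collected in nomatch below.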

    trace(2, 'File names mapped, according to which regular expression:')
    # track counts - only for diagnostics
    cnt = defaultdict(int)
    # list of non-matching files
    nomatch = []
    # all of the file names we could parse
    fname_dict = {}

    bed_re = re.compile(r'(\b(?:\w*[^_/])?regions(?:\[\d\])?\.bed)')
    vcf_re = re.compile(r'(\b(?:\w*[^_/])?variants(?:\[\d\])?\.vcf)')
    zip_re = re.compile(r'(\b(?:\w*[^_/])?bigy.*\.zip)')
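    # These pick the payload members out of each zip's name list: a BED of
    # covered regions, a VCF of variant calls, and (for doubly-wrapped kits)
    # an inner bigy zip. The [^_/] guard keeps underscore-prefixed names such
    # as 'foo_regions.bed' from matching.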
    mgroup = []
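    # Each mdict entry groups equivalent files under one key; rename_dict
    # overrides are tried first by that key (rule 'dm'), then by any member's
    # bare filename (rule 'df'), before falling back to the regexes above.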
    for md, fnames in mdict.items():
        if md in rename_dict:
            kkit, nname = rename_dict[md]
            pathname = fnames[0]
            fname = os.path.split(pathname)[-1]
            rule = 'dm'
        else:
            for pathname in fnames:
                fname = os.path.split(pathname)[-1]
                if fname in rename_dict:
                    kkit, nname = rename_dict[fname]
                    rule = 'df'
                    break
            else:
                kkit = nname = None
        if not keepfile(md):
            trace(2, '{} skipped because of subsetting'.format(fname))
            continue

        if kkit:
            if kkit in ('None', '') and nname in ('None', ''):
                kkit = nname = None
                trace(
                    2, '{} skipped because of entry in mappings dictionary'.
                    format(fname))
            else:
                trace(
                    2, '{3:>2} {0:<50s} {1:<15s} {2:<10s}'.format(
                        fname, nname, kkit, rule))
                cnt[rule] += 1
            fname_dict[pathname] = kkit, nname
        else:
            pathname = fnames[0]  # only use one of the equivalent set
            if os.path.splitext(pathname)[-1].lower() != '.zip':
                trace(
                    2,
                    'Found non-zip file in zip directory: {0}'.format(fname))
                continue
            d = {}
            fname = os.path.split(pathname)[-1]
            for ii, (r, k1, k2) in enumerate(name_re):
                s = r.search(fname)
                if s:
                    d[k1] = s.groups()[0]
                    if k2:
                        d[k2] = s.groups()[1]
                    else:
                        d['name'] = 'Unknown'
                    d['name'] = name_preference(d['name'])
                    try:
                        trace(
                            2, '{3:>2} {0:<50s} {1:<15s} {2:<10s}'.format(
                                fname, d['name'], d['kit'], ii))
                        cnt[ii] += 1
                        fname_dict[pathname] = d['kit'], d['name']
                    except KeyError:
                        trace(0, 'FAILURE on filename:', fname)
                    break
            else:
                nomatch.append(pathname)
        if len(fnames) > 1:
            for eq in [p for p in fnames if p != pathname]:
                trace(2, '  -same: {}'.format(os.path.split(eq)[-1]))

    trace(2, 'Number of filenames not matched: {0}'.format(len(nomatch)))
    trace(2, 'Which expressions were matched:')

    def keyfunc(v):
        return '{0!s:0>2}'.format(v)

    for nn in sorted(cnt, key=keyfunc):
        trace(2, '{0:>2}: {1:>4}'.format(nn, cnt[nn]))

    if nomatch:
        trace(1, 'Files that did not match:')
        for ll in nomatch:
            trace(1, ll.strip())
    else:
        trace(1, 'All files matched a rule')

    zipcount = 0

    # keep track of what needs to be cleaned up
    emptydirs = []

    import zipfile

    # For each parsed zip: locate the BED/VCF members (or an inner bigy zip
    # that holds them), extract them, then either hard-link the original zip
    # alongside (the 'rename' option) or rename the members to
    # kitname-kitnumber.bed/.vcf.
    for fname, (kitnumber, kitname) in fname_dict.items():
        if kitnumber is None:
            continue
        if keep_files:
            vcffile = os.path.join(unzip_dir,
                                   '%s-%s.vcf' % (kitname, kitnumber))
            bedfile = os.path.join(unzip_dir,
                                   '%s-%s.bed' % (kitname, kitnumber))
            # no checking to see if the contents are good, but that's what was asked for
            if os.path.isfile(vcffile) and os.path.isfile(bedfile):
                trace(
                    1, '%s-%s already exists and keep flag - skipping' %
                    (kitname, kitnumber))
                continue
        try:
            zf = zipfile.ZipFile(fname)
        except (zipfile.BadZipFile, OSError):
            trace(
                0,
                'WARN: not a zip file: {} from {}'.format(fname, os.getcwd()))
            continue
        listfiles = zf.namelist()
        bedfile = vcffile = zipfname = None
        for ff in listfiles:
            dirname, basename = os.path.split(ff)
            if bed_re.search(basename):
                bedfile = ff
            elif vcf_re.search(basename):
                vcffile = ff
            elif zip_re.search(basename):
                zipfname = ff
            if dirname and (dirname not in emptydirs):
                emptydirs.append(dirname)
        if (not bedfile) or (not vcffile):
            if not zipfname:
                trace(0, 'WARN: missing data in ' + fname)
                continue
            else:
                # some kits wrap the payload in an inner bigy zip; extract
                # it and read the standard member names from it instead
                try:
                    zf.extractall(unzip_dir, [zipfname])
                    emptydirs.append(os.path.join(unzip_dir, zipfname))
                    bedfile = 'regions.bed'
                    vcffile = 'variants.vcf'
                    zf = zipfile.ZipFile(os.path.join(unzip_dir, zipfname))
                except (zipfile.BadZipFile, OSError):
                    trace(0, 'WARN: missing data in ' + fname)
                    continue
        base = '%s-%s' % (kitname, kitnumber)
        try:
            zf.extractall(unzip_dir, [bedfile, vcffile])
        except RuntimeError:
            trace(0, 'WARN: {} would not extract - encrypted?'.format(base))

        fpath = os.path.join(unzip_dir, '%s')
        trace(3, fpath % base)
        if namespace.rename:
            try:
                os.link(fname, (fpath % base) + '.zip')
                trace(1, 'ln {} {}.zip'.format(fname, (fpath % base)))
            except OSError:
                shutil.copy2(fname, (fpath % base) + '.zip')
                trace(1, 'cp -p {} {}.zip'.format(fname, (fpath % base)))
            emptydirs.append(fpath % bedfile)
            emptydirs.append(fpath % vcffile)
        else:
            try:
                os.rename(fpath % bedfile, (fpath % base) + '.bed')
                os.rename(fpath % vcffile, (fpath % base) + '.vcf')
            except OSError:
                trace(
                    0,
                    'WARN: could not identify VCF and/or BED file for ' + base)
        zipcount += 1

    trace(0, '%d new files extracted' % zipcount)

    # clean up any empty dirs unzip created
    if emptydirs:
        trace(3, 'Trying to remove droppings:')
        for d in emptydirs:
            if os.path.isfile(d):
                os.unlink(d)
        for d in emptydirs:
            if os.path.isfile(d):
                continue
            dp = os.path.join(unzip_dir, d)
            try:
                os.removedirs(dp)
                trace(3, '  {0}'.format(dp))
            except FileNotFoundError:
                pass
            except OSError:
                trace(3, '  W! could not remove {0}'.format(dp))

    # list of file names we unzipped
    files = os.listdir(unzip_dir)
    return files
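# A minimal standalone sketch of the extract-then-rename pattern used above.
# The paths 'sample.zip' and 'out' and the helper name extract_payload are
# illustrative assumptions, not part of the original script:

import os
import zipfile

def extract_payload(zippath, outdir, base):
    """Pull regions.bed/variants.vcf out of a kit zip and normalize names."""
    with zipfile.ZipFile(zippath) as zf:
        # extract only the two payload members, wherever they sit in the zip
        members = [m for m in zf.namelist()
                   if m.endswith(('regions.bed', 'variants.vcf'))]
        zf.extractall(outdir, members)
    for m in members:
        ext = os.path.splitext(m)[-1]
        os.rename(os.path.join(outdir, m), os.path.join(outdir, base + ext))

# usage (hypothetical): extract_payload('sample.zip', 'out', 'Treece-N4826')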
Beispiel #60
0
import os

# Move every 4th font image into a per-letter training folder A..Z.
classes = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
char = -1  # -1 so the walk's root dir maps to classes[-1 % 26] ('Z');
           # the root itself normally holds no image files
k = 0      # sample-set counter, bumped once per pass through the alphabet
source = "English Font Image"
dist = "Training"
for sample, _, files in os.walk(source):
    i = 0
    target = os.path.join(dist, classes[char % 26])
    if not os.path.exists(target):
        os.mkdir(target)
    if char % 26 == 0:
        k += 1
    for file in files:
        # keep one image out of every four for the training set
        if i % 4 == 0:
            os.rename(os.path.join(sample, file),
                      os.path.join(target, str(k) + '-' + file))
        i += 1
    char += 1
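# A variant sketch: os.renames() could replace the explicit mkdir by creating
# Training/<letter> on demand, e.g.
#   os.renames(os.path.join(sample, file),
#              os.path.join(target, str(k) + '-' + file))
# Note os.renames() also prunes source directories left empty by the move,
# which may be unwanted here since only every 4th file is relocated.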