Example #1
 def dumpToFile(self, filename):
     ext = os.path.splitext(filename)[1].lower()
     if ext == '.svg':
         if self.fileSrc is not None:
             shutil.copyfile(self.fileSrc, filename)
     else:
         super(SVGCellWidget, self).dumpToFile(filename)
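As background for this and the rest of the page: shutil.copyfile(src, dst) copies file contents only (no permissions or timestamps; use shutil.copy2 for those), and dst must be a writable file path, not a directory. A minimal, self-contained sketch:

import os
import shutil
import tempfile

# Create a throwaway source file (contents are arbitrary).
fd, src = tempfile.mkstemp(suffix='.txt')
os.write(fd, b'hello')
os.close(fd)

dst = src + '.copy'
shutil.copyfile(src, dst)  # byte-for-byte copy of the contents only
assert open(dst, 'rb').read() == b'hello'

os.remove(src)
os.remove(dst)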
Example #2
  def PublishManifest(self, manifest, version, build_id=None):
    """Publishes the manifest as the manifest for the version to others.

    Args:
      manifest: Path to manifest file to publish.
      version: Manifest version string, e.g. 6102.0.0-rc4
      build_id: Optional integer giving build_id of the build that is
                publishing this manifest. If specified and non-negative,
                build_id will be included in the commit message.
    """
    # Note: This commit message is used by master.cfg for figuring out when to
    #       trigger slave builders.
    commit_message = 'Automatic: Start %s %s %s' % (self.build_names[0],
                                                    self.branch, version)
    if build_id is not None and build_id >= 0:
      commit_message += '\nCrOS-Build-Id: %s' % build_id

    logging.info('Publishing build spec for: %s', version)
    logging.info('Publishing with commit message: %s', commit_message)
    logging.debug('Manifest contents below.\n%s', osutils.ReadFile(manifest))

    # Copy the manifest into the manifest repository.
    spec_file = '%s.xml' % os.path.join(self.all_specs_dir, version)
    osutils.SafeMakedirs(os.path.dirname(spec_file))

    shutil.copyfile(manifest, spec_file)

    # Actually push the manifest.
    self.PushSpecChanges(commit_message)
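For concreteness, here is a hedged sketch of the commit message the method above builds, using made-up builder, branch, version and build id values:

build_names = ['x86-generic-paladin']   # hypothetical
branch, version, build_id = 'release-R40', '6102.0.0-rc4', 123

commit_message = 'Automatic: Start %s %s %s' % (build_names[0], branch, version)
if build_id is not None and build_id >= 0:
    commit_message += '\nCrOS-Build-Id: %s' % build_id

print(commit_message)
# Automatic: Start x86-generic-paladin release-R40 6102.0.0-rc4
# CrOS-Build-Id: 123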
Example #3
def build_launcher(out_path, icon_path, file_desc, product_name, product_version,
                   company_name, entry_point, is_gui):

    src_ico = os.path.abspath(icon_path)
    target = os.path.abspath(out_path)

    file_version = product_version

    dir_ = os.getcwd()
    temp = tempfile.mkdtemp()
    try:
        os.chdir(temp)
        with open("launcher.c", "w") as h:
            h.write(get_launcher_code(entry_point))
        shutil.copyfile(src_ico, "launcher.ico")
        with open("launcher.rc", "w") as h:
            h.write(get_resource_code(
                os.path.basename(target), file_version, file_desc,
                "launcher.ico", product_name, product_version, company_name))

        build_resource("launcher.rc", "launcher.res")
        build_exe("launcher.c", "launcher.res", is_gui, target)
    finally:
        os.chdir(dir_)
        shutil.rmtree(temp)
Example #4
    def execute(self):
        settings = QSettings()
        lastDir = settings.value('Processing/lastModelsDir', '')
        filename = QFileDialog.getOpenFileName(self.toolbox,
                                               self.tr('Open model', 'AddModelFromFileAction'), lastDir,
                                               self.tr('Processing model files (*.model *.MODEL)', 'AddModelFromFileAction'))
        if filename:
            try:
                settings.setValue('Processing/lastModelsDir',
                                  QFileInfo(filename).absoluteDir().absolutePath())

                ModelerAlgorithm.fromFile(filename)
            except WrongModelException:
                QMessageBox.warning(
                    self.toolbox,
                    self.tr('Error reading model', 'AddModelFromFileAction'),
                    self.tr('The selected file does not contain a valid model', 'AddModelFromFileAction'))
                return
            except:
                QMessageBox.warning(self.toolbox,
                                    self.tr('Error reading model', 'AddModelFromFileAction'),
                                    self.tr('Cannot read file', 'AddModelFromFileAction'))
                return
            destFilename = os.path.join(ModelerUtils.modelsFolder(), os.path.basename(filename))
            shutil.copyfile(filename, destFilename)
            self.toolbox.updateProvider('model')
Example #5
  def UpdateVersionFile(self, message, dry_run, push_to=None):
    """Update the version file with our current version."""

    if not self.version_file:
      raise VersionUpdateException('Cannot call UpdateVersionFile without '
                                   'an associated version_file')

    components = (('CHROMEOS_BUILD', self.build_number),
                  ('CHROMEOS_BRANCH', self.branch_build_number),
                  ('CHROMEOS_PATCH', self.patch_number),
                  ('CHROME_BRANCH', self.chrome_branch))

    with tempfile.NamedTemporaryFile(prefix='mvp') as temp_fh:
      with open(self.version_file, 'r') as source_version_fh:
        for line in source_version_fh:
          for key, value in components:
            line = re.sub(self.KEY_VALUE_PATTERN % (key,),
                          '%s=%s\n' % (key, value), line)
          temp_fh.write(line)

      temp_fh.flush()

      repo_dir = os.path.dirname(self.version_file)

      try:
        git.CreateBranch(repo_dir, PUSH_BRANCH)
        shutil.copyfile(temp_fh.name, self.version_file)
        _PushGitChanges(repo_dir, message, dry_run=dry_run, push_to=push_to)
      finally:
        # Update to the remote version that contains our changes. This is needed
        # to ensure that we don't build a release using a local commit.
        git.CleanAndCheckoutUpstream(repo_dir)
Example #6
    def export(self, dst_dir):
        logger.debug('export() *Entered*')
        logger.debug("  name="+self.name)
        if os.path.exists(dst_dir):
            shutil.rmtree(dst_dir)

        src_dir = self.files_root

        #FIXME: Separate tb_files into their own directory tree (src/tb/core_name ?)
        src_files = []
        if self.verilog:
            src_files += self.verilog.export()
        if self.vpi:
            src_files += self.vpi.export()

        dirs = list(set(map(os.path.dirname,src_files)))
        logger.debug("export src_files=" + str(src_files))
        logger.debug("export dirs=" + str(dirs))
        for d in dirs:
            if not os.path.exists(os.path.join(dst_dir, d)):
                os.makedirs(os.path.join(dst_dir, d))

        for f in src_files:
            if(os.path.exists(os.path.join(src_dir, f))):
                shutil.copyfile(os.path.join(src_dir, f), 
                                os.path.join(dst_dir, f))
            else:
                print("File " + os.path.join(src_dir, f) + " doesn't exist")
        logger.debug('export() -Done-')
Example #7
File: ssh.py Project: jaypei/salt
    def test_absent(self):
        '''
        ssh_known_hosts.absent
        '''
        known_hosts = os.path.join(integration.FILES, 'ssh', 'known_hosts')
        shutil.copyfile(known_hosts, KNOWN_HOSTS)
        if not os.path.isfile(KNOWN_HOSTS):
            self.skipTest(
                'Unable to copy {0} to {1}'.format(
                    known_hosts, KNOWN_HOSTS
                )
            )

        kwargs = {'name': 'github.com', 'user': '******', 'config': KNOWN_HOSTS}
        # test first
        ret = self.run_state('ssh_known_hosts.absent', test=True, **kwargs)
        self.assertSaltNoneReturn(ret)

        # remove once, the key is gone
        ret = self.run_state('ssh_known_hosts.absent', **kwargs)
        self.assertSaltStateChangesEqual(
            ret, GITHUB_FINGERPRINT, keys=('old', 'fingerprint')
        )

        # remove twice, nothing has changed
        ret = self.run_state('ssh_known_hosts.absent', **kwargs)
        self.assertSaltStateChangesEqual(ret, {})

        # test again
        ret = self.run_state('ssh_known_hosts.absent', test=True, **kwargs)
        self.assertSaltNoneReturn(ret)
Example #8
def WriteGNGEO(main_parser, x):
    # read config files
    # open(gngeo_cfg,'w').close()
    if os.path.exists(gngeo_cfg) == True:
        # backup current config
        shutil.copyfile(gngeo_cfg, gngeo_cfg + ".bak")
        config = open(gngeo_cfg).read()

        # read variables
        joystick = str(x)
        # gngeo 0.8 should look like this
        # p1control A=J0B2,B=J0B1,C=J0B3,D=J0B0,START=J0B9,COIN=J0B8,UP=J0a1,DOWN=J0a1,LEFT=J0A0,RIGHT=J0A0
        inputConfig = "\np" + str(x + 1) + "control "
        inputConfig += " A=J" + joystick + GNGEOHelper(main_parser, "input_b")
        inputConfig += " B=J" + joystick + GNGEOHelper(main_parser, "input_a")
        inputConfig += " C=J" + joystick + GNGEOHelper(main_parser, "input_y")
        inputConfig += " D=J" + joystick + GNGEOHelper(main_parser, "input_x")
        inputConfig += " START=J" + joystick + GNGEOHelper(main_parser, "input_start")
        inputConfig += " COIN=J" + joystick + GNGEOHelper(main_parser, "input_select")
        inputConfig += " UP=J" + joystick + GNGEOHelper(main_parser, "input_up")
        inputConfig += " DOWN=J" + joystick + GNGEOHelper(main_parser, "input_down")
        inputConfig += " LEFT=J" + joystick + GNGEOHelper(main_parser, "input_left")
        inputConfig += " RIGHT=J" + joystick + GNGEOHelper(main_parser, "input_right")

        # config  = config.replace('p' + joystick +'control',# 'p' + joystick + 'control'")
        config += inputConfig

        # write gngeorc
        with open(gngeo_cfg, "w") as configfile:
            configfile.write(config)
            configfile.close()
        return 0
    return -1
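A hedged sketch of the single line WriteGNGEO appends for player 1 on joystick 0, assuming GNGEOHelper() returns fragments such as 'B2' or 'a1' (all mappings below are hypothetical):

joystick = '0'
codes = [('A', 'B2'), ('B', 'B1'), ('C', 'B3'), ('D', 'B0'),
         ('START', 'B9'), ('COIN', 'B8'), ('UP', 'a1'), ('DOWN', 'a1'),
         ('LEFT', 'A0'), ('RIGHT', 'A0')]
line = 'p1control ' + ' '.join('%s=J%s%s' % (k, joystick, v) for k, v in codes)
print(line)
# p1control A=J0B2 B=J0B1 C=J0B3 D=J0B0 START=J0B9 COIN=J0B8 UP=J0a1 DOWN=J0a1 LEFT=J0A0 RIGHT=J0A0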
Example #9
def run_merge(filenames):
    """Merges all Skype databases to a new database."""
    dbs = [skypedata.SkypeDatabase(f) for f in filenames]
    db_base = dbs.pop()
    counts = collections.defaultdict(lambda: collections.defaultdict(int))
    postbacks = Queue.Queue()
    postfunc = lambda r: postbacks.put(r)
    worker = workers.MergeThread(postfunc)

    name, ext = os.path.splitext(os.path.split(db_base.filename)[-1])
    now = datetime.datetime.now().strftime("%Y%m%d")
    filename_final = util.unique_path("%s.merged.%s%s" %  (name, now, ext))
    print("Creating %s, using %s as base." % (filename_final, db_base))
    shutil.copyfile(db_base.filename, filename_final)
    db2 = skypedata.SkypeDatabase(filename_final)
    chats2 = db2.get_conversations()
    db2.get_conversations_stats(chats2)

    for db1 in dbs:
        chats = db1.get_conversations()
        db1.get_conversations_stats(chats)
        bar_total = sum(c["message_count"] for c in chats)
        bar_text = " Processing %.*s.." % (30, db1)
        bar = ProgressBar(max=bar_total, afterword=bar_text)
        bar.start()
        args = {"db1": db1, "db2": db2, "chats": chats,
                "type": "diff_merge_left"}
        worker.work(args)
        while True:
            result = postbacks.get()
            if "error" in result:
                print("Error merging %s:\n\n%s" % (db1, result["error"]))
                worker = None # Signal for global break
                break # break while True
            if "done" in result:
                break # break while True
            if "diff" in result:
                counts[db1]["chats"] += 1
                counts[db1]["msgs"] += len(result["diff"]["messages"])
                msgcounts = sum(c["message_count"] for c in result["chats"])
                bar.update(bar.value + msgcounts)
            if result["output"]:
                log(result["output"])
        if not worker:
            break # break for db1 in dbs
        bar.stop()
        bar.afterword = " Processed %s." % db1
        bar.update(bar_total)
        print

    if not counts:
        print("Nothing new to merge.")
        db2.close()
        os.unlink(filename_final)
    else:
        for db1 in dbs:
            print("Merged %s in %s from %s." %
                  (util.plural("message", counts[db1]["msgs"]),
                   util.plural("chat", counts[db1]["chats"]), db1))
        print("Merge into %s complete." % db2)
Example #10
def dload(symlist,barlist,strikelist,expirylist):
    print symlist,barlist
    global bar, sym
    trans_id = 0
    strikelist = [1]
    expirylist  = [1]
    for sym in symlist:
        print sym
        for bar in barlist:
            for strike in strikelist:
                for expiry in expirylist:
                    fname = DataDown+ today + '.' + sym + '.'  + bar.replace(' ','')+'.ddload.csv'
                    Mod_TicksUtile.backupTickfiles(fname)
                    ##########
                    duration = bardict[bar]
                    barspaced = bardictspaced[bar]
                    contract = Mod_ibutiles.create_contract(sym,strike,expiry)
                    ticktype = ticktypedict[sym]
                    print bar, sym, duration,ticktype, barspaced, strike, expiry
                    tws_conn.reqHistoricalData(trans_id, contract, '', duration, barspaced, ticktype, 0, 2)
                    trans_id = trans_id + 1  
                    sleep(20)
                    
                    tmp = DataDown+ today + '.' + 'tempdlfile' + '.ddload.csv'
                    fname = DataDown+ today + '.' + sym + '.'  + bar+'.ddload.csv'
                    shutil.copyfile(tmp,fname)
                    Mod_TicksUtile.throw_out_lastbar(fname)
Example #11
def _copy_contents(dst_dir, contents):
    items = {"dirs": set(), "files": set()}

    for path in contents:
        if isdir(path):
            items['dirs'].add(path)
        elif isfile(path):
            items['files'].add(path)

    dst_dir_name = basename(dst_dir)

    if dst_dir_name == "src" and len(items['dirs']) == 1:
        copytree(list(items['dirs']).pop(), dst_dir, symlinks=True)
    else:
        makedirs(dst_dir)
        for d in items['dirs']:
            copytree(d, join(dst_dir, basename(d)), symlinks=True)

    if not items['files']:
        return

    if dst_dir_name == "lib":
        dst_dir = join(dst_dir, mkdtemp(dir=dst_dir))

    for f in items['files']:
        copyfile(f, join(dst_dir, basename(f)))
Example #12
 def download(self, cameras, path):
     left_dir = os.path.join(path, 'left')
     right_dir = os.path.join(path, 'right')
     target_dir = os.path.join(path, 'raw')
     if not os.path.exists(target_dir):
         os.mkdir(target_dir)
     left_pages = [os.path.join(left_dir, x)
                   for x in sorted(os.listdir(left_dir))]
     right_pages = [os.path.join(right_dir, x)
                    for x in sorted(os.listdir(right_dir))]
     # Write the orientation as a JPEG comment to the end of the file
     if len(left_pages) != len(right_pages):
         logger.warn("The left and right camera produced an inequal"
                     " amount of images, please fix the problem!")
         logger.warn("Will not combine images")
         return
     if (self.config['first_page']
             and not self.config['first_page'].get(str) == 'left'):
         combined_pages = reduce(operator.add, zip(right_pages, left_pages))
     else:
         combined_pages = reduce(operator.add, zip(left_pages, right_pages))
     logger.info("Combining images.")
     for idx, fname in enumerate(combined_pages):
         fext = os.path.splitext(os.path.split(fname)[1])[1]
         target_file = os.path.join(target_dir, "{0:04d}{1}"
                                    .format(idx, fext))
         shutil.copyfile(fname, target_file)
     shutil.rmtree(right_dir)
     shutil.rmtree(left_dir)
Example #13
def cpMCNPproject(directory):
    wkdir=getcwd()
    if checkifMCNPproject(directory,1)==1:
        return 1
    elif checkifMCNPproject(wkdir,2)==2:
        return 2
    else:
        cards = [   path.join(directory,"cards/parameters.part"),
                    path.join(directory,"cards/materials.part"),
                    path.join(directory,"cards/source.part"),
                    path.join(directory,"cards/tallies.part"),
                    path.join(directory,"cards/traslations.part")]
        geom  = [   path.join(directory,"geom/cells.part"),
                    path.join(directory,"geom/surfaces.part")]
        for card in cards:
            try:
                copyfile(card, path.join(wkdir, "cards/",path.basename(card)))
            except Exception as e:
                print "\n\033[1;34mMCNPmanager cp error:\033[1;32m %s \033[0m\n" % (e)

        for g in geom:
            try:
                copyfile(g, path.join(wkdir, "geom/",path.basename(g)))
            except Exception as e:
                print "\n\033[1;34mMCNPmanager cp error:\033[1;32m %s \033[0m\n" % (e)
        return 0
Example #14
def create_labelled_dataset(source_directory, destination_directory):

	#a running sample number appended to the file names at the destination
	sample_no = 1
	file_paths = []

	#check if the destination directory exists or not, create a new one
	if not os.path.exists(destination_directory):
		os.makedirs(destination_directory)

	#read inside directories recursively
	#root holds the current directory path, dirs its subdirectories, files its file names
	for root, dirs, files in os.walk(source_directory):
		print "Traversing ____________", root
		
		for _file in files:
			file_path = root + "/" + _file
			class_name = str(_file)[:3]
			dest_file_path = destination_directory + "/" + class_name
			
			#check if the class labeled directory exists or not, else create one
			if not os.path.exists(dest_file_path):
				os.makedirs(dest_file_path)

			sample_no += 1
			
			#copy the file from the source to the destination class labelled directory
			shutil.copyfile(file_path, dest_file_path + "/" + class_name + \
								str(sample_no) + ".tiff")
Example #15
    def test_upgrade_pstate_files(self):
        """
        Test whether the existing pstate files are correctly updated to 7.1.
        """
        os.makedirs(os.path.join(self.state_dir, STATEDIR_DLPSTATE_DIR))

        # Copy an old pstate file
        src_path = os.path.join(self.CONFIG_PATH, "download_pstate_70.state")
        shutil.copyfile(src_path, os.path.join(self.state_dir, STATEDIR_DLPSTATE_DIR, "download.state"))

        # Copy a corrupt pstate file
        src_path = os.path.join(self.CONFIG_PATH, "download_pstate_70_corrupt.state")
        corrupt_dest_path = os.path.join(self.state_dir, STATEDIR_DLPSTATE_DIR, "downloadcorrupt.state")
        shutil.copyfile(src_path, corrupt_dest_path)

        old_config = RawConfigParser()
        old_config.read(os.path.join(self.CONFIG_PATH, "tribler70.conf"))
        convert_config_to_tribler71(old_config, state_dir=self.state_dir)

        # Verify whether the section is correctly renamed
        download_config = RawConfigParser()
        download_config.read(os.path.join(self.state_dir, STATEDIR_DLPSTATE_DIR, "download.state"))
        self.assertTrue(download_config.has_section("download_defaults"))
        self.assertFalse(download_config.has_section("downloadconfig"))
        self.assertFalse(os.path.exists(corrupt_dest_path))

        # Do the upgrade again, it should not fail
        convert_config_to_tribler71(old_config, state_dir=self.state_dir)
Example #16
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--extension-dll',
                      help='The extension dll file.')
  parser.add_argument('--target-dir',
                      help='Target directory for the .dll and hook file.')

  args = parser.parse_args()

  if not os.path.isfile(args.extension_dll):
    print "Error: %s is not a file." % args.extension_dll
    sys.exit(2)

  dll_file = os.path.basename(args.extension_dll)
  if dll_file not in MOUDLE_MAP:
    print "Error: can't find the module name for dll - %s" % args.extension_dll
    sys.exit(3)

  shutil.copyfile(args.extension_dll,
                  os.path.join(args.target_dir, dll_file))

  hooks_file = os.path.join(args.target_dir, "XWalkExtensionHooks.js")
  # Prepend a MODULE_NAME line to the template js hooks to make the module hooks.
  open(hooks_file, 'w').write('var MODULE_NAME = "' +
                              MOUDLE_MAP[dll_file] + '";')
  open(hooks_file, 'a').write(open(DEFAULT_TEMPLATE, 'r').read())
Example #17
    def initialize(self, test, log):
        '''Does the init part of the test:
        1. Finds the initial count of entries in the log
        2. Creates a file 'cron' under cron.d
        3. Backs up /etc/crontab
        4. Modifies /etc/crontab'''
        self.log = log

        self.initial_count = self.count_log('Cron automation')
        f = open('/etc/cron.d/cron', 'w')
        f.write('''#!/bin/bash
touch  %s
echo 'Cron automation' >>  %s
        ''' % (self.log, self.log))
        f.close()
        utils.system('chmod +x /etc/cron.d/cron')
        shutil.copyfile('/etc/crontab', '/tmp/backup')
        f = open('/etc/crontab', 'w')
        f.write('* * * * * root run-parts /etc/cron.d/\n')
        f.close()
        if test == 'deny_cron':
            if os.path.exists('/etc/cron.d/jobs.deny'):
                shutil.move('/etc/cron.d/jobs.deny', '/tmp/jobs.deny')
            f = open('/etc/cron.d/jobs.deny', 'w')
            f.write('cron')
            f.close()
        elif test == 'allow_cron':
            os.remove('/etc/cron.d/jobs.deny')
            if os.path.exists('/etc/cron.d/jobs.allow'):
                shutil.move('/etc/cron.d/jobs.allow', '/tmp/jobs.allow')
            f = open('/etc/cron.d/jobs.allow', 'w')
            f.write('cron')
            f.close()
Example #18
def copy_inputs(config_file, InputsDir):

    config_dict = read_config(config_file)

    config = SafeConfigParser()
    config.optionxform = str
    config.read(config_file)

    new_config = os.path.join(InputsDir, os.path.split(config_file)[1])

    # ---------------------------------------------------------------- #
    # copy the inputs
    for key, section in config_dict.iteritems():
        if 'FILE_NAME' in section.keys():
            new_file_name = os.path.join(InputsDir,
                                         os.path.split(section['FILE_NAME'])[1])

            copyfile(section['FILE_NAME'], new_file_name)

            # update the config file for an easy restart
            config.set(key, 'FILE_NAME',
                       os.path.join(InputsDir,
                                    os.path.split(section['FILE_NAME'])[1]))

            # update the config_dict with the new value
            config_dict[key]['FILE_NAME'] = new_file_name
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # write the new configuration file
    with open(new_config, 'w') as configfile:
        config.write(configfile)
    # ---------------------------------------------------------------- #

    return config_dict
Example #19
def copy_rails_config_file(directory, filename, flag=0):
    """
    Description: copy a config file from a template directory into the current working directory.

    Vars:
        1. f_dst: destination file name
        2. f_src: source file name
        3. flag
            1 => copy ".project"
            2 => copy "do_heroku.bat"
        4. content: content of ".project"
        5. fin: file object
        6. fout: file object
        7. directory: full directory path
    """
    f_dst = os.path.join(os.getcwd(), filename)
    f_src = os.path.join(os.path.dirname(inspect.currentframe().f_code.co_filename), directory, filename)
    if flag == 1:
        fin = open(f_src, "r")
        #        content = f.read()
        content = fin.read()
        content = content.replace("@project_name@", os.path.basename(os.getcwd()))
        #                    os.path.basename(f_dst))
        fout = open(f_dst, "w")
        fout.write(content)

        fin.close()
        fout.close()

    else:
        shutil.copyfile(f_src, f_dst)
    print "File copied"
    print "\t", "From: %s" % f_src
    print "\t", "To: %s" % f_dst
Example #20
	def save_db(self, userpath):
		
		# create the folder to save it by profile
		relative_path = constant.folder_name + os.sep + 'firefox'
		if not os.path.exists(relative_path):
			os.makedirs(relative_path)
		
		relative_path += os.sep + os.path.basename(userpath)
		if not os.path.exists(relative_path):
			os.makedirs(relative_path)
		
		# Get the database name
		if os.path.exists(userpath + os.sep + 'logins.json'):
			dbname = 'logins.json'
		elif os.path.exists(userpath + os.sep + 'signons.sqlite'):
			dbname = 'signons.sqlite'
		
		# copy the files (database + key3.db)
		try:
			ori_db = userpath + os.sep + dbname
			dst_db = relative_path + os.sep + dbname
			shutil.copyfile(ori_db, dst_db)
			print_debug('INFO', '%s has been copied here: %s' % (dbname, dst_db))
		except Exception,e:
			print_debug('DEBUG', '{0}'.format(e))
			print_debug('ERROR', '%s has not been copied' % dbname)
Example #21
 def ensure_file_exists(self, src, target):
     target = os.path.abspath(target)
     if not os.path.exists(target):
         self.makedirs(os.path.split(target)[0])
         shutil.copyfile(src, target)
         if USERNAME == "root":
             os.chown(target, self.uid, self.uid)
Example #22
    def write_oplog_progress(self):
        """ Writes oplog progress to file provided by user
        """

        if self.oplog_checkpoint is None:
            return None

        with self.oplog_progress as oplog_prog:
            oplog_dict = oplog_prog.get_dict()
        items = [[name, util.bson_ts_to_long(oplog_dict[name])]
                 for name in oplog_dict]
        if not items:
            return

        # write to temp file
        backup_file = self.oplog_checkpoint + '.backup'
        os.rename(self.oplog_checkpoint, backup_file)

        # for each of the threads write to file
        with open(self.oplog_checkpoint, 'w') as dest:
            if len(items) == 1:
                # Write 1-dimensional array, as in previous versions.
                json_str = json.dumps(items[0])
            else:
                # Write a 2d array to support sharded clusters.
                json_str = json.dumps(items)
            try:
                dest.write(json_str)
            except IOError:
                # Wipe the partial write, then restore from the backup copy
                dest.seek(0)
                dest.truncate()
                with open(backup_file, 'r') as backup:
                    shutil.copyfileobj(backup, dest)  # file objects, so copyfileobj, not copyfile

        os.remove(backup_file)
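The two on-disk layouts mentioned in the comments above, illustrated with hypothetical thread names and BSON-timestamp longs:

import json

single = ['oplog-thread-0', 6448139138800290817]      # 1-d array, legacy format
sharded = [['rs0', 6448139138800290817],
           ['rs1', 6448139138800290818]]              # 2-d array, sharded clusters

print(json.dumps(single))
print(json.dumps(sharded))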
Example #23
  def write_hash_manifests(self):
    if not self.manifests_updated:
      return False

    today = datetime.datetime.strftime(
      datetime.datetime.now(), "%Y%m%d%H%M%S")
    for alg in set(self.algorithms):
      manifest_path = os.path.join(self.path, 'manifest-{}.txt'.format(alg))
      copy_manifest_path = os.path.join(self.path, 'manifest-{}-{}.old'.format(alg, today))
      try:
        shutil.copyfile(manifest_path, copy_manifest_path)
      except:
        LOGGER.error("Do not have permission to write new manifests")
      else:
        self.add_premisevent(process = "Copy Bag Manifest",
          msg = "{} copied to {} before writing new manifest".format(
            os.path.basename(manifest_path),
            os.path.basename(copy_manifest_path)),
          outcome = "Pass", sw_agent = sys._getframe().f_code.co_name)

      try:
        with open(manifest_path, 'w') as manifest:
          for payload_file, hashes in self.entries.items():
            if payload_file.startswith("data" + os.sep):
              manifest.write("{} {}\n".format(hashes[alg], bagit._encode_filename(payload_file)))
      except:
        LOGGER.error("Do not have permission to overwrite hash manifests")
      else:
        LOGGER.info("{} written".format(manifest_path))
        self.add_premisevent(process = "Write Bag Manifest",
          msg = "{} written as a result of new or updated payload files".format(
            os.path.basename(manifest_path)),
          outcome = "Pass", sw_agent = sys._getframe().f_code.co_name)

    return True
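The manifest lines written above follow the BagIt '<digest> <path>' convention, with payload paths rooted at data/. A one-line illustration (digest and filename made up):

digest = '9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08'
payload_file = 'data/document.pdf'
print('{} {}'.format(digest, payload_file))
# 9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08 data/document.pdf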
Example #24
File: gcov.py Project: gcovr/gcovr
def select_gcov_files_from_stdout(out, gcov_filter, gcov_exclude, logger, chdir, tempdir):
    active_files = []
    all_files = []

    for line in out.splitlines():
        found = output_re.search(line.strip())
        if found is None:
            continue

        fname = found.group(1)
        full = os.path.join(chdir, fname)
        all_files.append(full)

        filtered, excluded = apply_filter_include_exclude(
            fname, gcov_filter, gcov_exclude)

        if filtered:
            logger.verbose_msg("Filtering gcov file {}", fname)
            continue

        if excluded:
            logger.verbose_msg("Excluding gcov file {}", fname)
            continue

        if tempdir and tempdir != chdir:
            import shutil
            active_files.append(os.path.join(tempdir, fname))
            shutil.copyfile(full, active_files[-1])
        else:
            active_files.append(full)

    return active_files, all_files
Example #25
    def __init__(self, db_session, db_migrate, sql_connection,
                 sqlite_db, sqlite_clean_db):
        self.sql_connection = sql_connection
        self.sqlite_db = sqlite_db
        self.sqlite_clean_db = sqlite_clean_db

        self.engine = db_session.get_engine()
        self.engine.dispose()
        conn = self.engine.connect()
        if sql_connection == "sqlite://":
            if db_migrate.db_version() > db_migrate.db_initial_version():
                return
        else:
            testdb = os.path.join(CONF.state_path, sqlite_db)
            if os.path.exists(testdb):
                return
        db_migrate.db_sync()
#        self.post_migrations()
        if sql_connection == "sqlite://":
            conn = self.engine.connect()
            self._DB = "".join(line for line in conn.connection.iterdump())
            self.engine.dispose()
        else:
            cleandb = os.path.join(CONF.state_path, sqlite_clean_db)
            shutil.copyfile(testdb, cleandb)
Example #26
def main():

    feature_file_path1 = "feature/test_app/"
    feature_file_path2 = "feature/test_app_new/"
    trg_file_path = "feature/cleaned_test_app/"

    with con:
        cur = con.cursor()
        sql = "select permalink, next_round from bayarea_post2012_fewer4;"
        cur.execute(sql)
        results = cur.fetchall()

        for result in results:
            permalink = result[0]
            next_round = result[1]

            file_name = permalink + "_next_" + next_round + ".csv"

            file1 = feature_file_path1 + file_name
            file2 = feature_file_path2 + file_name
            target = trg_file_path + file_name

            if os.path.exists(file2):
                shutil.copyfile(file2, target)

            elif os.path.exists(file1):
                shutil.copyfile(file1, target)
Example #27
 def makeTemp(self, *filenames):
     tmp = self.mktemp()
     os.mkdir(tmp)
     for filename in filenames:
         tmpFile = os.path.join(tmp, filename)
         shutil.copyfile(sp(filename), tmpFile)
     return tmp
Example #28
def storeFile(tmpFile, copyLocation, symLocation):
    shutil.copyfile(tmpFile, copyLocation)
    try:
        os.remove(symLocation)
    except:
        pass
    os.symlink(copyLocation, symLocation)
Example #29
def estimatePloidy(tmpdir, workdir, snpSegfile):
    """
    Runs extract_cnv.R, bedtools intersect, and base_cnv.R.
    extract_cnv.R expects cnv.result<ploidy> and outputs cnv<ploidy>
    bedtools then intersects the cnv<ploidy> file with the snpSegfile created in segmentRatio
    base_cnv uses the intersectfiles to determine the correct ploidy, which it writes to a file named ploidy
    The corresponding file is then moved to the working directory
    """
    rScriptName = os.path.join(scriptPath,"extract_cnv.R")
    subprocess.check_call(['Rscript', rScriptName, tmpdir])
    for i in ["2", "3", "4"]:
        cnvfile = os.path.join(tmpdir, 'cnv' + i)
        outfile = os.path.join(tmpdir, 'cnv' + i + "_baf.txt")
        with open(outfile, 'w') as o:
            subprocess.check_call([
                'bedtools', 'intersect', 
                '-a', snpSegfile,
                '-b', cnvfile,
                '-wb'
            ], stdout = o)
        
    rScriptName = os.path.join(scriptPath,"base_cnv.R")
    subprocess.check_call(['Rscript', rScriptName, tmpdir, workdir])

    # now move the cnv results with the selected ploidy to the output file
    ploidy=open(os.path.join(workdir, "ploidy")).readline().strip()
    shutil.copyfile(os.path.join(tmpdir, "cnv.result" + ploidy), os.path.join(workdir, "cnv.result"))         
Example #30
def saveDocsInfo(part_id, helper, corpus, doc_ids, token_doc_file_name, doc_id_to_text):
	""" Сохраняем токены и отборажение токен->документ. """
	is_first_doc = True
	for doc_id in doc_ids:
		# Save the document's source text
		source_doc_path = os.path.join(doc_id_to_text[doc_id])
		target_doc_path = os.path.join(helper.get_output_dir_path(part_id), str(doc_id) + ".txt")
		copyfile(source_doc_path, target_doc_path)
		try:
			doc_tokens = list(corpus.get_document(doc_id).tokens.values())
		except:
			print("Bad document id: " + str(doc_id))
			continue
		doc_tokens.sort(key = lambda x: x.pos)
		with open(os.path.join(helper.get_output_dir_path(part_id), str(doc_id) + ".tokens"), "w",
				encoding="utf-8") as token_file:
			for token  in doc_tokens:
				token_file.write(str(token.id) + " ")
				token_file.write(str(token.pos) + " ")
				token_file.write(str(token.length) + " ")
				token_file.write(str(token.text) + "\n")
		file_mode = "w"
		if is_first_doc:
			is_first_doc = False
		else:
			file_mode = "a"
		with open(token_doc_file_name, file_mode, encoding="utf-8") as token_doc_file:
			for token  in doc_tokens:
				token_doc_file.write(str(token.id) + " ")
				token_doc_file.write(str(doc_id) + "\n")
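For reference, each '.tokens' line written above has the shape '<id> <pos> <length> <text>', and the token-to-document file pairs token ids with document ids. A tiny sketch with hypothetical values:

token_id, pos, length, text, doc_id = 17, 245, 6, 'corpus', 3
print(token_id, pos, length, text)   # .tokens line:          17 245 6 corpus
print(token_id, doc_id)              # token->document line:  17 3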
Example #31
def chain_samples(dirs, names, group_filename, gff_filename, count_filename, field_to_use='count_fl', fuzzy_junction=0, allow_5merge=False, max_3_diff=100, fastq_filename=None):
    for d in dirs.values():
        sample_sanity_check(os.path.join(d, group_filename),\
                            os.path.join(d, gff_filename),\
                            os.path.join(d, count_filename),\
                            os.path.join(d, fastq_filename) if fastq_filename is not None else None)

    count_header, count_info = read_count_info(count_filename, dirs, field_to_use)

    # some names may already start with "tmp_" which means they are intermediate results that have already been chained
    # find the first non "tmp_" and start from there
    if names[0].startswith('tmp_'):
        chain = []
        for start_i,name in enumerate(names):
            if name.startswith('tmp_'):
                chain.append(name[4:])
            else:
                break
        # start_i, name now points at the first "non-tmp" sample
        # we want to go to the last tmp_ sample and read it
        name = names[start_i-1][4:] # this is the last tmp_ sample, let's read it
        o = sp.MegaPBTree('tmp_'+name+'.gff', 'tmp_'+name+'.group.txt', self_prefix='tmp_'+name, \
                        internal_fuzzy_max_dist=fuzzy_junction, \
                        allow_5merge=allow_5merge, \
                        max_3_diff=max_3_diff, \
                        fastq_filename='tmp_'+name+'.rep.fq' if fastq_filename is not None else None)
        #chain.append(name) # no need, already done above
    else: # everything is new, start fresh
        name = names[0]
        d = dirs[name]
        chain = [name]
        o = sp.MegaPBTree(os.path.join(d, gff_filename), os.path.join(d, group_filename), \
                        self_prefix=name, internal_fuzzy_max_dist=fuzzy_junction, \
                        allow_5merge=allow_5merge, \
                        max_3_diff=max_3_diff, \
                        fastq_filename=os.path.join(d, fastq_filename) if fastq_filename is not None else None)
        start_i = 1


    for name in names[start_i:]:
        assert not name.startswith('tmp_')
        d = dirs[name]
        o.add_sample(os.path.join(d, gff_filename), os.path.join(d, group_filename), \
                     sample_prefix=name, output_prefix='tmp_'+name, \
                     fastq_filename=os.path.join(d, fastq_filename) if fastq_filename is not None else None)
        o = sp.MegaPBTree('tmp_'+name+'.gff', 'tmp_'+name+'.group.txt', self_prefix='tmp_'+name, \
                          internal_fuzzy_max_dist=fuzzy_junction, \
                          allow_5merge=allow_5merge, \
                          max_3_diff=max_3_diff, \
                          fastq_filename='tmp_'+name+'.rep.fq' if fastq_filename is not None else None)
        chain.append(name)

    # now recursively chain back by looking at mega_info.txt!!!
    d = {} # ex: (tmp_1009, PB.1.1) --> mega info dict
    for c in chain[1:]:
        for r in DictReader(open('tmp_' + c + '.mega_info.txt'),delimiter='\t'):
            d['tmp_'+c, r['superPBID']] = r

    f1 = open('all_samples.chained_ids.txt', 'w')
    writer1 = DictWriter(f1, fieldnames=['superPBID']+chain, delimiter='\t')
    writer1.writeheader()
    f2 = open('all_samples.chained_count.txt', 'w')
    writer2 = DictWriter(f2, fieldnames=['superPBID']+chain, delimiter='\t')
    writer2.writeheader()

    reader = DictReader(open('tmp_' + chain[-1] + '.mega_info.txt'),delimiter='\t')
    for r in reader:
        saw_NA = False
        r0 = r
        answer = defaultdict(lambda: 'NA') # ex: 1009 --> PB.1.1
        answer2 = defaultdict(lambda: 'NA') # ex: 1009 --> count
        answer[chain[-1]] = r[chain[-1]]
        if r[chain[-1]] !='NA':
            answer2[chain[-1]] = count_info[chain[-1], answer[chain[-1]]]
        for c in chain[::-1][1:-1]:  # the first sample does not have tmp_, because it's not a chain
            if r['tmp_'+c] == 'NA':
                saw_NA = True
                break
            else:
                r2 = d['tmp_'+c, r['tmp_'+c]]
                answer[c] = r2[c]
                if answer[c] != 'NA':
                    answer2[c] = count_info[c, answer[c]]
                r = r2
        if not saw_NA:
            answer[chain[0]] = r[chain[0]]
            if answer[chain[0]] !='NA':
                answer2[chain[0]] = count_info[chain[0], answer[chain[0]]]

        rec1 = {'superPBID': r0['superPBID']}
        rec2 = {'superPBID': r0['superPBID']}
        for c in chain:
            rec1[c] = answer[c]
            rec2[c] = str(answer2[c])
        writer1.writerow(rec1)
        writer2.writerow(rec2)
    f1.close()
    f2.close()

    shutil.copyfile('tmp_' + chain[-1] + '.gff', 'all_samples.chained.gff')
    if fastq_filename is not None:
        shutil.copyfile('tmp_' + chain[-1] + '.rep.fq', 'all_samples.chained.rep.fq')

    print("Chained output written to:", file=sys.stdout)
    print("all_samples.chained.gff", file=sys.stdout)
    print(f1.name, file=sys.stdout)
    print(f2.name, file=sys.stdout)
    if fastq_filename is not None:
        print("all_samples.chained.rep.fq", file=sys.stdout)
Example #32
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')
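This is the common PyTorch checkpoint idiom: always save the latest state, and when the current model is the best so far, copy it to a stable 'best' filename. A hedged usage sketch (val_acc, best_acc, epoch, model and optimizer are hypothetical training-loop variables):

is_best = val_acc > best_acc
best_acc = max(val_acc, best_acc)
save_checkpoint({'epoch': epoch + 1,
                 'state_dict': model.state_dict(),
                 'best_acc': best_acc,
                 'optimizer': optimizer.state_dict()}, is_best)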
Example #33
                             connection.sendall(data)
                             size = 0
                             error_counter = 0
                             break
                         except socket.error, ex:
                             error_counter = error_counter + 1
                             print str(error_counter) + str(size)
                             time.sleep(0.1)
                             if error_counter > 50:
                                 connection.close()
                                 break
                 photo.close()
                 print >> sys.stderr, 'sending complete ...'
                 name = str(round(time.time(), 0))
                 name = name[0:name.find('.')]
                 copyfile(pic, 'save/' + name + ".jpg")
                 time.sleep(2)
                 os.remove(pic)
             error_counter = 0
             connection.close()
         else:
             print >> sys.stderr, 'wrong key ...'
             connection.close()
     else:
         print >> sys.stderr, 'wrong ID ...'
         connection.close()
 except:
     print >> sys.stderr, 'disconnect with error ...'
     connection.close()
 finally:
     # Clean up the connection
Example #34
def save_checkpoint(state, is_best, checkpoint=args.out, filename='checkpoint.pth.tar'):
    filepath = os.path.join(checkpoint, filename)
    torch.save(state, filepath)
    if is_best:
        shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar'))
Example #35
args = parser.parse_args()


def sha1(path):
    """Calculate hashes of all png files in the test/directory"""
    with open(path, "rb") as f:
        return hashlib.sha1(f.read()).hexdigest()


# Map digest -> path for every snapshot png so failing results can be matched back
filehashes = {sha1(p): p for p in Path(".").glob("**/*-snapshots/*-linux.png")}

# For every json file in data directory except report.json
data_dir = Path(args.report).expanduser().resolve() / "data"
for p in data_dir.glob("*.json"):
    if p.name == "report.json":
        continue
    with open(p, "rb") as f:
        z = json.load(f)
    for t in z["tests"]:
        if t["outcome"] != "unexpected":
            continue
        for r in t["results"]:
            for attachment in r["attachments"]:
                if attachment["name"] == "expected":
                    expected = Path(attachment["path"]).stem
                elif attachment["name"] == "actual":
                    actual = data_dir / Path(attachment["path"]).name
            if expected and actual and expected in filehashes:
                shutil.copyfile(actual, filehashes[expected])
                print(f"{actual} -> {filehashes[expected]}")
Example #36
def makeDocument(filename, pageCallBack=None):
    #the extra arg is a hack added later, so other
    #tests can get hold of the canvas just before it is
    #saved
    global titlelist, closeit
    titlelist = []
    closeit = 0

    c = canvas.Canvas(filename)
    c.setPageCompression(0)
    c.setPageCallBack(pageCallBack)
    framePageForm(c)  # define the frame form
    c.showOutline()

    framePage(c, 'PDFgen graphics API test script')
    makesubsection(c, "PDFgen", 10 * inch)

    #quickie encoding test: when canvas encoding not set,
    #the following should do (tm), (r) and (c)
    msg_uni = u'copyright\u00A9 trademark\u2122 registered\u00AE ReportLab in unicode!'
    msg_utf8 = msg_uni.replace('unicode', 'utf8').encode('utf8')
    c.drawString(100, 100, msg_uni)
    c.drawString(100, 80, msg_utf8)

    t = c.beginText(inch, 10 * inch)
    t.setFont('Times-Roman', 10)
    drawCrossHairs(c, t.getX(), t.getY())
    t.textLines("""
The ReportLab library permits you to create PDF documents directly from
your Python code. The "pdfgen" subpackage is the lowest level exposed
to the user and lets you directly position text and graphics on the
page, with access to almost the full range of PDF features.
  The API is intended to closely mirror the PDF / Postscript imaging
model.  There is an almost one to one correspondence between commands
and PDF operators.  However, where PDF provides several ways to do a job,
we have generally only picked one.
  The test script attempts to use all of the methods exposed by the Canvas
class, defined in reportlab/pdfgen/canvas.py
  First, let's look at text output.  There are some basic commands
to draw strings:
-    canvas.setFont(fontname, fontsize [, leading])
-    canvas.drawString(x, y, text)
-    canvas.drawRightString(x, y, text)
-    canvas.drawCentredString(x, y, text)

The coordinates are in points starting at the bottom left corner of the
page.  When setting a font, the leading (i.e. inter-line spacing)
defaults to 1.2 * fontsize if the fontsize is not provided.

For more sophisticated operations, you can create a Text Object, defined
in reportlab/pdfgen/textobject.py.  Text objects produce tighter PDF, run
faster and have many methods for precise control of spacing and position.
Basic usage goes as follows:
-   tx = canvas.beginText(x, y)
-   tx.textOut('Hello')    # this moves the cursor to the right
-   tx.textLine('Hello again') # prints a line and moves down
-   y = tx.getY()       # getX, getY and getCursor track position
-   canvas.drawText(tx)  # all gets drawn at the end

The green crosshairs below test whether the text cursor is working
properly.  They should appear at the bottom left of each relevant
substring.
""")

    t.setFillColorRGB(1, 0, 0)
    t.setTextOrigin(inch, 4 * inch)
    drawCrossHairs(c, t.getX(), t.getY())
    t.textOut('textOut moves across:')
    drawCrossHairs(c, t.getX(), t.getY())
    t.textOut('textOut moves across:')
    drawCrossHairs(c, t.getX(), t.getY())
    t.textOut('textOut moves across:')
    drawCrossHairs(c, t.getX(), t.getY())
    t.textLine('')
    drawCrossHairs(c, t.getX(), t.getY())
    t.textLine('textLine moves down')
    drawCrossHairs(c, t.getX(), t.getY())
    t.textLine('textLine moves down')
    drawCrossHairs(c, t.getX(), t.getY())
    t.textLine('textLine moves down')
    drawCrossHairs(c, t.getX(), t.getY())

    t.setTextOrigin(4 * inch, 3.25 * inch)
    drawCrossHairs(c, t.getX(), t.getY())
    t.textLines(
        'This is a multi-line\nstring with embedded newlines\ndrawn with textLines().\n'
    )
    drawCrossHairs(c, t.getX(), t.getY())
    t.textLines(['This is a list of strings', 'drawn with textLines().'])
    c.drawText(t)

    t = c.beginText(2 * inch, 2 * inch)
    t.setFont('Times-Roman', 10)
    drawCrossHairs(c, t.getX(), t.getY())
    t.textOut('Small text.')
    drawCrossHairs(c, t.getX(), t.getY())
    t.setFont('Courier', 14)
    t.textOut('Bigger fixed width text.')
    drawCrossHairs(c, t.getX(), t.getY())
    t.setFont('Times-Roman', 10)
    t.textOut('Small text again.')
    drawCrossHairs(c, t.getX(), t.getY())
    c.drawText(t)

    #try out the decimal tabs high on the right.
    c.setStrokeColor(colors.silver)
    c.line(7 * inch, 6 * inch, 7 * inch, 4.5 * inch)

    c.setFillColor(colors.black)
    c.setFont('Times-Roman', 10)
    c.drawString(6 * inch, 6.2 * inch, "Testing decimal alignment")
    c.drawString(6 * inch, 6.05 * inch, "- aim for silver line")
    c.line(7 * inch, 6 * inch, 7 * inch, 4.5 * inch)

    c.drawAlignedString(7 * inch, 5.8 * inch, "1,234,567.89")
    c.drawAlignedString(7 * inch, 5.6 * inch, "3,456.789")
    c.drawAlignedString(7 * inch, 5.4 * inch, "123")
    c.setFillColor(colors.red)
    c.drawAlignedString(7 * inch, 5.2 * inch, "(7,192,302.30)")

    #mark the cursor where it stopped
    c.showPage()

    ##############################################################
    #
    # page 2 - line styles
    #
    ###############################################################

    #page 2 - lines and styles
    framePage(c, 'Line Drawing Styles')

    # three line ends, lines drawn the hard way
    #first make some vertical end markers
    c.setDash(4, 4)
    c.setLineWidth(0)
    c.line(inch, 9.2 * inch, inch, 7.8 * inch)
    c.line(3 * inch, 9.2 * inch, 3 * inch, 7.8 * inch)
    c.setDash()  #clears it

    c.setLineWidth(5)
    c.setLineCap(0)
    p = c.beginPath()
    p.moveTo(inch, 9 * inch)
    p.lineTo(3 * inch, 9 * inch)
    c.drawPath(p)
    c.drawString(4 * inch, 9 * inch,
                 'the default - butt caps project half a width')
    makesubsection(c, "caps and joins", 8.5 * inch)

    c.setLineCap(1)
    p = c.beginPath()
    p.moveTo(inch, 8.5 * inch)
    p.lineTo(3 * inch, 8.5 * inch)
    c.drawPath(p)
    c.drawString(4 * inch, 8.5 * inch, 'round caps')

    c.setLineCap(2)
    p = c.beginPath()
    p.moveTo(inch, 8 * inch)
    p.lineTo(3 * inch, 8 * inch)
    c.drawPath(p)
    c.drawString(4 * inch, 8 * inch, 'square caps')

    c.setLineCap(0)

    # three line joins
    c.setLineJoin(0)
    p = c.beginPath()
    p.moveTo(inch, 7 * inch)
    p.lineTo(2 * inch, 7 * inch)
    p.lineTo(inch, 6.7 * inch)
    c.drawPath(p)
    c.drawString(4 * inch, 6.8 * inch, 'Default - mitered join')

    c.setLineJoin(1)
    p = c.beginPath()
    p.moveTo(inch, 6.5 * inch)
    p.lineTo(2 * inch, 6.5 * inch)
    p.lineTo(inch, 6.2 * inch)
    c.drawPath(p)
    c.drawString(4 * inch, 6.3 * inch, 'round join')

    c.setLineJoin(2)
    p = c.beginPath()
    p.moveTo(inch, 6 * inch)
    p.lineTo(2 * inch, 6 * inch)
    p.lineTo(inch, 5.7 * inch)
    c.drawPath(p)
    c.drawString(4 * inch, 5.8 * inch, 'bevel join')

    c.setDash(6, 6)
    p = c.beginPath()
    p.moveTo(inch, 5 * inch)
    p.lineTo(3 * inch, 5 * inch)
    c.drawPath(p)
    c.drawString(4 * inch, 5 * inch,
                 'dash 6 points on, 6 off- setDash(6,6) setLineCap(0)')
    makesubsection(c, "dash patterns", 5 * inch)

    c.setLineCap(1)
    p = c.beginPath()
    p.moveTo(inch, 4.5 * inch)
    p.lineTo(3 * inch, 4.5 * inch)
    c.drawPath(p)
    c.drawString(4 * inch, 4.5 * inch,
                 'dash 6 points on, 6 off- setDash(6,6) setLineCap(1)')

    c.setLineCap(0)
    c.setDash([1, 2, 3, 4, 5, 6], 0)
    p = c.beginPath()
    p.moveTo(inch, 4.0 * inch)
    p.lineTo(3 * inch, 4.0 * inch)
    c.drawPath(p)
    c.drawString(4 * inch, 4 * inch,
                 'dash growing - setDash([1,2,3,4,5,6],0) setLineCap(0)')

    c.setLineCap(1)
    c.setLineJoin(1)
    c.setDash(32, 12)
    p = c.beginPath()
    p.moveTo(inch, 3.0 * inch)
    p.lineTo(2.5 * inch, 3.0 * inch)
    p.lineTo(inch, 2 * inch)
    c.drawPath(p)
    c.drawString(4 * inch, 3 * inch,
                 'dash pattern, join and cap style interacting - ')
    c.drawString(4 * inch, 3 * inch - 12,
                 'round join & miter results in sausages')
    c.textAnnotation('Annotation',
                     Rect=(4 * inch, 3 * inch - 72, inch, inch - 12))

    c.showPage()

    ##############################################################
    #
    # higher level shapes
    #
    ###############################################################
    framePage(c, 'Shape Drawing Routines')

    t = c.beginText(inch, 10 * inch)
    t.textLines("""
Rather than making your own paths, you have access to a range of shape routines.
These are built in pdfgen out of lines and bezier curves, but use the most compact
set of operators possible.  We can add any new ones that are of general use at no
cost to performance.""")
    t.textLine()

    #line demo
    makesubsection(c, "lines", 10 * inch)
    c.line(inch, 8 * inch, 3 * inch, 8 * inch)
    t.setTextOrigin(4 * inch, 8 * inch)
    t.textLine('canvas.line(x1, y1, x2, y2)')

    #bezier demo - show control points
    makesubsection(c, "bezier curves", 7.5 * inch)
    (x1, y1, x2, y2, x3, y3, x4,
     y4) = (inch, 6.5 * inch, 1.2 * inch, 7.5 * inch, 3 * inch, 7.5 * inch,
            3.5 * inch, 6.75 * inch)
    c.bezier(x1, y1, x2, y2, x3, y3, x4, y4)
    c.setDash(3, 3)
    c.line(x1, y1, x2, y2)
    c.line(x3, y3, x4, y4)
    c.setDash()
    t.setTextOrigin(4 * inch, 7 * inch)
    t.textLine('canvas.bezier(x1, y1, x2, y2, x3, y3, x4, y4)')

    #rectangle
    makesubsection(c, "rectangles", 7 * inch)
    c.rect(inch, 5.25 * inch, 2 * inch, 0.75 * inch)
    t.setTextOrigin(4 * inch, 5.5 * inch)
    t.textLine('canvas.rect(x, y, width, height) - x,y is lower left')

    #wedge
    makesubsection(c, "wedges", 5 * inch)
    c.wedge(inch, 5 * inch, 3 * inch, 4 * inch, 0, 315)
    t.setTextOrigin(4 * inch, 4.5 * inch)
    t.textLine('canvas.wedge(x1, y1, x2, y2, startDeg, extentDeg)')
    t.textLine('Note that this is an elliptical arc, not just circular!')

    #wedge the other way
    c.wedge(inch, 4 * inch, 3 * inch, 3 * inch, 0, -45)
    t.setTextOrigin(4 * inch, 3.5 * inch)
    t.textLine('Use a negative extent to go clockwise')

    #circle
    makesubsection(c, "circles", 3.5 * inch)
    c.circle(1.5 * inch, 2 * inch, 0.5 * inch)
    c.circle(3 * inch, 2 * inch, 0.5 * inch)
    t.setTextOrigin(4 * inch, 2 * inch)
    t.textLine('canvas.circle(x, y, radius)')
    c.drawText(t)

    c.showPage()

    ##############################################################
    #
    # Page 4 - fonts
    #
    ###############################################################
    framePage(c, "Font Control")

    c.drawString(inch, 10 * inch, 'Listing available fonts...')

    y = 9.5 * inch
    for fontname in c.getAvailableFonts():
        c.setFont(fontname, 24)
        c.drawString(inch, y, 'This should be %s' % fontname)
        y = y - 28
    makesubsection(c, "fonts and colors", 4 * inch)

    c.setFont('Times-Roman', 12)
    t = c.beginText(inch, 4 * inch)
    t.textLines("""Now we'll look at the color functions and how they interact
    with the text.  In theory, a word is just a shape; so setFillColorRGB()
    determines most of what you see.  If you specify other text rendering
    modes, an outline color could be defined by setStrokeColorRGB() too""")
    c.drawText(t)

    t = c.beginText(inch, 2.75 * inch)
    t.setFont('Times-Bold', 36)
    t.setFillColor(colors.green)  #green
    t.textLine('Green fill, no stroke')

    #t.setStrokeColorRGB(1,0,0)  #you can do this in a text object, or the canvas.
    t.setStrokeColor(
        colors.red)  #you can do this in a text object, or the canvas.
    t.setTextRenderMode(2)  # fill and stroke
    t.textLine('Green fill, red stroke - yuk!')

    t.setTextRenderMode(0)  # back to default - fill only
    t.setFillColorRGB(0, 0, 0)  #back to default
    t.setStrokeColorRGB(0, 0, 0)  #ditto
    c.drawText(t)
    c.showPage()

    #########################################################################
    #
    #  Page 5 - coord transforms
    #
    #########################################################################
    framePage(c, "Coordinate Transforms")
    c.setFont('Times-Roman', 12)
    t = c.beginText(inch, 10 * inch)
    t.textLines(
        """This shows coordinate transformations.  We draw a set of axes,
    moving down the page and transforming space before each one.
    You can use saveState() and restoreState() to unroll transformations.
    Note that functions which track the text cursor give the cursor position
    in the current coordinate system; so if you set up a 6 inch high frame
    2 inches down the page to draw text in, and move the origin to its top
    left, you should stop writing text after six inches and not eight.""")
    c.drawText(t)

    drawAxes(c, "0.  at origin")
    c.addLiteral('%about to translate space')
    c.translate(2 * inch, 7 * inch)
    drawAxes(c, '1. translate near top of page')

    c.saveState()
    c.translate(1 * inch, -2 * inch)
    drawAxes(c, '2. down 2 inches, across 1')
    c.restoreState()

    c.saveState()
    c.translate(0, -3 * inch)
    c.scale(2, -1)
    drawAxes(c, '3. down 3 from top, scale (2, -1)')
    c.restoreState()

    c.saveState()
    c.translate(0, -5 * inch)
    c.rotate(-30)
    drawAxes(c, "4. down 5, rotate 30' anticlockwise")
    c.restoreState()

    c.saveState()
    c.translate(3 * inch, -5 * inch)
    c.skew(0, 30)
    drawAxes(c, "5. down 5, 3 across, skew beta 30")
    c.restoreState()

    c.showPage()

    #########################################################################
    #
    #  Page 6 - clipping
    #
    #########################################################################
    framePage(c, "Clipping")
    c.setFont('Times-Roman', 12)
    t = c.beginText(inch, 10 * inch)
    t.textLines(
        """This shows clipping at work. We draw a chequerboard of rectangles
    into a path object, and clip it.  This then forms a mask which limits the region of
    the page on which one can draw.  This paragraph was drawn after setting the clipping
    path, and so you should only see part of the text.""")
    c.drawText(t)

    c.saveState()
    #c.setFillColorRGB(0,0,1)
    p = c.beginPath()
    #make a chessboard effect, 1 cm squares
    for i in range(14):
        x0 = (3 + i) * cm
        for j in range(7):
            y0 = (16 + j) * cm
            p.rect(x0, y0, 0.85 * cm, 0.85 * cm)
    c.addLiteral('%Begin clip path')
    c.clipPath(p)
    c.addLiteral('%End clip path')
    t = c.beginText(3 * cm, 22.5 * cm)
    t.textLines(
        """This shows clipping at work.  We draw a chequerboard of rectangles
    into a path object, and clip it.  This then forms a mask which limits the region of
    the page on which one can draw.  This paragraph was drawn after setting the clipping
    path, and so you should only see part of the text.
        This shows clipping at work.  We draw a chequerboard of rectangles
    into a path object, and clip it.  This then forms a mask which limits the region of
    the page on which one can draw.  This paragraph was drawn after setting the clipping
    path, and so you should only see part of the text.
        This shows clipping at work.  We draw a chequerboard of rectangles
    into a path object, and clip it.  This then forms a mask which limits the region of
    the page on which one can draw.  This paragraph was drawn after setting the clipping
    path, and so you should only see part of the text.""")
    c.drawText(t)

    c.restoreState()
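    # A hedged sketch of the clipping sequence used above, assuming a canvas
    # object cv; saveState/restoreState bound the effect of the clip:
    #
    #   cv.saveState()
    #   p = cv.beginPath()
    #   p.rect(1 * inch, 1 * inch, 2 * inch, 2 * inch)
    #   cv.clipPath(p)                 # subsequent drawing is masked by p
    #   cv.drawString(1 * inch, 2 * inch, 'only visible inside the clip')
    #   cv.restoreState()              # clipping ends here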

    t = c.beginText(inch, 5 * inch)
    t.textLines(
        """You can also use text as an outline for clipping with the text render mode.
        The API is not particularly clean here and one has to follow the right sequence of calls;
        this may be tidied up in a future release.""")
    c.drawText(t)

    #first the outline
    c.saveState()
    t = c.beginText(inch, 3.0 * inch)
    t.setFont('Helvetica-BoldOblique', 108)
    t.setTextRenderMode(5)  #stroke text and add it to the clipping path
    t.textLine('Python!')
    t.setTextRenderMode(0)
    c.drawText(t)  #this will make a clipping mask

    #now some small stuff which will be drawn into the current clip mask
    t = c.beginText(inch, 4 * inch)
    t.setFont('Times-Roman', 6)
    t.textLines((('spam ' * 40) + '\n') * 15)
    c.drawText(t)

    #now reset canvas to get rid of the clipping mask
    c.restoreState()
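    # The same text-clipping sequence in miniature (hedged sketch, assuming
    # a canvas object cv):
    #
    #   cv.saveState()
    #   t = cv.beginText(inch, inch)
    #   t.setFont('Helvetica-Bold', 96)
    #   t.setTextRenderMode(5)         # stroke and add text to the clip path
    #   t.textLine('Clip!')
    #   t.setTextRenderMode(0)         # back to normal filling
    #   cv.drawText(t)                 # installs the clipping mask
    #   ... draw the content to show through the letters ...
    #   cv.restoreState()              # removes the mask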

    c.showPage()

    #########################################################################
    #
    #  Page 7 - images
    #
    #########################################################################
    framePage(c, "Images")
    c.setFont('Times-Roman', 12)
    t = c.beginText(inch, 10 * inch)
    if not haveImages:
        c.drawString(
            inch, 11 * inch,
            "Python or Java Imaging Library not found! Below you see rectangles instead of images."
        )

    t.textLines(
        """PDFgen uses the Python Imaging Library (or, under Jython, java.awt.image and javax.imageio)
        to process a very wide variety of image formats.
        This page shows image capabilities.  If I've done things right, the bitmap should have
        its bottom left corner aligned with the crosshairs.
        There are two methods for drawing images.  The recommended use is to call drawImage.
        This produces the smallest PDFs and the fastest generation times as each image's binary data is
        only embedded once in the file.  Also you can use advanced features like transparency masks.
        You can also use drawInlineImage, which puts images in the page stream directly.
        This can be slightly faster for Acrobat to render, and can pay off for very small images, but wastes
        space if you use images more than once.""")

    c.drawText(t)
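    # A minimal, hedged sketch of the two image APIs described above (the
    # file name 'logo.gif' is hypothetical):
    #
    #   c.drawImage('logo.gif', x, y, width, height, mask='auto')
    #       embeds the image once as an XObject and reuses it on each call;
    #   c.drawInlineImage('logo.gif', x, y, width, height)
    #       writes the pixel data directly into the page stream each time.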

    if haveImages:
        gif = os.path.join(os.path.dirname(unittest.__file__),
                           'pythonpowered.gif')
        c.drawInlineImage(gif, 2 * inch, 7 * inch)
    else:
        c.rect(2 * inch, 7 * inch, 110, 44)

    c.line(1.5 * inch, 7 * inch, 4 * inch, 7 * inch)
    c.line(2 * inch, 6.5 * inch, 2 * inch, 8 * inch)
    c.drawString(4.5 * inch, 7.25 * inch, 'inline image drawn at natural size')

    if haveImages:
        c.drawInlineImage(gif, 2 * inch, 5 * inch, inch, inch)
    else:
        c.rect(2 * inch, 5 * inch, inch, inch)

    c.line(1.5 * inch, 5 * inch, 4 * inch, 5 * inch)
    c.line(2 * inch, 4.5 * inch, 2 * inch, 6 * inch)
    c.drawString(4.5 * inch, 5.25 * inch, 'inline image distorted to fit box')

    c.drawString(
        1.5 * inch, 4 * inch,
        'Image XObjects can be defined once in the file and drawn many times.')
    c.drawString(1.5 * inch, 3.75 * inch,
                 'This results in faster generation and much smaller files.')

    for i in range(5):
        if haveImages:
            (w, h) = c.drawImage(gif, (1.5 + i) * inch, 3 * inch)
        else:
            (w, h) = (144, 10)
            c.rect((1.5 + i) * inch, 3 * inch, 110, 44)

    myMask = [254, 255, 222, 223, 0, 1]
    c.drawString(
        1.5 * inch, 2.5 * inch,
        "The optional 'mask' parameter lets you define transparent colors. We used a color picker"
    )
    c.drawString(
        1.5 * inch, 2.3 * inch,
        "to determine that the yellow in the image above is RGB=(225,223,0).  We then define a mask"
    )
    c.drawString(
        1.5 * inch, 2.1 * inch,
        "spanning these RGB values:  %s.  The background vanishes!!" % myMask)
    c.drawString(2.5 * inch, 1.2 * inch, 'This would normally be obscured')
    if haveImages:
        c.drawImage(gif, 1 * inch, 1.2 * inch, w, h, mask=myMask)
        c.drawImage(gif, 3 * inch, 1.2 * inch, w, h, mask='auto')
    else:
        c.rect(1 * inch, 1.2 * inch, w, h)
        c.rect(3 * inch, 1.2 * inch, w, h)

    c.showPage()

    if haveImages:
        import shutil
        c.drawString(1 * inch, 10.25 * inch, 'This jpeg is actually a gif')
        jpg = outputfile('_i_am_actually_a_gif.jpg')
        shutil.copyfile(gif, jpg)
        c.drawImage(jpg, 1 * inch, 9.25 * inch, w, h, mask='auto')
        tjpg = os.path.join(os.path.dirname(os.path.dirname(gif)), 'docs',
                            'images', 'lj8100.jpg')
        if os.path.isfile(tjpg):
            c.drawString(4 * inch, 10.25 * inch, 'This gif is actually a jpeg')
            tgif = outputfile(os.path.basename('_i_am_actually_a_jpeg.gif'))
            shutil.copyfile(tjpg, tgif)
            c.drawImage(tgif, 4 * inch, 9.25 * inch, w, h, mask='auto')
        c.showPage()


#########################################################################
#
#  Page 8 - Forms and simple links
#
#########################################################################
    framePage(c, "Forms and Links")
    c.setFont('Times-Roman', 12)
    t = c.beginText(inch, 10 * inch)
    t.textLines("""Forms are sequences of text or graphics operations
      which are stored only once in a PDF file and used as many times
      as desired.  The blue logo bar to the left is an example of a form
      in this document.  See the function framePageForm in this demo script
      for an example of how to use canvas.beginForm(name, ...) ... canvas.endForm().

      Documents can also contain cross references where (for example) a rectangle
      on a page may be bound to a position on another page.  If the user clicks
      on the rectangle the PDF viewer moves to the bound position on the other
      page.  There are many other types of annotations and links supported by PDF.

      For example, there is a bookmark to each page in this document and below
      is a browsable index that jumps to those pages. In addition we show two
      URL hyperlinks; for these, you specify a rectangle but must draw the contents
      or any surrounding rectangle yourself.
      """)
    c.drawText(t)
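    # A hedged sketch of the internal-link pattern used below, assuming a
    # canvas cv and a destination named 'Chapter1':
    #
    #   cv.bookmarkPage('Chapter1')    # on the destination page
    #   # ... later, on another page:
    #   cv.linkAbsolute('Go to chapter 1', 'Chapter1',
    #                   (inch, inch, 3 * inch, 1.25 * inch))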

    nentries = len(titlelist)
    xmargin = 3 * inch
    xmax = 7 * inch
    ystart = 6.54 * inch
    ydelta = 0.4 * inch
    for i in range(nentries):
        yposition = ystart - i * ydelta
        title = titlelist[i]
        c.drawString(xmargin, yposition, title)
        c.linkAbsolute(title, title,
                       (xmargin - ydelta / 4, yposition - ydelta / 4, xmax,
                        yposition + ydelta / 2))

    # test URLs
    r1 = (inch, 3 * inch, 5 * inch, 3.25 * inch)  # this is x1,y1,x2,y2
    c.linkURL('http://www.reportlab.com/', r1, thickness=1, color=colors.green)
    c.drawString(inch + 3, 3 * inch + 6,
                 'Hyperlink to www.reportlab.com, with green border')

    r1 = (inch, 2.5 * inch, 5 * inch, 2.75 * inch)  # this is x1,y1,x2,y2
    c.linkURL('mailto:[email protected]', r1)  #, border=0)
    c.drawString(inch + 3, 2.5 * inch + 6, 'mailto: hyperlink, without border')

    r1 = (inch, 2 * inch, 5 * inch, 2.25 * inch)  # this is x1,y1,x2,y2
    c.linkURL('http://www.reportlab.com/',
              r1,
              thickness=2,
              dashArray=[2, 4],
              color=colors.magenta)
    c.drawString(inch + 3, 2 * inch + 6, 'Hyperlink with custom border style')

    xpdf = outputfile('test_hello.pdf').replace('\\', '/')
    link = 'Hard link to %s, with red border' % xpdf
    r1 = (inch, 1.5 * inch,
          inch + 2 * 3 + c.stringWidth(link, c._fontname, c._fontsize),
          1.75 * inch)  # this is x1,y1,x2,y2
    c.linkURL(xpdf, r1, thickness=1, color=colors.red, kind='GoToR')
    c.drawString(inch + 3, 1.5 * inch + 6, link)

    ### now do stuff for the outline
    #for x in outlinenametree: print x
    #stop
    #apply(c.setOutlineNames0, tuple(outlinenametree))
    return c
Exemplo n.º 37
0
def chain_samples_multithread(dirs, names, group_filename, gff_filename, count_filename, field_to_use='count_fl', fuzzy_junction=0, allow_5merge=False, max_3_diff=100, fastq_filename=None, cpus=4):
    for d in dirs.values():
        sample_sanity_check(os.path.join(d, group_filename),\
                            os.path.join(d, gff_filename),\
                            os.path.join(d, count_filename),\
                            os.path.join(d, fastq_filename) if fastq_filename is not None else None)

    count_header, count_info = read_count_info(count_filename, dirs, field_to_use)

    # some names may already start with "tmp_" which means they are intermediate results that have already been chained
    # find the first non "tmp_" and start from there
    if names[0].startswith('tmp_'):
        chain = []
        for start_i,name in enumerate(names):
            if name.startswith('tmp_'):
                chain.append(name[4:])
            else:
                break
        # start_i, name now points at the first "non-tmp" sample
        # we want to go to the last tmp_ sample and read it
        name = names[start_i-1][4:] # this is the last tmp_ sample, let's read it
        first_add = False
    else: # everything is new, start fresh
        name = names[0]
        chain = [name]
        start_i = 1
        first_add = True

    for addon_name in names[start_i:]:
        assert not addon_name.startswith('tmp_')
        ref_name = chain[-1]
        ref_d = dirs[ref_name]
        if first_add:
            ref_gff = os.path.join(ref_d, gff_filename)
            ref_group = os.path.join(ref_d, group_filename)
            ref_fq = os.path.join(ref_d, fastq_filename) if fastq_filename is not None else None
        else:
            ref_name = 'tmp_' + ref_name
            ref_gff = ref_name + '.gff'
            ref_group = ref_name + '.group.txt'
            ref_fq = ref_name + '.rep.fq' if fastq_filename is not None else None
        addon_d = dirs[addon_name]
        addon_gff = os.path.join(addon_d, gff_filename)
        addon_group = os.path.join(addon_d, group_filename)
        addon_fq = os.path.join(addon_d, fastq_filename) if fastq_filename is not None else None
        split_outs, split_ins = chain_split_file(ref_gff=ref_gff,
                            ref_group=ref_group,
                            ref_name=ref_name,
                            addon_gff=addon_gff,
                            addon_group=addon_group,
                            addon_name=addon_name,
                            fuzzy_junction=fuzzy_junction,
                            allow_5merge=allow_5merge,
                            max_3_diff=max_3_diff,
                            n_chunks=cpus)

        combine_split_chained_results(split_outs,
                                      final_prefix='tmp_'+addon_name,
                                      ref_gff=ref_gff,
                                      ref_group=ref_group,
                                      ref_name=ref_name,
                                      ref_fq=ref_fq,
                                      addon_gff=addon_gff,
                                      addon_group=addon_group,
                                      addon_name=addon_name,
                                      addon_fq=addon_fq)

        chain.append(addon_name)
        for in_gff_split, in_group_split in split_ins:
            os.remove(in_gff_split)  # remove the split gff
            os.remove(in_group_split)

        first_add = False

    # now recursively chain back by looking at mega_info.txt!!!
    d = {} # ex: (tmp_sample1, PB.1.1) --> mega info dict
    for c in chain[1:]:
        for r in DictReader(open('tmp_' + c + '.mega_info.txt'),delimiter='\t'):
            d['tmp_'+c, r['superPBID']] = r

    f1 = open('all_samples.chained_ids.txt', 'w')
    writer1 = DictWriter(f1, fieldnames=['superPBID']+chain, delimiter='\t')
    writer1.writeheader()
    f2 = open('all_samples.chained_count.txt', 'w')
    writer2 = DictWriter(f2, fieldnames=['superPBID']+chain, delimiter='\t')
    writer2.writeheader()

    reader = DictReader(open('tmp_' + chain[-1] + '.mega_info.txt'),delimiter='\t')
    for r in reader:
        saw_NA = False
        r0 = r
        answer = defaultdict(lambda: 'NA') # ex: 1009 --> PB.1.1
        answer2 = defaultdict(lambda: 'NA') # ex: 1009 --> count
        answer[chain[-1]] = r[chain[-1]]
        if r[chain[-1]] !='NA':
            answer2[chain[-1]] = count_info[chain[-1], answer[chain[-1]]]
        for c in chain[::-1][1:-1]:  # the first sample does not have tmp_, because it's not a chain
            if r['tmp_'+c] == 'NA':
                saw_NA = True
                break
            else:
                r2 = d['tmp_'+c, r['tmp_'+c]]
                answer[c] = r2[c]
                if answer[c] != 'NA':
                    answer2[c] = count_info[c, answer[c]]
                r = r2
        if not saw_NA:
            answer[chain[0]] = r[chain[0]]
            if answer[chain[0]] !='NA':
                answer2[chain[0]] = count_info[chain[0], answer[chain[0]]]

        rec1 = {'superPBID': r0['superPBID']}
        rec2 = {'superPBID': r0['superPBID']}
        for c in chain:
            rec1[c] = answer[c]
            rec2[c] = str(answer2[c])
        writer1.writerow(rec1)
        writer2.writerow(rec2)
    f1.close()
    f2.close()

    shutil.copyfile('tmp_' + chain[-1] + '.gff', 'all_samples.chained.gff')
    if fastq_filename is not None:
        shutil.copyfile('tmp_' + chain[-1] + '.rep.fq', 'all_samples.chained.rep.fq')

    print("Chained output written to:", file=sys.stdout)
    print("all_samples.chained.gff", file=sys.stdout)
    print(f1.name, file=sys.stdout)
    print(f2.name, file=sys.stdout)
    if fastq_filename is not None:
        print("all_samples.chained.rep.fq", file=sys.stdout)
Exemplo n.º 38
0
def _copy_file_to_testdir(fname, sub_dir=""):
    out_dir = os.path.join(test_dir, sub_dir)
    idr.safe_makedir(out_dir)
    out_file = os.path.join(out_dir, os.path.basename(fname))
    shutil.copyfile(fname, out_file)
    return out_file
Exemplo n.º 39
0
train_list = list(df_train['image_id'])
val_list = list(df_val['image_id'])

# Transfer the training images
for image in train_list:

    fname = image + '.jpg'
    label = df.loc[image, 'dx']

    if fname in folder_1:
        # source path to image
        src = os.path.join('ham10000_images_part_1', fname)
        # destination path to image
        dst = os.path.join(train_dir, label, fname)
        # copy the image from the source to the destination
        shutil.copyfile(src, dst)

    if fname in folder_2:
        # source path to image
        src = os.path.join('ham10000_images_part_2', fname)
        # destination path to image
        dst = os.path.join(train_dir, label, fname)
        # copy the image from the source to the destination
        shutil.copyfile(src, dst)

# Transfer the validation images
for image in val_list:

    fname = image + '.jpg'
    label = df.loc[image, 'dx']

    # Completed by analogy with the training loop above; val_dir is assumed
    # to be the validation counterpart of train_dir.
    if fname in folder_1:
        src = os.path.join('ham10000_images_part_1', fname)
        dst = os.path.join(val_dir, label, fname)
        shutil.copyfile(src, dst)

    if fname in folder_2:
        src = os.path.join('ham10000_images_part_2', fname)
        dst = os.path.join(val_dir, label, fname)
        shutil.copyfile(src, dst)
Exemplo n.º 40
0
def this_file_copy():
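    # Note: str(sys.argv)[2:-2] only recovers the script path when sys.argv
    # contains exactly one element; with extra arguments the slice is wrong.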
    shutil.copyfile(str(sys.argv)[2:-2], str(sys.argv)[2:-2] + '_cp')
    return str(sys.argv)[2:-2] + '_cp'
Exemplo n.º 41
0
        usage()
        sys.exit()
    else:
        assert False, "unhandled option"

# do not overwrite existing user files
if not force:
    if os.path.isfile(starget) or os.path.isfile(ttarget):
        print('There are already users on the system!')
        usage()
        sys.exit(1)

# copy example user files
ssource = constants.EXAMPLEDIR + '/students.csv'
tsource = constants.EXAMPLEDIR + '/teachers.csv'
copyfile(ssource, starget)
copyfile(tsource, ttarget)

# script header
filename = os.path.basename(__file__).replace('.py', '')
logfile = constants.LOGDIR + '/' + filename + '.log'

title = 'Creating test users for default-school'
printScript('', 'begin')
printScript(title)

msg = 'Logging to ' + logfile
printScript(msg)

# set password policy
msg = 'Password policy setup '
Exemplo n.º 42
0
def save_checkpoint(state, is_best):
    torch.save(state, latest_check)
    if is_best:
        shutil.copyfile(latest_check, best_check)
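# A hedged usage sketch; latest_check and best_check are module-level paths
# assumed to be defined elsewhere (e.g. 'checkpoint.pth.tar', 'best.pth.tar'):
#
#   state = {'epoch': epoch, 'state_dict': model.state_dict(),
#            'optimizer': optimizer.state_dict()}
#   save_checkpoint(state, is_best=val_acc > best_acc)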
Exemplo n.º 43
0
def work_this_src_file(file_line):  #
    #
    ##
    #	print "work_this_src_file(file_line): " #
    #	print working_file1 #
    wrkf1 = open(VPOT_conf.working_file1, 'w', encoding="utf-8")  #
    with open(file_line[0], 'r', encoding="utf-8") as source_vcf:  #
        for src_line in source_vcf:  # work each line of source vcf file
            src_line1 = re.split(
                '\t|\n|\r', src_line)  # split into file location and sample id
            if ("#CHROM" not in src_line1[0]):  # skip the header lines
                #				print (src_line1) #
                # variants lines
                SAMPLE1 = re.split(':', src_line1[
                    VPOT_conf.sample_loc])  # split the sample's FORMAT fields
                #				print ("MaxCOverage : ",VPOT_conf.Maxcoverage) #
                #				print ("Hete_balance : ",VPOT_conf.Hete_Balance) #
                #				print "coverage_loc : ",VPOT_conf.sample_coverage_loc #
                #				print SAMPLE1 #
                #				print VPOT_conf.sample_coverage_loc #
                # have a coverage check on the sample
                #				if (((VPOT_conf.sample_coverage_loc[VPOT_conf.NR_val] == -1) or ((SAMPLE1[VPOT_conf.sample_coverage_loc[VPOT_conf.NR_val]] != ".") and (int(SAMPLE1[VPOT_conf.sample_coverage_loc[VPOT_conf.NR_val]]) >= VPOT_conf.Maxcoverage))) and #  NR
                #					((VPOT_conf.sample_coverage_loc[VPOT_conf.DP_val] == -1) or ((SAMPLE1[VPOT_conf.sample_coverage_loc[VPOT_conf.DP_val]] != ".") and (int(SAMPLE1[VPOT_conf.sample_coverage_loc[VPOT_conf.DP_val]]) >= VPOT_conf.Maxcoverage)))) : # DP
                #				if (SAMPLE1[VPOT_conf.sample_coverage_loc[VPOT_conf.GT_val]] != "./.") : # a valid genotype
                if (SAMPLE1[VPOT_conf.sample_coverage_loc[VPOT_conf.GT_val]] !=
                        "./.") and (SAMPLE1[VPOT_conf.sample_coverage_loc[
                            VPOT_conf.GT_val]] != "0/.") and (
                                SAMPLE1[VPOT_conf.sample_coverage_loc[
                                    VPOT_conf.GT_val]] != "./0") and (
                                        SAMPLE1[VPOT_conf.sample_coverage_loc[
                                            VPOT_conf.GT_val]] !=
                                        "0/0"):  # a valid genotype
                    if (VPOT_conf.sample_coverage_loc[VPOT_conf.DP_val] !=
                            -1):  #this sample have a coverage depth
                        Sample_coverage = int(
                            SAMPLE1[VPOT_conf.sample_coverage_loc[
                                VPOT_conf.DP_val]])  # save DP value
                        Alt_reads = int(SAMPLE1[VPOT_conf.sample_coverage_loc[
                            VPOT_conf.DP_val]]) / 2  # save DP value
#						print ("DP") #
                    if (VPOT_conf.sample_coverage_loc[VPOT_conf.NR_val] != -1
                        ) and (
                            VPOT_conf.sample_coverage_loc[VPOT_conf.NV_val] !=
                            -1
                        ):  #this sample have a coverage depth from NR and NV
                        #						Sample_coverage=int(SAMPLE1[VPOT_conf.sample_coverage_loc[VPOT_conf.NR_val]])+int(SAMPLE1[VPOT_conf.sample_coverage_loc[VPOT_conf.NV_val]]) # save DP value
                        Sample_coverage = int(
                            SAMPLE1[VPOT_conf.sample_coverage_loc[
                                VPOT_conf.NR_val]])  # save DP value
                        Alt_reads = int(SAMPLE1[VPOT_conf.sample_coverage_loc[
                            VPOT_conf.NV_val]])  # save DP value
#						print ("NR+NV") #
#					print ("TOT: ",str(Sample_coverage)) #
#					print ("ALT_READ: ",str(Alt_reads)) #
#					print ("pass") #
#					if (SAMPLE1[VPOT_conf.sample_coverage_loc[VPOT_conf.DP_val]] == ".") : # no DP_val
#						SAMPLE1[VPOT_conf.sample_coverage_loc[VPOT_conf.DP_val]] = "0"  # set it as zero
#					if (SAMPLE1[VPOT_conf.sample_coverage_loc[VPOT_conf.NR_val]] == ".") : # no DP_val
#						SAMPLE1[VPOT_conf.sample_coverage_loc[VPOT_conf.NR_val]] = "0"  # set it as zero
#					if (((VPOT_conf.sample_coverage_loc[VPOT_conf.NR_val] == -1) or
#						((VPOT_conf.is_number(SAMPLE1[VPOT_conf.sample_coverage_loc[VPOT_conf.NR_val]])) and (int(SAMPLE1[VPOT_conf.sample_coverage_loc[VPOT_conf.NR_val]]) >= int(VPOT_conf.Maxcoverage)))) and  #  NR
#						((VPOT_conf.sample_coverage_loc[VPOT_conf.DP_val] == -1) or
#						((VPOT_conf.is_number(SAMPLE1[VPOT_conf.sample_coverage_loc[VPOT_conf.DP_val]])) and (int(SAMPLE1[VPOT_conf.sample_coverage_loc[VPOT_conf.DP_val]]) >= int(VPOT_conf.Maxcoverage))))) : # DP
#
                    VPOT_conf.QC_PASS = False  #
                    if ((Sample_coverage >= int(VPOT_conf.Maxcoverage))
                            and (int((Alt_reads / Sample_coverage) * 100) >=
                                 int(VPOT_conf.Hete_Balance))
                        ):  # Pass QC for coverage and balance
                        VPOT_conf.QC_PASS = True  # Yes
#					print ("QC_PASS",VPOT_conf.QC_PASS) #
#						print ("add") #
#						print ("add") #
#					Allow for phased genotypes like 0|1
                    GT_values = re.split(
                        '[|/]', SAMPLE1[VPOT_conf.sample_coverage_loc[
                            VPOT_conf.GT_val]])  # get the genotype fields
                    # 	 				print GT_values #
                    for j in range(len(GT_values)):  #
                        #					print GT_values[j] #
                        if (GT_values[j] not in VPOT_conf.Non_alt_GT_types
                            ):  # when filtering for QC value
                            #							print ("keep this variant1") #
                            check_this_variant(src_line, wrkf1)  #
                            break  # get out of for loop (GT_values)

    wrkf1.close()  # finish with the output file
    #
    #	print "sort unique" #
    ##	COMMAND="sort -V -u -k1,5 "+VPOT_conf.working_file1+" > "+VPOT_conf.sort_file1 #
    COMMAND = "sort -V -u " + VPOT_conf.working_file1 + " > " + VPOT_conf.sort_file1  #
    subprocess.call(COMMAND, shell=True)  #
    copyfile(VPOT_conf.sort_file1, VPOT_conf.working_file1)  # copy back
Exemplo n.º 44
0
def save_configs(cfgs_list, directory):
    # stores config files in the experiment dir
    timestamp = time.strftime('%Y-%m-%d-%H-%M')
    for config_file in cfgs_list:
        filename = f"{timestamp}-{os.path.basename(config_file)}"
        shutil.copyfile(config_file, os.path.join(directory, filename))
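# A hedged usage sketch (paths are hypothetical):
#
#   save_configs(['configs/model.yaml', 'configs/train.yaml'],
#                directory='experiments/run_01')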
Exemplo n.º 45
0
    def preflight(self):
        '''
        Rewrite input file in tsv format.
        Write config file based on params.
        Generate command line from params.
        '''
        self.params['translations']['csv_input_file'] = os.path.join(
            self.params['input_dir_path'], self.params['input_file'])
        self.params['translations']['output_file_incl_path'] = os.path.join(
            self.params['output_dir_path'], self.params['output_file'])

        n = 0
        while os.path.exists(
                os.path.join(self.params['input_dir_path'],
                             'ptmshephered_tmp_{0}'.format(n))):
            n += 1
        self.tmp_dir = os.path.join(self.params['input_dir_path'],
                                    'ptmshephered_tmp_{0}'.format(n))
        os.mkdir(self.tmp_dir)

        tmp_input_file = self.write_input_tsv(
            self.params['translations']['csv_input_file'], self.tmp_dir)

        write_exclusion_list = [
            'base_mz',
        ]
        for ptmshep_param in self.params['translations'][
                '_grouped_by_translated_key'].keys():
            for ursgal_param_name, param_value in self.params['translations'][
                    '_grouped_by_translated_key'][ptmshep_param].items():
                if ptmshep_param in write_exclusion_list:
                    continue
                elif ptmshep_param == 'dataset':
                    print(
                        '[ PREFLGHT ] copying mzML files into tmp folder ...')
                    for mzml in param_value:
                        shutil.copyfile(
                            mzml,
                            os.path.join(self.tmp_dir, os.path.basename(mzml)))
                    self.params_to_write['dataset'] = '01 {0} {1}'.format(
                        tmp_input_file, self.tmp_dir)
                elif ursgal_param_name == 'ptmshepherd_peak_picking_params':
                    for k, v in param_value.items():
                        self.params_to_write[k] = v
                elif ptmshep_param == 'precursor_tol':
                    self.params_to_write['precursor_tol'] = \
                        self.params['translations']['precursor_mass_tolerance_plus'] +\
                        self.params['translations']['precursor_mass_tolerance_minus']
                elif ptmshep_param == 'spectra_ppmtol':
                    if self.params['translations'][
                            'frag_mass_tolerance_unit'] == 'da':
                        self.params_to_write['spectra_ppmtol'] = \
                            ursgal.ucore.convert_ppm_to_dalton(
                                self.params['translations']['frag_mass_tolerance'],
                                base_mz=self.params['translations']['base_mz']
                        )
                    elif self.params['translations'][
                            'frag_mass_tolerance_unit'] == 'ppm':
                        self.params_to_write['spectra_ppmtol'] = self.params[
                            'translations']['frag_mass_tolerance']
                    else:
                        print(
                            'please add conversion of frag mass tolerance for ptmshepherd for {0}'
                            .format(self.params['translations']
                                    ['frag_mass_tolerance_unit']))
                        sys.exit(1)
                elif ptmshep_param == 'varmod_masses':
                    assert len(self.params['mods']['fix']) == 0, '''
                        [ERROR] PTM-Shepherd does not support fixed modifications.
                        [ERROR] Please change the following mods to variable mods:
                        {0}
                    '''.format(self.params['mods']['fix'])
                    mod_list = []
                    for mod_dict in self.params['mods']['opt']:
                        mod_list.append('{0}:{1}'.format(
                            mod_dict['name'], mod_dict['mass']))
                    self.params_to_write['varmod_masses'] = ','.join(mod_list)
                elif ptmshep_param == 'mass_offsets':
                    self.params_to_write['mass_offsets'] = '/'.join(
                        param_value)
                elif ptmshep_param == '-Xmx':
                    xmx = '-Xmx{0}'.format(param_value)
                else:
                    self.params_to_write[ptmshep_param] = param_value

        #create param file
        self.param_file_name = self.write_params_file(self.tmp_dir)

        #command line
        self.params['command_list'] = [
            'java',
            '-jar',
            xmx,
            self.exe,
            self.param_file_name,
        ]
        print(self.params['command_list'])

        return self.params
Exemplo n.º 46
0
def build_bsa(dir_src: os.PathLike, bsa: os.PathLike, temp_alt: os.PathLike,
              arch_exe: os.PathLike, arch_flags: ArchiveFlags):
    """Build a bsa.

    Args:
        dir_src: All valid files in this directory are packed into the bsa.
        bsa: The bsa is created at this path.
            This is the final path e.g. /Some/Path/Mod.bsa.
        temp_alt: A directory whose path does not contain a directory that
            ends with "Data". Will be used to store temporary files.
        arch_exe: Path to Archive.exe, the executable that creates the bsa.
        arch_flags: Checks the corresponding options in Archive.exe.

    Some genius at Bethesda imposed two constraints regarding Archive.exe:
        1. The loose files must be in a directory that ends with "Data".
        2. The path to that directory must not contain another directory that
            ends with "Data".
    As luck would have it, the default location for temporary files has such a
        directory, namely AppData. Thus another location must be used.
    """
    with tempfile.TemporaryDirectory(dir=temp_alt, suffix="Data") as dir_temp:
        # Create manifest and copy files to temporary directory
        manifest = os.path.join(dir_temp, "Manifest.txt")
        with open(manifest, "w") as fh:
            for root, subdirs, files in os.walk(dir_src):
                root_rel = pathlib.PurePath(root).relative_to(dir_src)
                for file in files:
                    path_rel = root_rel.joinpath(file)
                    first_dir = path_rel.parts[0]
                    if first_dir.lower() in BSA_INCLUDE_DIRS:
                        fh.write("{}\n".format(path_rel))
                    src = os.path.join(root, file)
                    dst = os.path.join(dir_temp, path_rel)
                    os.makedirs(os.path.dirname(dst), exist_ok=True)
                    shutil.copyfile(src, dst)
        # Exit if manifest is empty because Archive.exe will crash
        if os.path.getsize(manifest) == 0:
            return
        # Create batch file
        batch = os.path.join(dir_temp, "Batch.txt")
        log = "{}.log".format(os.path.splitext(os.path.basename(bsa))[0])
        with open(batch, "w") as fh:
            if logger.getEffectiveLevel() < logging.INFO:
                fh.write("Log: {}\n".format(log))
            fh.write("New Archive\n")
            if arch_flags.check_meshes:
                fh.write("Check: Meshes\n")
            if arch_flags.check_textures:
                fh.write("Check: Textures\n")
            if arch_flags.check_menus:
                fh.write("Check: Menus\n")
            if arch_flags.check_sounds:
                fh.write("Check: Sounds\n")
            if arch_flags.check_voices:
                fh.write("Check: Voices\n")
            if arch_flags.check_shaders:
                fh.write("Check: Shaders\n")
            if arch_flags.check_trees:
                fh.write("Check: Trees\n")
            if arch_flags.check_fonts:
                fh.write("Check: Fonts\n")
            if arch_flags.check_misc:
                fh.write("Check: Misc\n")
            if arch_flags.check_compress_archive:
                fh.write("Check: Compress Archive\n")
            if arch_flags.check_retain_directory_names:
                fh.write("Check: Retain Directory Names\n")
            if arch_flags.check_retain_file_names:
                fh.write("Check: Retain File Names\n")
            if arch_flags.check_retain_file_name_offsets:
                fh.write("Check: Retain File Name Offsets\n")
            if arch_flags.check_retain_strings_during_startup:
                fh.write("Check: Retain Strings During Startup\n")
            if arch_flags.check_embed_file_name:
                fh.write("Check: Embed File Names\n")
            fh.write("Set File Group Root: {}{}\n".format(dir_temp, os.sep))
            fh.write("Add File Group: {}\n".format(manifest))
            fh.write("Save Archive: {}\n".format(bsa))
        # Build bsa
        cmd = [arch_exe, batch]
        sp = subprocess.run(cmd)
        sp.check_returncode()
        # Delete useless bsl file
        bsl = "{}.bsl".format(os.path.splitext(bsa)[0])
        os.remove(bsl)
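# A hedged usage sketch; every path and the ArchiveFlags values below are
# hypothetical placeholders:
#
#   flags = ArchiveFlags(check_meshes=True, check_textures=True, ...)
#   build_bsa(dir_src='Mod/Data',
#             bsa='Output/Mod.bsa',
#             temp_alt='C:/Temp',
#             arch_exe='C:/Games/Skyrim/Archive.exe',
#             arch_flags=flags)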
Exemplo n.º 47
0
    def upload(self, inputTag, baseFile, destTag, synchro, destSince, success,
               expectedAction):
        insertedSince = None
        destFile = '%s.db' % destTag
        metaDestFile = '%s.txt' % destTag
        shutil.copyfile(baseFile, destFile)
        self.log(
            '# ---------------------------------------------------------------------------'
        )
        self.log(
            '# Testing tag %s with synch=%s, destSince=%s - expecting ret=%s action=%s'
            % (destTag, synchro, destSince, success, expectedAction))

        descr = 'Testing conditionsUpload with synch:%s - expected action: %s' % (
            synchro, expectedAction)
        makeMetadataFile(inputTag, destTag, destSince, descr)
        beforeUpload = datetime.datetime.utcnow()
        ret = uploadFile(destFile, self.logFileName)
        if ret != success:
            self.log(
                'ERROR: the return value for the upload of tag %s with synchro %s was %s, while the expected result is %s'
                % (destTag, synchro, ret, success))
            self.errors += 1
        else:
            row = self.db.getLastInsertedSince(destTag, beforeUpload)
            if ret == True:
                if expectedAction == 'CREATE' or expectedAction == 'INSERT' or expectedAction == 'APPEND':
                    if destSince != row[0]:
                        self.log(
                            'ERROR: the since inserted is %s, expected value is %s - expected action: %s'
                            % (row[0], destSince, expectedAction))
                        self.errors += 1
                    else:
                        self.log(
                            '# OK: Found expected value for last since inserted: %s timestamp: %s'
                            % (row[0], row[1]))
                        insertedSince = row[0]
                elif expectedAction == 'SYNCHRONIZE':
                    if destSince == row[0]:
                        self.log(
                            'ERROR: the since inserted %s has not been synchronized with the FCSR - expected action: %s'
                            % (row[0], expectedAction))
                        self.errors += 1
                    else:
                        self.log(
                            '# OK: Found synchronized value for the last since inserted: %s timestamp: %s'
                            % (row[0], row[1]))
                        insertedSince = row[0]
                else:
                    self.log(
                        'ERROR: found an appended since %s - expected action: %s'
                        % (row[0], expectedAction))
                    self.errors += 1
            else:
                if not row is None:
                    self.log(
                        'ERROR: found new inserted since: %s timestamp: %s' %
                        (row[0], row[1]))
                    self.errors += 1
                if expectedAction != 'FAIL':
                    self.log('ERROR: Upload failed. Expected value: %s' %
                             (destSince))
                    self.errors += 1
                else:
                    self.log('# OK: Upload failed as expected.')
        os.remove(destFile)
        os.remove(metaDestFile)
        return insertedSince
Exemplo n.º 48
0
set_default2_command = {'commands': [{'command':'setDefaultACLAccess', 'actions':'bad'}]}
set_default2_response = {'responses': [{'command': 'setDefaultACLAccess', 'error': 'Missing/invalid actions array'}]}


rc = 1
keepalive = 10
connect_packet = mosq_test.gen_connect("ctrl-test", keepalive=keepalive, username="******", password="******")
connack_packet = mosq_test.gen_connack(rc=0)

mid = 2
subscribe_packet = mosq_test.gen_subscribe(mid, "$CONTROL/dynamic-security/#", 1)
suback_packet = mosq_test.gen_suback(mid, 1)

try:
    os.mkdir(str(port))
    shutil.copyfile("dynamic-security-init.json", "%d/dynamic-security.json" % (port))
except FileExistsError:
    pass

broker = mosq_test.start_broker(filename=os.path.basename(__file__), use_conf=True, port=port)

try:
    sock = mosq_test.do_client_connect(connect_packet, connack_packet, timeout=5, port=port)
    mosq_test.do_send_receive(sock, subscribe_packet, suback_packet, "suback")

    command_check(sock, bad1_command, bad1_response, "1")
    command_check(sock, bad2_command, bad2_response, "2")
    command_check(sock, bad3_command, bad3_response, "3")
    command_check(sock, bad4_command, bad4_response, "4")
    command_check(sock, bad5_command, bad5_response, "5")
    command_check(sock, bad6_command, bad6_response, "6")
Exemplo n.º 49
0
#query
query_path = download_path + '/query'
query_save_path = download_path + '/pytorch/query'
if not os.path.isdir(query_save_path):
    os.mkdir(query_save_path)

for root, dirs, files in os.walk(query_path, topdown=True):
    for name in files:
        if not name[-3:]=='jpg':
            continue
        ID  = name.split('_')
        src_path = query_path + '/' + name
        dst_path = query_save_path + '/' + ID[0] 
        if not os.path.isdir(dst_path):
            os.mkdir(dst_path)
        copyfile(src_path, dst_path + '/' + name)

#-----------------------------------------
#multi-query
query_path = download_path + '/gt_bbox'
# for dukemtmc-reid, we do not need multi-query
if os.path.isdir(query_path):
    query_save_path = download_path + '/pytorch/multi-query'
    if not os.path.isdir(query_save_path):
        os.mkdir(query_save_path)

    for root, dirs, files in os.walk(query_path, topdown=True):
        for name in files:
            if not name[-3:]=='jpg':
                continue
            ID  = name.split('_')
Exemplo n.º 50
0
    def postflight(self, csv_input=None):
        '''
        Take rawlocalize and rawsimrt files for PSM annotation.
        Use global.profile for modification annotation.
        Merge result with original input file.
        Rename modsummary and global.profile to keep them as summary output files.
        '''
        # Build lookup from global.profile.tsv
        internal_precision = 100000
        global_profile = 'global.profile.tsv'
        rawloc_file = '01.rawlocalize'
        rawsimrt_file = '01.rawsimrt'
        global_mod_dict = {}
        print('[ POSTFLGH ] reading global.profile.tsv')
        with open(global_profile, 'r') as gp_in:
            gp_reader = csv.DictReader(gp_in, delimiter='\t')
            for row in gp_reader:
                lower_mass = float(row['PeakLower']) * internal_precision
                upper_mass = float(row['PeakUpper']) * internal_precision
                peak_mass = float(row['PeakApex'])
                modifications = []
                for k, v in row.items():
                    if 'Potential Modification' in k:
                        if v == '':
                            continue
                        modifications.append(v)
                modifications = ';'.join(modifications)
                for mass in range(int(lower_mass), int(upper_mass)):
                    assert mass not in global_mod_dict.keys(), '''
                    [ERROR] Overlapping mass shift annotation peaks in PTM-Shepherd.
                    {0}
                    '''.format(mass)
                    global_mod_dict[mass] = (peak_mass, modifications)

        print('[ POSTFLGH ] reading rawlocalize')
        rawloc_dict = {}
        with open(rawloc_file, 'r') as rawloc_in:
            rawloc_reader = csv.DictReader(rawloc_in, delimiter='\t')
            for rawloc_line_dict in rawloc_reader:
                spec_pep_key = '{0}||{1}'.format(rawloc_line_dict['Spectrum'],
                                                 rawloc_line_dict['Peptide'])
                if spec_pep_key not in rawloc_dict.keys():
                    rawloc_dict[spec_pep_key] = []
                tmp_dict = {}
                for k in [
                        'Localized_Pep',
                        'MaxHyper_Unloc',
                        'MaxHyper_Loc',
                        'MaxPeaks_Unloc',
                        'MaxPeaks_Loc',
                ]:
                    tmp_dict[k] = rawloc_line_dict[k]
                rawloc_dict[spec_pep_key].append(tmp_dict)

        print('[ POSTFLGH ] reading rawsimrt')
        simrt_dict = {}
        with open(rawsimrt_file, 'r') as rawsimrt_in:
            simrt_reader = csv.DictReader(rawsimrt_in, delimiter='\t')
            for simrt_line_dict in simrt_reader:
                spec_pep_key = '{0}||{1}'.format(simrt_line_dict['Spectrum'],
                                                 simrt_line_dict['Peptide'])
                if spec_pep_key not in simrt_dict.keys():
                    simrt_dict[spec_pep_key] = []
                tmp_dict = {}
                for k in [
                        'DeltaRT',
                        'nZeroSpecs_DeltaRT',
                        'Avg_Sim',
                        'Avg_ZeroSim',
                ]:
                    tmp_dict[k] = simrt_line_dict[k]
                simrt_dict[spec_pep_key].append(tmp_dict)

        print('reading original input csv and writing output ...')
        if csv_input is None:
            csv_input = self.params['translations']['csv_input_file']
        csv_output = self.params['translations']['output_file_incl_path']
        #read from original input csv file
        with open(csv_input, 'r') as csv_in, \
            open(csv_output, 'w') as csv_out:
            csv_reader = csv.DictReader(csv_in)
            fieldnames = csv_reader.fieldnames
            fieldnames.extend([
                'Mass Difference Annotations',
                'PTM-Shepherd:Localized_Pep',
                'PTM-Shepherd:MaxHyper_Unloc',
                'PTM-Shepherd:MaxHyper_Loc',
                'PTM-Shepherd:MaxPeaks_Unloc',
                'PTM-Shepherd:MaxPeaks_Loc',
                'PTM-Shepherd:DeltaRT',
                'PTM-Shepherd:nZeroSpecs_DeltaRT',
                'PTM-Shepherd:Avg_Sim',
                'PTM-Shepherd:Avg_ZeroSim',
            ])
            if sys.platform == 'win32':
                lineterminator = '\n'
            else:
                lineterminator = '\r\n'
            csv_writer = csv.DictWriter(csv_out,
                                        fieldnames=fieldnames,
                                        lineterminator=lineterminator)
            csv_writer.writeheader()
            for n, line_dict in enumerate(csv_reader):
                total_mass_shift = 0
                for single_mass_shift in line_dict['Mass Difference'].split(
                        ';'):
                    if single_mass_shift == '':
                        continue
                    total_mass_shift += float(single_mass_shift.split(':')[0])
                    # break
                transformed_mass_shift = int(total_mass_shift *
                                             internal_precision)
                peak_mass, annot_modifications = global_mod_dict.get(
                    transformed_mass_shift, (line_dict['Mass Difference'], ''))
                line_dict['Mass Difference'] = '{0}:n'.format(peak_mass)
                line_dict['Mass Difference Annotations'] = annot_modifications
                spec_pep_key = '{0}||{1}'.format(line_dict['Spectrum Title'],
                                                 line_dict['Sequence'])
                # rawloc_line_dict =  next(rawloc_reader)
                # print(rawloc_line_dict)
                assert len(rawloc_dict[spec_pep_key]) == 1, '''
                [ERROR] Spectrum Title + Peptide from original input matches multiple
                [ERROR] entries in rawlocalization output
                {0}
                '''.format(rawloc_dict[spec_pep_key])
                # for rawloc_line_dict in rawloc_dict[spec_pep_key]:
                rawloc_line_dict = rawloc_dict[spec_pep_key][0]
                line_dict['PTM-Shepherd:Localized_Pep'] = rawloc_line_dict[
                    'Localized_Pep']
                line_dict['PTM-Shepherd:MaxHyper_Unloc'] = rawloc_line_dict[
                    'MaxHyper_Unloc']
                line_dict['PTM-Shepherd:MaxHyper_Loc'] = rawloc_line_dict[
                    'MaxHyper_Loc']
                line_dict['PTM-Shepherd:MaxPeaks_Unloc'] = rawloc_line_dict[
                    'MaxPeaks_Unloc']
                line_dict['PTM-Shepherd:MaxPeaks_Loc'] = rawloc_line_dict[
                    'MaxPeaks_Loc']
                # rawsimrt_line_dict =  next(rawsimrt_reader)
                # print(rawsimrt_line_dict)
                assert len(simrt_dict[spec_pep_key]) == 1, '''
                [ERROR] Spectrum Title + Peptide from original input matches multiple
                [ERROR] entries in rawsimrt output
                {0}
                '''.format(simrt_dict[spec_pep_key])
                rawsimrt_line_dict = simrt_dict[spec_pep_key][0]
                line_dict['PTM-Shepherd:DeltaRT'] = rawsimrt_line_dict[
                    'DeltaRT']
                line_dict[
                    'PTM-Shepherd:nZeroSpecs_DeltaRT'] = rawsimrt_line_dict[
                        'nZeroSpecs_DeltaRT']
                line_dict['PTM-Shepherd:Avg_Sim'] = rawsimrt_line_dict[
                    'Avg_Sim']
                line_dict['PTM-Shepherd:Avg_ZeroSim'] = rawsimrt_line_dict[
                    'Avg_ZeroSim']

                csv_writer.writerow(line_dict)

        shutil.copyfile(
            'global.modsummary.tsv',
            self.params['translations']['output_file_incl_path'].replace(
                '.csv', '_modsummary.tsv'))
        shutil.copyfile(
            'global.profile.tsv',
            self.params['translations']['output_file_incl_path'].replace(
                '.csv', '_profile.tsv'))
        self.created_tmp_files.extend([
            'global.modsummary.tsv',
            'global.profile.tsv',
            '01.histo',
            '01.locprofile.txt',
            '01.ms2counts',
            '01.rawlocalize',
            '01.rawsimrt',
            '01.simrtprofile.txt',
            'combined.histo',
            'global.locprofile.txt',
            'global.simrtprofile.txt',
        ])
        shutil.rmtree(self.tmp_dir)
        return
Exemplo n.º 51
0
        subblocks.append(code_snippet(signature))
        docstring = function.__doc__
        if docstring:
            subblocks.append(process_function_docstring(docstring))
        blocks.append('\n\n'.join(subblocks))

    if not blocks:
        raise RuntimeError('Found no content for page ' + page_data['page'])

    mkdown = '\n----\n\n'.join(blocks)
    # save module page.
    # Either insert content into existing page,
    # or create page otherwise
    page_name = page_data['page']
    path = os.path.join('sources', page_name)
    if os.path.exists(path):
        template = open(path).read()
        assert '{{autogenerated}}' in template, (
            'Template found for ' + path +
            ' but missing {{autogenerated}} tag.')
        mkdown = template.replace('{{autogenerated}}', mkdown)
        print('...inserting autogenerated content into template:', path)
    else:
        print('...creating new page with autogenerated content:', path)
    subdir = os.path.dirname(path)
    if not os.path.exists(subdir):
        os.makedirs(subdir)
    open(path, 'w').write(mkdown)

shutil.copyfile('../CONTRIBUTING.md', 'sources/contributing.md')
Exemplo n.º 52
0
try:
    download(IMAGE.temp['url'], Image_dir, IMAGE.count)
except:
    IMAGE.false.append(IMAGE.temp['url'])
    IMAGE.count1 = len(IMAGE.false)
    print('FATAL: Could not download image {} {} from {}'.format(
        IMAGE.count1, Image_dir, url_tmp))
print("Start : %s" % time.ctime())
time.sleep(1)
print("End : %s" % time.ctime())
if IS_VALID_JPEG('./temp_val/{}.jpeg'.format(IMAGE.count)):
    md5 = IMAGE.GetFileMd5('./temp_val/{}.jpeg'.format(IMAGE.count))
    print(md5list[IMAGE.count])
    if md5 == md5list[IMAGE.count]:
        try:
            shutil.copyfile('./temp_val/{}.jpeg'.format(IMAGE.count),
                            Image_dir)
            counterpart_path = os.path.join(
                os.path.join('landmarks_val', str(IMAGE.temp['id'])),
                '{}.jpeg'.format(IMAGE.count))
            image_line_xy = '{},{},{},{},{},{}{}'.format(
                counterpart_path.strip(), IMAGE.temp['x1'],
                IMAGE.temp['y1'], IMAGE.temp['x2'], IMAGE.temp['y2'],
                IMAGE.temp['id'], '\n')
            with open('landmarks_val_xy.txt', 'a') as f2:
                f2.writelines(image_line_xy)
            print('Success: saved image as {}'.format(counterpart_path))
        except:
            counterpart_path = os.path.join(
                os.path.join('landmarks_val', str(IMAGE.temp['id'])),
                '{}.jpeg'.format(IMAGE.count))
            wrong_line = '{}{}'.format(counterpart_path, '\n')
Exemplo n.º 53
0
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 24 13:09:37 2020

@author: qtckp
"""

import os.path as op
from shutil import copyfile

model_data_path = './model/model_data'

from stop_words import get_stop_words

stop_words = get_stop_words('ru')

with open(op.join(model_data_path, 'stopwords_base.txt'),
          'w',
          encoding='utf-8') as file:
    file.writelines([s + '\n' for s in stop_words])

copyfile(op.join(model_data_path, 'stopwords_base.txt'),
         op.join(model_data_path, 'stopwords_used.txt'))
Exemplo n.º 54
0
settings_path = read_from_control(controlFolder/controlFile,'settings_summa_path')

# Specify default path if needed
if settings_path == 'default':
    settings_path = make_default_path('settings/SUMMA') # outputs a Path()
else:
    settings_path = Path(settings_path) # make sure a user-specified path is a Path()

# Make the folder if it doesn't exist
settings_path.mkdir(parents=True, exist_ok=True)
    
    
# --- Copy the settings
# Loop over all files and copy
for file in os.listdir(base_settings_path):
    copyfile(base_settings_path/file, settings_path/file);
    
    
# --- Code provenance
# Generates a basic log file in the domain folder and copies the control file and itself there.

# Set the log path and file name
logPath = settings_path
log_suffix = '_copy_base_settings.txt'

# Create a log folder
logFolder = '_workflow_log'
Path( logPath / logFolder ).mkdir(parents=True, exist_ok=True)

# Copy this script
thisFile = '1_copy_base_settings.py'
Exemplo n.º 55
0
def setup_ssl_options(config_dir, src, dest):  # pragma: no cover
    """Move the ssl_options into position and return the path."""
    option_path = os.path.join(config_dir, dest)
    shutil.copyfile(src, option_path)
    return option_path
Exemplo n.º 56
0
def main(args):
  """

  if multi-dim => LOCI & ABOD
  else => MZS

  FILE  data set in CSV format
  """

  if len(args) < 1:
    print_msg("No file argument supplied")
    sys.exit(1)

  file_path = args[0]

  if not ".csv" in file_path:
    # A certain file extension is required to
    # make path manipulation easier.
    print_msg("Input file must have file extension .csv!")
    print_msg("File: "+file_path)
    sys.exit(1)

  if not os.path.isfile(file_path):
    print_msg("File not found! "+file_path)
    sys.exit(1)

  #Parse the data set's ID
  dataset_id = file_path.split("/")
  if len(dataset_id) > 0:
    dataset_id = dataset_id[-1]
  else:
    dataset_id = file_path
  dataset_id = dataset_id.split("-")[0]

  print_msg("Analyzing "+file_path)

  # Figure out the number of dimensions of the data set
  file_handle = open(file_path, "r")
  line = file_handle.readline()
  file_handle.close()
  dims = len(line.split(","))
  print_msg(str(dims) + " dimension(s)")

  output_path = OUTPUT_PATH
  output_path = os.path.abspath(output_path) + "/"

  if dims > 1:
    # Make sure data set contains at least 20 rows
    file_handle = open(file_path, "r")
    rows = sum(1 for i in file_handle)
    file_handle.close()
    if rows < 20:
      print_msg("Number of data points has to be >20!")
      print_msg("Aborting!")
      sys.exit(1)

    # Configure output path for ELKI
    output_path = output_path+dataset_id+"/"

    # LOCI, ABOD
    loci_file_name = "loci-mdef-outlier_order.txt"
    loci_parsed_file_path = output_path+"parsed-"+loci_file_name
    abod_file_name = "abod-outlier_order.txt"
    abod_parsed_file_path = output_path+"parsed-"+abod_file_name

    # LOCI parameters
    loci_alpha = cm.get("loci", "alpha")
    loci_nmin = cm.get("loci", "nmin")
    loci_std = cm.get("loci", "std")

    # Calculate rmax for LOCI
    loci_rmax_args = []
    loci_rmax_args.append(file_path)
    loci_rmax_args.append(loci_alpha)
    loci_rmax = locirmax.main(loci_rmax_args)

    # Run LOCI
    run_elki(
        file_path,
        output_path,
        "loci",
        {
          "loci.alpha": loci_alpha,
          "loci.rmax": loci_rmax,
          "loci.nmin": loci_nmin
        }
    )

    # Run ABOD
    run_elki(
        file_path,
        output_path,
        "abod",
        None
    )

    # Remove description strings and ID columns
    print_msg("Removing string clutter from result files")
    dataparser.parseResultFile(output_path+loci_file_name, loci_parsed_file_path)
    dataparser.parseResultFile(output_path+abod_file_name, abod_parsed_file_path)

    # Label outliers detected by LOCI
    label_args = ["lociLabelOutliers.R"]
    label_args.append(loci_parsed_file_path)
    label_args.append(str(loci_std))
    label_command = ["Rscript"]
    label_command.extend(label_args)
    print_msg("Running LOCI labelling: "+str(label_command))

    try:
      subprocess.check_output(label_command)
    except subprocess.CalledProcessError as ex:
      print_msg("")
      print_msg("Process failed!")
      print_msg("Process command "+str(ex.cmd))
      print_msg("Process output "+ex.output)
      return

  else:
    # MZS
    mzs_std = cm.get("mzs", "std")

    # Copy data to results directory, create dir if not exists
    file_name = file_path.split("/")[-1]
    mzs_file_path = output_path+file_name
    mzs_file_path = mzs_file_path.replace(".csv", "-original.csv")

    if not os.path.exists(output_path):
      os.makedirs(output_path)
    shutil.copyfile(file_path, mzs_file_path)

    # Run MZS
    mzs_command = ["Rscript"]
    mzs_args = ["modifiedZScore.R"]
    mzs_args.append(mzs_file_path)
    mzs_args.append(str(mzs_std))
    mzs_command.extend(mzs_args)
    print_msg("Running MZS: "+str(mzs_command))

    try:
      subprocess.check_output(mzs_command)
    except subprocess.CalledProcessError as ex:
      print_msg("")
      print_msg("Process failed!")
      print_msg("Process command "+str(ex.cmd))
      print_msg("Process output "+ex.output)
      return

    # Create plots
    plot_command = ["Rscript"]
    plot_args = ["plotOneDimOutliers.R"]
    plot_args.append(mzs_file_path.replace("original", "labelled"))
    plot_command.extend(plot_args)
    print_msg("Plotting: " + str(plot_command))

    try:
      subprocess.check_output(plot_command)
    except subprocess.CalledProcessError as ex:
      print_msg("")
      print_msg("Process failed!")
      print_msg("Process command "+str(ex.cmd))
      print_msg("Process output "+ex.output)
      return

    # Run significance tests
    sig_command = ["Rscript"]
    sig_args = ["sigTest.R"]
    sig_args.append(mzs_file_path.replace("original", "labelled"))
    sig_command.extend(sig_args)
    print_msg("Running significance tests: "+str(sig_command))

    try:
      subprocess.check_output(sig_command)
    except subprocess.CalledProcessError as ex:
      print_msg("")
      print_msg("Process failed!")
      print_msg("Process command "+str(ex.cmd))
      print_msg("Process output "+ex.output)
      return

  print_msg("All analysis have been conducted.")
  print_msg("Check "+output_path+" for the results!")
  print_msg("\n")
Exemplo n.º 57
0
from shutil import copyfile
import os

# Folder where tifs are located
InputFolder = './out/sice_2020_fortran/'
OutFolder = 'out/SICE_2020_py/'
#
#os.mkdir(OutFolder)

#%% Copying files into processing folder   
def transfer_to_out_folder(old_name,new_name):
    copyfile(InputFolder+old_name, 
             OutFolder+new_name )

for i in range(21):
    copyfile(InputFolder+'olci_toa_toa'+str(i+1)+'.tif', 
             OutFolder+'r_TOA_'+str(i+1).zfill(2)+'.tif' )
#    bv.OutlookRaster(dat,('Oa'+str(i+1).zfill(2)))

transfer_to_out_folder('olci_toa_ozone.tif','O3.tif')
transfer_to_out_folder('olci_toa_water.tif','WV.tif')
transfer_to_out_folder('olci_toa_sza.tif','SZA.tif')
transfer_to_out_folder('olci_toa_saa.tif','SAA.tif')
transfer_to_out_folder('olci_toa_vza.tif','OZA.tif')
transfer_to_out_folder('olci_toa_vaa.tif','OAA.tif')
transfer_to_out_folder('olci_toa_height.tif','height.tif')
copyfile('masks/greenland.tif',  OutFolder+'mask.tif' )

#%% turning geotiffs into sice input
print("Reading input ")

import rasterio as rio
Exemplo n.º 58
0

# Create ll output folder.
if not os.path.exists(outputDir):
    os.makedirs(outputDir)

# Create the .love file.
with ZipFile("./ll/ll.love", "w") as zipObj:
    addDir("./data")
    addDir("./game")
    addDir("./lib")
    addDir("./shader")
    zipObj.write("./main.lua")

# Copy love.exe and *.dll from the love installation directory into the
# output directory. A raw string avoids accidental escape sequences in the
# Windows path.
love_dir = r"C:\Program Files\LOVE"
shutil.copyfile(os.path.join(love_dir, "love.exe"),
                os.path.join(outputDir, "ll.exe"))
for dll in ("love.dll", "lua51.dll", "mpg123.dll", "msvcp120.dll",
            "msvcr120.dll", "OpenAL32.dll", "SDL2.dll"):
    shutil.copyfile(os.path.join(love_dir, dll), os.path.join(outputDir, dll))
Exemplo n.º 59
0
def mv(oldfile, newfile):
    shutil.copyfile(oldfile, newfile)
    os.remove(oldfile)
Exemplo n.º 60
0
def transfer_to_out_folder(old_name,new_name):
    copyfile(InputFolder+old_name, 
             OutFolder+new_name )