Example #1
 def destroy(self):
     try:
         os.unlink(self.filename)
     except OSError as error:
         if error.errno != errno.ENOENT:  # That's what we wanted anyway.
             raise
     self.close()
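A side note on the pattern above: on Python 3 the same "remove it if it exists" behaviour can be written without inspecting errno. A minimal sketch, assuming Python 3.4+ (FileNotFoundError and contextlib.suppress); the pathlib variant mentioned in the comment needs 3.8+:

import contextlib
import os

def remove_if_present(filename):
    # Same effect as catching OSError and re-raising unless errno == ENOENT.
    with contextlib.suppress(FileNotFoundError):
        os.unlink(filename)

# On Python 3.8+ this one-liner is equivalent:
# pathlib.Path(filename).unlink(missing_ok=True)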
Example #2
 def test_check_links_fail(self):
     with cd(self.target_dir):
         os.unlink(os.path.join("output", "archive.html"))
         try:
             __main__.main(['check', '-l'])
         except SystemExit as e:
             self.assertNotEqual(e.code, 0)
Example #3
def slicethread(fname, oname, wname, cfg, jobid):
  retcode = "fail"
  try:
    con = sqlite3.connect('db.sqlite')
    con.row_factory = sqlite3.Row

    cfg = "config.ini" if cfg is None else cfg

    proc = subprocess.Popen(["slic3r",
      "--load", cfg,
      fname, "-o", wname+'.gcode'])
    con.execute('insert into journal(cmd, pid, action, status, timestamp) values(?,?,?,?,DateTime(\'now\'))',
      ('slice {} -c {}'.format(os.path.basename(fname),
                               os.path.basename(cfg)), proc.pid, 'start',
        0 if proc.returncode is None else 1))
    con.commit()
    retcode = proc.wait()
    con.execute('insert into journal(cmd, pid, action, status, timestamp) values(?,?,?,?,DateTime(\'now\'))',
      ('slice {} -c {}'.format(os.path.basename(fname),
                               os.path.basename(cfg)), proc.pid, 'stop',
        proc.returncode))
    con.commit()
    try:
      os.unlink(oname+'.gcode')
    except OSError:
      pass  # the old output file may not exist yet
    finally:
      try:
        os.rename(wname+'.gcode', oname+'.gcode')
      except Exception:
        logging.info(wname + '.gcode')
        logging.info(oname + '.gcode')
  finally:
    _work_done(jobid, val=retcode)
Example #4
    def testNoDanglingFileDescriptorAfterCloseVariant2(self):
        ''' Test that when closing the provider all file handles are released '''

        datasource = os.path.join(self.basetestpath, 'testNoDanglingFileDescriptorAfterCloseVariant2.csv')
        with open(datasource, 'wt') as f:
            f.write('id,WKT\n')
            f.write('1,\n')
            f.write('2,POINT(2 49)\n')

        vl = QgsVectorLayer('{}|layerid=0'.format(datasource), 'test', 'ogr')
        self.assertTrue(vl.isValid())
        # Consume all features.
        myiter = vl.getFeatures()
        for feature in myiter:
            pass
        # The iterator is closed, but the corresponding connection is still not closed
        if sys.platform.startswith('linux'):
            self.assertEqual(count_opened_filedescriptors(datasource), 2)

        # Should release one file descriptor
        del vl

        # Non-portable; on Windows the check is done by trying to unlink instead
        if sys.platform.startswith('linux'):
            self.assertEqual(count_opened_filedescriptors(datasource), 0)

        # Check that deletion works well (can only fail on Windows)
        os.unlink(datasource)
        self.assertFalse(os.path.exists(datasource))
Example #5
def _decr_iscsiSR_refcount(targetIQN, uuid):
    filename = os.path.join(ISCSI_REFDIR, targetIQN)
    if not os.path.exists(filename):
        return 0
    try:
        f = open(filename, 'a+')
    except:
        raise xs_errors.XenError('LVMRefCount', \
                                 opterr='file %s' % filename)
    output = []
    refcount = 0
    for line in filter(match_uuid, f.readlines()):
        if line.find(uuid) == -1:
            output.append(line[:-1])
            refcount += 1
    if not refcount:
        os.unlink(filename)
        return refcount

    # Re-open file and truncate
    f.close()
    f = open(filename, 'w')
    for i in range(0,refcount):
        f.write("%s\n" % output[i])
    f.close()
    return refcount
Example #6
    def test_create_volume_from_image_exception(self):
        """Verify that create volume from image, the volume status is
        'downloading'."""
        dst_fd, dst_path = tempfile.mkstemp()
        os.close(dst_fd)

        self.stubs.Set(self.volume.driver, 'local_path', lambda x: dst_path)

        image_id = 'aaaaaaaa-0000-0000-0000-000000000000'
        # creating volume testdata
        volume_id = 1
        db.volume_create(self.context,
                         {'id': volume_id,
                          'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                          'display_description': 'Test Desc',
                          'size': 20,
                          'status': 'creating',
                          'host': 'dummy'})

        self.assertRaises(exception.ImageNotFound,
                          self.volume.create_volume,
                          self.context,
                          volume_id, None, None, None,
                          None,
                          image_id)
        volume = db.volume_get(self.context, volume_id)
        self.assertEqual(volume['status'], "error")
        # cleanup
        db.volume_destroy(self.context, volume_id)
        os.unlink(dst_path)
Example #7
def get(url, path, verbose=False):
    sha_url = url + ".sha256"
    with tempfile.NamedTemporaryFile(delete=False) as temp_file:
        temp_path = temp_file.name
    with tempfile.NamedTemporaryFile(suffix=".sha256", delete=False) as sha_file:
        sha_path = sha_file.name

    try:
        download(sha_path, sha_url, False, verbose)
        if os.path.exists(path):
            if verify(path, sha_path, False):
                if verbose:
                    print("using already-download file " + path)
                return
            else:
                if verbose:
                    print("ignoring already-download file " + path + " due to failed verification")
                os.unlink(path)
        download(temp_path, url, True, verbose)
        if not verify(temp_path, sha_path, verbose):
            raise RuntimeError("failed verification")
        if verbose:
            print("moving {} to {}".format(temp_path, path))
        shutil.move(temp_path, path)
    finally:
        delete_if_present(sha_path, verbose)
        delete_if_present(temp_path, verbose)
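The delete_if_present helper called in the finally block above is not part of this listing; a plausible minimal implementation matching the call signature used here (hypothetical, not the project's actual code) could look like:

import os

def delete_if_present(path, verbose=False):
    # Best-effort cleanup of a temporary file that may or may not exist.
    if os.path.exists(path):
        if verbose:
            print("removing " + path)
        os.unlink(path)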
Example #8
    def POST(self):
        web.header("Content-Type", "text/html")

        i = web.input(myfile = {})
        (parent, child) = (safepath(i["path"]), i["myfile"].filename)

        if not auth.can_write(parent):
            return json.dumps({"success": False, "msg": "Unauthorized"})

        srcp = common.Mapper.d2s(parent)
        if not srcp.startswith(config["juno-home"] + "/"):
            return json.dumps({"success": False, "msg": "Unauthorized"})

        if not os.path.isdir(srcp) or not child or child[0] == "." \
                or "/" in child:
            return json.dumps({"success": False, "msg": "Unauthorized"})

        srcp = os.path.join(srcp, child)
        if os.path.exists(srcp):
            return json.dumps({"success": False, "msg": "Unauthorized"})
        
        f = open(srcp, "w")
        f.write(i["myfile"].value)
        f.close()
        
        if not odptools.odf.Odp.is_odp(srcp):
            os.unlink(srcp)
            return json.dumps({"success": False, "msg": "Unauthorized"})

        if subprocess.call(("./index.py", "-p", srcp)):
            os.unlink(srcp)
            return json.dumps({"success": False, "msg": "Unauthorized"})

        return json.dumps({"success": True})
Example #9
def splitFasta(seqFile, segments):
    # Split the sequence file into as many fragments as appropriate,
    # depending on the size of original_fasta.
    # Clean up any segment files from previous runs before creating new ones.
    for i in segments:
        os.unlink(i)
    current_file_index = 0
    #current_size = 0
    for line in open(original_fasta):
        #
        # start a new file for each accession line
        #
        if line[0] == '>':
            #current_size +=1
            current_file_index += 1
            #if (current_size >= pack_size)
            file_name = "%d.segment" % current_file_index
            file_path = os.path.join(temp_directory, file_name)
            current_file = open(file_path, "w")
            #current_size = 0
        if current_file_index:
            current_file.write(line)
    end_file = open(os.path.join(temp_directory, "end_tmp"), 'w')
    end_file.write(str(current_file_index))
    end_file.close()
    os.rename(os.path.join(temp_directory, "end_tmp"), os.path.join(temp_directory, "end"))
Example #10
 def _testStreamRoundtrip(self):
   inD = open(self.fName).read()
   supp = Chem.SDMolSupplier(self.fName)
   outName = tempfile.mktemp('.sdf')
   writer = Chem.SDWriter(outName)
   m1 = next(supp)
   for m in supp:
     writer.write(m)
   writer.flush()
   writer = None
   outD = open(outName,'r').read()
   try:
     os.unlink(outName)
   except Exception:
     import time
     time.sleep(1)
     try:
       os.unlink(outName)
     except Exception:
       pass
   assert inD.count('$$$$')==outD.count('$$$$'),'bad nMols in output'
   io = StringIO(outD)
   supp = Chem.SDMolSupplier(stream=io)
   outD2 = supp.Dump()
   assert outD2.count('$$$$')==len(supp),'bad nMols in output'
   assert outD2.count('$$$$')==outD.count('$$$$'),'bad nMols in output'
   assert outD2==outD,'bad outd'
Example #11
 def print_xcf(self, filename_or_obj, *args, **kwargs):
     "Writes the figure to a GIMP XCF image file"
     # If filename_or_obj is a file-like object we need a temporary file for
     # GIMP's output too...
     if is_string(filename_or_obj):
         out_temp_handle, out_temp_name = None, filename_or_obj
     else:
         out_temp_handle, out_temp_name = tempfile.mkstemp(suffix='.xcf')
     try:
         # Create a temporary file and write the "layer" to it as a PNG
         in_temp_handle, in_temp_name = tempfile.mkstemp(suffix='.png')
         try:
             FigureCanvasAgg.print_png(self, in_temp_name, *args, **kwargs)
             run_gimp_script(
                 SINGLE_LAYER_SCRIPT.format(
                     input=quote_string(in_temp_name),
                     output=quote_string(out_temp_name)))
         finally:
             os.close(in_temp_handle)
             os.unlink(in_temp_name)
     finally:
         if out_temp_handle:
             os.close(out_temp_handle)
             # If we wrote the XCF to a temporary file, write its content to
             # the file-like object we were given (the copy is chunked as
             # XCF files can get pretty big)
             with open(out_temp_name, 'rb') as source:
                 for chunk in iter(lambda: source.read(131072), b''):
                     filename_or_obj.write(chunk)
             os.unlink(out_temp_name)
Example #12
    def __init__(self, flavor_id=""):
        import sys
        self.initialized = False
        basename = os.path.splitext(os.path.abspath(sys.argv[0]))[0].replace("/", "-").replace(":", "").replace("\\", "-") + '-%s' % flavor_id + '.lock'
        # os.path.splitext(os.path.abspath(sys.modules['__main__'].__file__))[0].replace("/", "-").replace(":", "").replace("\\", "-") + '-%s' % flavor_id + '.lock'
        self.lockfile = os.path.normpath(tempfile.gettempdir() + '/' + basename)

        logger.debug("SingleInstance lockfile: " + self.lockfile)
        if sys.platform == 'win32':
            try:
                # file already exists, we try to remove (in case previous execution was interrupted)
                if os.path.exists(self.lockfile):
                    os.unlink(self.lockfile)
                self.fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
            except OSError:
                exc_type, e, tb = sys.exc_info()
                if e.errno == 13:  # EACCES: the lock file is held by another instance
                    logger.error("Another instance is already running, quitting.")
                    sys.exit(-1)
                print(e.errno)
                raise
        else:  # non Windows
            import fcntl
            self.fp = open(self.lockfile, 'w')
            try:
                fcntl.lockf(self.fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError:
                logger.warning("Another instance is already running, quitting.")
                sys.exit(-1)
        self.initialized = True
Example #13
 def test_Writer(self):
   " tests writes using a file name "
   with open(self.fName,'r') as inf:
     inD = inf.read()
   supp = Chem.SDMolSupplier(self.fName)
   outName = tempfile.mktemp('.sdf')
   writer = Chem.SDWriter(outName)
   m1 = next(supp)
   writer.SetProps(m1.GetPropNames())
   for m in supp:
     writer.write(m)
   writer.flush()
   # The writer does not have an explicit "close()", so we need to
   # let the garbage collector kick in to close the file.
   writer = None
   with open(outName,'r') as inf:
     outD = inf.read()
   # The file should be closed, but if it isn't, and this
   # is Windows, then the unlink() can fail. Wait and try again.
   try:
     os.unlink(outName)
   except Exception:
     import time
     time.sleep(1)
     try:
       os.unlink(outName)
     except Exception:
       pass
   self.assertEqual(inD.count('$$$$'),outD.count('$$$$'),'bad nMols in output')
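Both RDKit tests above (Examples #10 and #13) obtain their output path from tempfile.mktemp, which only returns a name and is documented as race-prone. A sketch of the same setup using tempfile.mkstemp, which creates the file atomically and hands back a descriptor; the payload written here is a placeholder:

import os
import tempfile

fd, out_name = tempfile.mkstemp(suffix='.sdf')
try:
    with os.fdopen(fd, 'w') as out:
        out.write('placeholder SD records\n$$$$\n')
finally:
    os.unlink(out_name)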
Example #14
def remove_test_databases():
    db = dirs.tmp('test.db')
    if os.path.exists(db):
        os.unlink(db)
    pristine = dirs.tmp('test.db.pristine')
    if os.path.exists(pristine):
        os.unlink(pristine)
Example #15
def remove(path):
    if os.path.exists(path):
        print ' removing: ', path
        if os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.unlink(path)
Example #16
def run_debugger(testname, pythonfile, pydb_opts='', args='',
                 outfile=None):
    global srcdir, builddir, pydir

    rightfile   = os.path.join(srcdir, 'data', "%s.right" % testname)

    os.environ['PYTHONPATH']=os.pathsep.join(sys.path)
    cmdfile     = os.path.join(srcdir, "%s.cmd"   % testname)
    outfile     = "%s.out" % testname
    outfile_opt = '--output=%s ' % outfile

    # print "builddir: %s, cmdfile: %s, outfile: %s, rightfile: %s" % \
    # (builddir, cmdfile, outfile, rightfile)

    if os.path.exists(outfile): os.unlink(outfile)

    cmd = "%s --command %s %s %s %s %s" % \
          (pydb_path, cmdfile, outfile_opt, pydb_opts, pythonfile, args)
    
    os.system(cmd)
    fromfile  = rightfile
    fromdate  = time.ctime(os.stat(fromfile).st_mtime)
    fromlines = open(fromfile, 'U').readlines()
    tofile    = outfile
    todate    = time.ctime(os.stat(tofile).st_mtime)
    tolines   = open(tofile, 'U').readlines()
    diff = list(difflib.unified_diff(fromlines, tolines, fromfile,
                                     tofile, fromdate, todate))
    if len(diff) == 0:
        os.unlink(outfile)
    for line in diff:
        print line,
    return len(diff) == 0
Example #17
 def _reset_database(self, conn_string):
     conn_pieces = urlparse.urlparse(conn_string)
     if conn_string.startswith('sqlite'):
         # We can just delete the SQLite database, which is
         # the easiest and cleanest solution
         db_path = conn_pieces.path.strip('/')
         if db_path and os.path.exists(db_path):
             os.unlink(db_path)
         # No need to recreate the SQLite DB. SQLite will
         # create it for us if it's not there...
     elif conn_string.startswith('mysql'):
         # We can execute the MySQL client to destroy and re-create
         # the MYSQL database, which is easier and less error-prone
         # than using SQLAlchemy to do this via MetaData...trust me.
         database = conn_pieces.path.strip('/')
         loc_pieces = conn_pieces.netloc.split('@')
         host = loc_pieces[1]
         auth_pieces = loc_pieces[0].split(':')
         user = auth_pieces[0]
         password = ""
         if len(auth_pieces) > 1:
             if auth_pieces[1].strip():
                 password = "******" % auth_pieces[1]
         sql = ("drop database if exists %(database)s; "
                "create database %(database)s;") % locals()
         cmd = ("mysql -u%(user)s %(password)s -h%(host)s "
                "-e\"%(sql)s\"") % locals()
         exitcode, out, err = execute(cmd)
         self.assertEqual(0, exitcode)
Example #18
 def __restore_file(self,path):
     try:
         os.unlink(path)
     except OSError:
         pass
     if os.path.exists(path + '.rpmnew'):
         os.rename(path + '.rpmnew', path)
Example #19
    def _stage_final_image(self):
        try:
            fs_related.makedirs(self.__ensure_isodir() + "/LiveOS")

            minimal_size = self._resparse()

            if not self.skip_minimize:
                fs_related.create_image_minimizer(self.__isodir + \
                                                      "/LiveOS/osmin.img",
                                                  self._image,
                                                  minimal_size)

            if self.skip_compression:
                shutil.move(self._image, self.__isodir + "/LiveOS/ext3fs.img")
            else:
                fs_related.makedirs(os.path.join(
                                        os.path.dirname(self._image),
                                        "LiveOS"))
                shutil.move(self._image,
                            os.path.join(os.path.dirname(self._image),
                                         "LiveOS", "ext3fs.img"))
                fs_related.mksquashfs(os.path.dirname(self._image),
                           self.__isodir + "/LiveOS/squashfs.img")

            self.__create_iso(self.__isodir)

            if self.pack_to:
                isoimg = os.path.join(self._outdir, self.name + ".iso")
                packimg = os.path.join(self._outdir, self.pack_to)
                misc.packing(packimg, isoimg)
                os.unlink(isoimg)

        finally:
            shutil.rmtree(self.__isodir, ignore_errors = True)
            self.__isodir = None
Example #20
    def invoke_tool(self, test_name, reference_file, current_file, diff_file, no_save=None):

        if no_save:
            response = ReconcileTool._read_console(
                "\n!!! MERGING NOT ALLOWED for {}: {}. Want to start kdiff3? ([y] | *): ".format(test_name, no_save)
            )
            if response != "y":
                return None

            cmd = (
                'kdiff3 "{reference_file}" --L1 "{test_name}_REFERENCE" ' '"{current_file}" --L2 "{test_name}_CURRENT" '
            ).format(reference_file=reference_file, current_file=current_file, test_name=test_name)
        else:
            merged_file = self._aux_file_name(current_file, "merged")

            cmd = (
                'kdiff3 -m "{reference_file}" --L1 "{test_name}_REFERENCE" '
                '"{current_file}" --L2 "{test_name}_CURRENT" '
                ' -o "{merged_file}"'
            ).format(
                reference_file=reference_file, current_file=current_file, merged_file=merged_file, test_name=test_name
            )

        print(cmd)
        code = ReconcileTool._invoke_command(cmd)
        if no_save:
            return None
        else:
            if code == 0:
                # Merged ok
                return merged_file
            else:
                if os.path.isfile(merged_file):
                    os.unlink(merged_file)
                return None
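The _aux_file_name helper used above to derive the merged-file path is not shown in this listing; a hypothetical implementation consistent with how merged_file is used might be:

import os

def _aux_file_name(current_file, suffix):
    # Hypothetical: "out/result.json", "merged" -> "out/result.merged.json"
    root, ext = os.path.splitext(current_file)
    return "{0}.{1}{2}".format(root, suffix, ext)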
Example #21
def move_symlink(src, dst):
    """Create or overwrite a symlink"""
    try:
        os.unlink(dst)
    except OSError:
        pass
    os.symlink(src, dst)
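move_symlink above is the simplest form of the idiom, but it is not atomic: a reader can hit dst after the unlink and before the symlink. A sketch of an atomic replacement on POSIX, assuming a temporary name next to dst is acceptable:

import os

def replace_symlink(src, dst):
    # Build the new link under a temporary name, then rename it over dst.
    # On POSIX, rename() atomically replaces the destination, so readers see
    # either the old link or the new one, never a missing path.
    tmp = dst + '.tmp'
    os.symlink(src, tmp)
    try:
        os.rename(tmp, dst)
    except OSError:
        os.unlink(tmp)
        raise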
Example #22
File: util.py Project: cesaro/impo
def polyop (query, prefix='impo: ', unlink=True) :
    inpath = None
    outpath = None
    try :
        # save the query into temp file
        fd, inpath = tempfile.mkstemp (suffix='.pop.ml')
        f = os.fdopen (fd, 'w')
        f.write (query)
        f.close ()

        # run polyop
        cmd = ['polyop', inpath]
        exitcode, out = runit (cmd, prefix=prefix)
        if exitcode != 0 :
            raise Exception, 'polyop: exit code %d, output: "%s"' % (exitcode, out)
        #print prefix, 'exit code 0'
        #print prefix, 'stdout: "%s"' % out

        # load result
        outpath = inpath + '.res'
        with open (outpath) as f : res = f.read ()

    finally :
        # remove temporary files, unless disabled by user
        if unlink :
            if inpath != None : os.unlink (inpath)
            if outpath != None : os.unlink (outpath)

    # strip and return
    return res.strip (' \t\n')
Example #23
 def test_successful_download(self):
     """Test successful_download."""
     page = pywikibot.FilePage(self.site, 'File:Albert Einstein.jpg')
     filename = join_images_path('Albert Einstein.jpg')
     status_code = page.download(filename)
     self.assertTrue(status_code)
     os.unlink(filename)
Example #24
def build_gfortran(ctx, target):
    binpath = os.path.join(ctx.out_dir, "bin")
    manpath = os.path.join(ctx.out_dir, "share", "man", "man1")
    includepath = os.path.join(ctx.out_dir, "include")

    binfiles = os.listdir(binpath)
    manfiles = os.listdir(manpath)
    srcpath = ctx.env.SRCPATH

    ctx.venv_exec("""
        base="gcc-42-5666.3-darwin11"
        pushd %(srcpath)s/3rdparty
        rm -fr "$base"
        mkdir -p "$base"
        pushd "$base"
        xar -xf "../$base.pkg"
        mv *.pkg/Payload Payload.gz
        pax --insecure -rz -f Payload.gz -s ",./usr,$VIRTUAL_ENV,"
        ln -sf "$VIRTUAL_ENV/bin/gfortran-4.2" "$VIRTUAL_ENV/bin/gfortran"
        popd
        rm -fr "$base"
        popd
    """ % locals())

    # Delete other files installed
    shutil.rmtree(os.path.join(includepath, "gcc"))

    for f in os.listdir(binpath):
        if f not in binfiles and not "gfortran" in f:
            os.unlink(os.path.join(binpath, f))

    for f in os.listdir(manpath):
        if f not in manfiles and not "gfortran" in f:
            os.unlink(os.path.join(manpath, f))
Example #25
def linkBestCurrentSolution(dpath):
	alldsz = glob.glob(dpath + '/*dsz')
	currentLink = None
	bestDsz = None
	bestN = None
	stu = None
	for x in alldsz:
		if x.endswith('current.dsz'):
			assert os.path.islink(x)
			currentLink = os.readlink(x)
			continue
		fname = os.path.basename(x)
		m = CURSOL_RE_.match(fname)
		assert m is not None, ('failed to parse %s' % fname)
		nstu = m.group(1).upper()
		if stu is None:
			stu = nstu
		assert nstu == stu
		nth = int(m.group(2))
		if (bestDsz is None) or (nth > bestN):
			bestN = nth
			bestDsz = fname
	if bestDsz is None:
		return None
	currentDsz = os.path.join(dpath, stu + 'current.dsz')
	if os.path.islink(currentDsz):
		odsz = os.readlink(currentDsz)
		if odsz == bestDsz:
			return bestDsz
		os.unlink(currentDsz)
	os.symlink(bestDsz, currentDsz)
	return bestDsz
Example #26
File: fetch.py Project: d4s/osc
    def fetch(self, pac, prefix=''):
        # for use by the failure callback
        self.curpac = pac

        MirrorGroup._join_url = join_url
        mg = MirrorGroup(self.gr, pac.urllist, failure_callback=(self.failureReport, (), {}))

        if self.http_debug:
            print('\nURLs to try for package \'%s\':' % pac, file=sys.stderr)
            print('\n'.join(pac.urllist), file=sys.stderr)
            print(file=sys.stderr)

        try:
            with tempfile.NamedTemporaryFile(prefix='osc_build',
                                             delete=False) as tmpfile:
                mg.urlgrab(pac.filename, filename=tmpfile.name,
                           text='%s(%s) %s' % (prefix, pac.project, pac.filename))
                self.move_package(tmpfile.name, pac.localdir, pac)
        except URLGrabError as e:
            if self.enable_cpio and e.errno == 256:
                self.__add_cpio(pac)
                return
            print()
            print('Error:', e.strerror, file=sys.stderr)
            print('Failed to retrieve %s from the following locations '
                  '(in order):' % pac.filename, file=sys.stderr)
            print('\n'.join(pac.urllist), file=sys.stderr)
            sys.exit(1)
        finally:
            if os.path.exists(tmpfile.name):
                os.unlink(tmpfile.name)
Example #27
 def wait_for_match(self, images, similar_degree=0.98, timeout=300):
     """
     Compare VM screenshot with given images, if any image in the list
     matched, then return the image index, or return -1.
     """
     end_time = time.time() + timeout
     image_matched = False
     cropped_image = os.path.join(data_dir.get_tmp_dir(), "croped.ppm")
     while time.time() < end_time:
         vm_screenshot = self.get_screenshot()
         ppm_utils.image_crop_save(vm_screenshot, vm_screenshot)
         img_index = 0
         for image in images:
             logging.debug("Compare vm screenshot with image %s", image)
             ppm_utils.image_crop_save(image, cropped_image)
             h_degree = ppm_utils.image_histogram_compare(cropped_image,
                                                          vm_screenshot)
             if h_degree >= similar_degree:
                 logging.debug("Image %s matched", image)
                 image_matched = True
                 break
             img_index += 1
         if image_matched:
             break
         time.sleep(1)
     if os.path.exists(cropped_image):
         os.unlink(cropped_image)
     if os.path.exists(vm_screenshot):
         os.unlink(vm_screenshot)
     if image_matched:
         return img_index
     else:
         return -1
Example #28
def run_merge(filenames):
    """Merges all Skype databases to a new database."""
    dbs = [skypedata.SkypeDatabase(f) for f in filenames]
    db_base = dbs.pop()
    counts = collections.defaultdict(lambda: collections.defaultdict(int))
    postbacks = Queue.Queue()
    postfunc = lambda r: postbacks.put(r)
    worker = workers.MergeThread(postfunc)

    name, ext = os.path.splitext(os.path.split(db_base.filename)[-1])
    now = datetime.datetime.now().strftime("%Y%m%d")
    filename_final = util.unique_path("%s.merged.%s%s" %  (name, now, ext))
    print("Creating %s, using %s as base." % (filename_final, db_base))
    shutil.copyfile(db_base.filename, filename_final)
    db2 = skypedata.SkypeDatabase(filename_final)
    chats2 = db2.get_conversations()
    db2.get_conversations_stats(chats2)

    for db1 in dbs:
        chats = db1.get_conversations()
        db1.get_conversations_stats(chats)
        bar_total = sum(c["message_count"] for c in chats)
        bar_text = " Processing %.*s.." % (30, db1)
        bar = ProgressBar(max=bar_total, afterword=bar_text)
        bar.start()
        args = {"db1": db1, "db2": db2, "chats": chats,
                "type": "diff_merge_left"}
        worker.work(args)
        while True:
            result = postbacks.get()
            if "error" in result:
                print("Error merging %s:\n\n%s" % (db1, result["error"]))
                worker = None # Signal for global break
                break # break while True
            if "done" in result:
                break # break while True
            if "diff" in result:
                counts[db1]["chats"] += 1
                counts[db1]["msgs"] += len(result["diff"]["messages"])
                msgcounts = sum(c["message_count"] for c in result["chats"])
                bar.update(bar.value + msgcounts)
            if result["output"]:
                log(result["output"])
        if not worker:
            break # break for db1 in dbs
        bar.stop()
        bar.afterword = " Processed %s." % db1
        bar.update(bar_total)
        print

    if not counts:
        print("Nothing new to merge.")
        db2.close()
        os.unlink(filename_final)
    else:
        for db1 in dbs:
            print("Merged %s in %s from %s." %
                  (util.plural("message", counts[db1]["msgs"]),
                   util.plural("chat", counts[db1]["chats"]), db1))
        print("Merge into %s complete." % db2)
Example #29
    def process_response(self, request, response):
        if (settings.DEBUG or request.user.is_superuser) and 'prof' in request.GET:
            self.prof.close()

            out = StringIO.StringIO()
            old_stdout = sys.stdout
            sys.stdout = out

            stats = hotshot.stats.load(self.tmpfile)
            stats.sort_stats('time', 'calls')
            stats.print_stats()

            sys.stdout = old_stdout
            stats_str = out.getvalue()

            if response and response.content and stats_str:
                response.content = "<pre>" + stats_str + "</pre>"

            response.content = "\n".join(response.content.split("\n")[:40])

            response.content += self.summary_for_files(stats_str)

            os.unlink(self.tmpfile)

        return response
Example #30
    def testTruncateOnWindows(self):
        def bug801631():
            # SF bug <http://www.python.org/sf/801631>
            # "file.truncate fault on windows"
            f = self.FileIO(TESTFN, 'w')
            f.write(bytes(range(11)))
            f.close()

            f = self.FileIO(TESTFN,'r+')
            data = f.read(5)
            if data != bytes(range(5)):
                self.fail("Read on file opened for update failed %r" % data)
            if f.tell() != 5:
                self.fail("File pos after read wrong %d" % f.tell())

            f.truncate()
            if f.tell() != 5:
                self.fail("File pos after ftruncate wrong %d" % f.tell())

            f.close()
            size = os.path.getsize(TESTFN)
            if size != 5:
                self.fail("File size after ftruncate wrong %d" % size)

        try:
            bug801631()
        finally:
            os.unlink(TESTFN)
Example #31
def teardown_module(module):
    """remove the temporary file created by tests in this file
    this gets automatically called by nose"""
    os.unlink(temp)
Example #32
 def delete(self):
     os.unlink(self.filename)
Example #33
def main():
    import optparse
    parser = optparse.OptionParser('usage: %prog')

    parser.add_option('-m', '--mission',
                      default=DEFAULT_MISSION,
                      help='Mission [%default]')

    parser.add_option('-d', '--dataDir',
                      default=DEFAULT_DATA_DIR,
                      help='Data dir [%default]')

    parser.add_option('-r', '--resultsDir',
                      default=DEFAULT_RESULTS_DIR,
                      help='Results dir [%default]')

    parser.add_option('--reconstructBinary',
                      default=DEFAULT_RECONSTRUCT_BINARY,
                      help='Path to reconstruct binary [%default]')

    parser.add_option('-s', '--subsampleLevel',
                      default=DEFAULT_SUB,
                      help='Subsample level (can be empty) [%default]')

    parser.add_option('-P', '--numProcessors',
                      default=DEFAULT_NUM_PROCESSORS, type='int',
                      help='Number of processors to use [%default]')

    parser.add_option('-n', '--numFiles',
                      default=DEFAULT_NUM_FILES, type='int',
                      help='Number of files to process (0 to process all) [%default]')
    
    parser.add_option('--showConfig',
                      action='store_true', default=False,
                      help='Show config and exit')

    parser.add_option('--region',
                      default=DEFAULT_REGION,
                      help='Region to process [%default]')

    parser.add_option('--extra',
                      default=DEFAULT_EXTRA,
                      help='Extra label that appears at end of name [%default]')

    parser.add_option('--name',
                      default=DEFAULT_NAME,
                      help='Basename of results directory [%default]')

    parser.add_option('--feb13',
                      action='store_true', default=False,
                      help='Use InitAlbedoMosaicFeb13()')

    opts, args = parser.parse_args()

    # stuff extra information into opts
    if args:
        opts.steps = args
    else:
        opts.steps = DEFAULT_STEPS
    opts.HOME = os.environ['HOME']
    opts.date = time.strftime('%Y%m%d')
    opts.time = time.strftime('%H%M%S')

    # set up logging params
    resultsDir = expand('$resultsDir', vars(opts))
    logTimeStr = datetime.datetime.now().isoformat()
    logTimeStr = re.sub(r':', '', logTimeStr)
    logTimeStr = re.sub(r'\.\d+$', '', logTimeStr)
    logTmpl = resultsDir + '/log-%s.txt'
    logFile = logTmpl % logTimeStr
    opts.logFile = logFile

    # debug opts if user requested it
    if opts.showConfig:
        import json
        print json.dumps(vars(opts), indent=4, sort_keys=True)
        sys.exit(0)

    # start logging
    print 'Logging to %s' % logFile
    latestLogFile = logTmpl % 'latest'
    os.system('mkdir -p %s' % resultsDir)
    if os.path.lexists(latestLogFile):
        os.unlink(latestLogFile)
    os.symlink(os.path.basename(logFile), latestLogFile)
    logging.basicConfig(filename=logFile,
                        level=logging.DEBUG,
                        format="%(levelname)-6s %(asctime)-15s %(message)s")

    # do the real work
    runMaster(opts)
Example #34
 def cleanup():
     sys.modules.pop(name)
     os.unlink(imp.cache_from_source(module.__file__))
Example #35
 def cleanup():
     sys.modules.pop(name)
     os.unlink(imp.cache_from_source(mapping[name]))
Example #36
 def DeleteApkTmpFiles(self):
   for _, _, tmp_files in self.apk_info.values():
     for tmp_file in tmp_files.values():
       os.unlink(tmp_file)
Example #37
    def run(self, dry_run=False):
        """
        Performs the clean-up operation by taking the following sequence of steps:
         1. lists all cache files in individual corpora cache dirs
         2. for all the corpora
           2.1 find which files are old enough (see TTL) to be deleted
            2.2 find a cache map entry in Redis and iterate over records found there
             2.2.1 if a record matches a file which is waiting to be deleted, then both the
                    file and the record are removed
              2.2.2 if a record does not match any existing file then it is removed with a
                    logged warning about a stale record
           2.3 if there are still some files to be deleted it means that they are 'unbound'
               (= there is no record in the respective cache map file);
                these files are also deleted with a warning

        Please note that this algorithm is unable to remove stale cache map entries as long
        as there is no existing file within a matching directory (e.g. there is a bunch
        of records for the "syn2010" corpus in Redis but the cache directory "/path/to/cache/syn2010"
        is empty). During normal operation, the system should be able to fix such consistency
        deviations. But it is a good idea to check the mapping from time to time whether there
        are no stale records (e.g. after a corpus was removed/blocked).

        arguments:
        dry_run -- if True then no actual writing/deleting is performed
        """
        num_deleted = 0
        num_processed = 0

        cache_files = self.list_dir()
        self._log_stats(cache_files)

        to_del = {}
        for corpus_id, corpus_cache_files in cache_files.items(
        ):  # processing corpus by corpus
            real_file_hashes = set(
            )  # to be able to compare cache map with actual files
            cache_key = self._entry_key_gen(corpus_id)
            for cache_entry in corpus_cache_files:
                num_processed += 1
                item_key = os.path.basename(cache_entry[0]).rsplit('.conc')[0]
                real_file_hashes.add(item_key)
                if self._ttl < cache_entry[1] / 60.:
                    to_del[item_key] = cache_entry[0]

            cache_map = self._db.hash_get_all(cache_key)
            if cache_map:
                try:
                    for item_hash, v in cache_map.items():
                        if item_hash in to_del:
                            if not dry_run:
                                os.unlink(to_del[item_hash])
                                self._db.hash_del(cache_key, item_hash)
                            else:
                                del to_del[item_hash]
                            num_deleted += 1
                        elif item_hash not in real_file_hashes:
                            if not dry_run:
                                self._db.hash_del(cache_key, item_hash)
                            logging.getLogger().warn(
                                'deleted stale cache map entry [%s][%s]' %
                                (cache_key, item_hash))
                except Exception as ex:
                    logging.getLogger().warn(
                        'Failed to process cache map file (will be deleted): %s'
                        % (ex, ))
                    self._db.remove(cache_key)
            else:
                logging.getLogger().error('Cache map [%s] not found' %
                                          cache_key)
                for item_hash, unbound_file in to_del.items():
                    if not dry_run:
                        os.unlink(unbound_file)
                    logging.getLogger().warn('deleted unbound cache file: %s' %
                                             unbound_file)

        ans = {
            'type': 'summary',
            'processed': num_processed,
            'deleted': num_deleted
        }
        logging.getLogger(__name__).info(json.dumps(ans))
        return ans
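As a standalone illustration of step 2.1 in the docstring above, the sketch below derives a file's age from its mtime and deletes it once the TTL is exceeded; the real method takes the age from list_dir() instead, so the mtime-based age here is an assumption for illustration only:

import os
import time

def delete_if_stale(path, ttl_minutes, dry_run=False):
    # A file becomes a deletion candidate once its age exceeds the TTL.
    age_minutes = (time.time() - os.path.getmtime(path)) / 60.0
    if age_minutes > ttl_minutes:
        if not dry_run:
            os.unlink(path)
        return True
    return False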
Example #38
 def doPlots(self):
     """
     """
     DATA_FILE = self.tempPedName  # for haploview
     INFO_FILE = self.tempMapName
     fblog, blog = tempfile.mkstemp()
     ste = open(blog, 'w')  # to catch the blather
     # if no need to rewrite - set up names for haploview call
     vcl = [
         javabin, '-jar', self.hvbin, '-n', '-memory',
         '%d' % self.memSize, '-pairwiseTagging', '-pedfile', DATA_FILE,
         '-info', INFO_FILE, '-tagrsqcounts', '-tagrsqcutoff', self.tagr2,
         '-ldcolorscheme', self.ldType
     ]
     if self.minMaf:
         vcl += ['-minMaf', '%f' % self.minMaf]
     if self.maxDist:
         vcl += ['-maxDistance', self.maxDist]
     if self.hiRes:
         vcl.append('-png')
     else:
         vcl.append('-compressedpng')
     if self.nchroms == 1:
         vcl += ['-chromosome', self.chromosome]
     if self.infotrack:
         vcl.append('-infoTrack')
     p = subprocess.Popen(' '.join(vcl),
                          shell=True,
                          cwd=self.outfpath,
                          stderr=ste,
                          stdout=self.lf)
     retval = p.wait()
     s = '## executing %s returned %d\n' % (' '.join(vcl), retval)
     self.lf.write(s)
     vcl = [self.mogrify, '-resize 800x400!', '*.PNG']
     p = subprocess.Popen(' '.join(vcl),
                          shell=True,
                          cwd=self.outfpath,
                          stderr=self.lf,
                          stdout=self.lf)
     retval = p.wait()
     s = '## executing %s returned %d\n' % (' '.join(vcl), retval)
     self.lf.write(s)
     inpng = '%s.LD.PNG' % DATA_FILE  # stupid but necessary - can't control haploview name mangle
     inpng = inpng.replace(' ', '')
     inpng = os.path.split(inpng)[-1]
     tmppng = '%s.tmp.png' % self.title
     tmppng = tmppng.replace(' ', '')
     outpng = '1_%s.png' % self.title
     outpng = outpng.replace(' ', '')
     outpng = os.path.split(outpng)[-1]
     vcl = [self.convert, '-resize 800x400!', inpng, tmppng]
     p = subprocess.Popen(' '.join(vcl),
                          shell=True,
                          cwd=self.outfpath,
                          stderr=self.lf,
                          stdout=self.lf)
     retval = p.wait()
     s = '## executing %s returned %d\n' % (' '.join(vcl), retval)
     self.lf.write(s)
     s = "text 10,300 '%s'" % self.title[:40]
     vcl = [
         self.convert, '-pointsize 25', '-fill maroon',
         '-draw "%s"' % s, tmppng, outpng
     ]
     p = subprocess.Popen(' '.join(vcl),
                          shell=True,
                          cwd=self.outfpath,
                          stderr=self.lf,
                          stdout=self.lf)
     retval = p.wait()
     s = '## executing %s returned %d\n' % (' '.join(vcl), retval)
     self.lf.write(s)
     try:
         os.remove(os.path.join(self.outfpath, tmppng))
     except:
         pass  # label all the plots then delete all the .PNG files before munging
     fnum = 1
     if self.hmpanels:
         sp = '%d' % (self.spos / 1000.)  # hapmap wants kb
         ep = '%d' % (self.epos / 1000.)
         for panel in self.hmpanels:
             if panel > '' and panel.lower(
             ) <> 'none':  # in case someone checks that option too :)
                 ptran = panel.strip()
                 ptran = ptran.replace('+', '_')
                 fnum += 1  # preserve an order or else we get sorted
                 vcl = [
                     javabin, '-jar', self.hvbin, '-n', '-memory',
                     '%d' % self.memSize, '-chromosome', self.chromosome,
                     '-panel',
                     panel.strip(), '-hapmapDownload', '-startpos', sp,
                     '-endpos', ep, '-ldcolorscheme', self.ldType
                 ]
                 if self.minMaf:
                     vcl += ['-minMaf', '%f' % self.minMaf]
                 if self.maxDist:
                     vcl += ['-maxDistance', self.maxDist]
                 if self.hiRes:
                     vcl.append('-png')
                 else:
                     vcl.append('-compressedpng')
                 if self.infotrack:
                     vcl.append('-infoTrack')
                 p = subprocess.Popen(' '.join(vcl),
                                      shell=True,
                                      cwd=self.outfpath,
                                      stderr=ste,
                                      stdout=self.lf)
                 retval = p.wait()
                 inpng = 'Chromosome%s%s.LD.PNG' % (self.chromosome, panel)
                 inpng = inpng.replace(' ', '')  # mysterious spaces!
                 outpng = '%d_HapMap_%s_%s.png' % (fnum, ptran,
                                                   self.chromosome)
                 # hack for stupid chb+jpt
                 outpng = outpng.replace(' ', '')
                 tmppng = '%s.tmp.png' % self.title
                 tmppng = tmppng.replace(' ', '')
                 outpng = os.path.split(outpng)[-1]
                 vcl = [self.convert, '-resize 800x400!', inpng, tmppng]
                 p = subprocess.Popen(' '.join(vcl),
                                      shell=True,
                                      cwd=self.outfpath,
                                      stderr=self.lf,
                                      stdout=self.lf)
                 retval = p.wait()
                 s = '## executing %s returned %d\n' % (' '.join(vcl),
                                                        retval)
                 self.lf.write(s)
                 s = "text 10,300 'HapMap %s'" % ptran.strip()
                 vcl = [
                     self.convert, '-pointsize 25', '-fill maroon',
                     '-draw "%s"' % s, tmppng, outpng
                 ]
                 p = subprocess.Popen(' '.join(vcl),
                                      shell=True,
                                      cwd=self.outfpath,
                                      stderr=self.lf,
                                      stdout=self.lf)
                 retval = p.wait()
                 s = '## executing %s returned %d\n' % (' '.join(vcl),
                                                        retval)
                 self.lf.write(s)
                 try:
                     os.remove(os.path.join(self.outfpath, tmppng))
                 except:
                     pass
     nimages = len(glob.glob(os.path.join(
         self.outfpath, '*.png')))  # rely on HaploView shouting - PNG @!
     self.lf.write('### nimages=%d\n' % nimages)
     if nimages > 0:  # haploview may fail?
         vcl = '%s -format pdf -resize 800x400! *.png' % self.mogrify
         p = subprocess.Popen(vcl,
                              shell=True,
                              cwd=self.outfpath,
                              stderr=self.lf,
                              stdout=self.lf)
         retval = p.wait()
         self.lf.write('## executing %s returned %d\n' % (vcl, retval))
         vcl = '%s *.pdf --fitpaper true --outfile alljoin.pdf' % self.pdfjoin
         p = subprocess.Popen(vcl,
                              shell=True,
                              cwd=self.outfpath,
                              stderr=self.lf,
                              stdout=self.lf)
         retval = p.wait()
         self.lf.write('## executing %s returned %d\n' % (vcl, retval))
         vcl = '%s alljoin.pdf --nup 1x%d --outfile allnup.pdf' % (
             self.pdfnup, nimages)
         p = subprocess.Popen(vcl,
                              shell=True,
                              cwd=self.outfpath,
                              stderr=self.lf,
                              stdout=self.lf)
         retval = p.wait()
         self.lf.write('## executing %s returned %d\n' % (vcl, retval))
         vcl = '%s -resize x300 allnup.pdf allnup.png' % (self.convert)
         p = subprocess.Popen(vcl,
                              shell=True,
                              cwd=self.outfpath,
                              stderr=self.lf,
                              stdout=self.lf)
         retval = p.wait()
         self.lf.write('## executing %s returned %d\n' % (vcl, retval))
     ste.close()  # temp file used to catch haploview blather
     hblather = open(blog, 'r').readlines()  # to catch the blather
     os.unlink(blog)
     if len(hblather) > 0:
         self.lf.write('## In addition, Haploview complained:')
         self.lf.write(''.join(hblather))
         self.lf.write('\n')
     self.lf.close()
     flist = glob.glob(os.path.join(self.outfpath, '*'))
     flist.sort()
     ts = '!"#$%&\'()*+,-/:;<=>?@[\\]^_`{|}~' + string.whitespace
     ftran = string.maketrans(ts, '_' * len(ts))
     outf = file(self.outfile, 'w')
     outf.write(galhtmlprefix % progname)
     s = '<h4>rgenetics for Galaxy %s, wrapping HaploView</h4>' % (progname)
     outf.write(s)
     mainthumb = 'allnup.png'
     mainpdf = 'allnup.pdf'
     if os.path.exists(os.path.join(self.outfpath, mainpdf)):
         if not os.path.exists(os.path.join(self.outfpath, mainthumb)):
             outf.write(
                 '<table><tr><td colspan="3"><a href="%s">Main combined LD plot</a></td></tr></table>\n'
                 % (mainpdf))
         else:
             outf.write(
                 '<table><tr><td><a href="%s"><img src="%s" alt="Main combined LD image" hspace="10" align="middle">'
                 % (mainpdf, mainthumb))
             outf.write(
                 '</td><td>Click the thumbnail at left to download the main combined LD image <a href=%s>%s</a></td></tr></table>\n'
                 % (mainpdf, mainpdf))
     else:
         outf.write(
             '(No main image was generated - this usually means a Haploview error connecting to Hapmap site - please try later)<br/>\n'
         )
     outf.write('<br><div><hr><ul>\n')
     for i, data in enumerate(flist):
         dn = os.path.split(data)[-1]
         if dn[:3] <> 'all':
             continue
         newdn = dn.translate(ftran)
         if dn <> newdn:
             os.rename(os.path.join(self.outfpath, dn),
                       os.path.join(self.outfpath, newdn))
             dn = newdn
         dnlabel = dn
         ext = dn.split('.')[-1]
         if dn == 'allnup.pdf':
             dnlabel = 'All pdf plots on a single page'
         elif dn == 'alljoin.pdf':
             dnlabel = 'All pdf plots, each on a separate page'
         outf.write('<li><a href="%s">%s - %s</a></li>\n' %
                    (dn, dn, dnlabel))
     for i, data in enumerate(flist):
         dn = os.path.split(data)[-1]
         if dn[:3] == 'all':
             continue
         newdn = dn.translate(ftran)
         if dn <> newdn:
             os.rename(os.path.join(self.outfpath, dn),
                       os.path.join(self.outfpath, newdn))
             dn = newdn
         dnlabel = dn
         ext = dn.split('.')[-1]
         if dn == 'allnup.pdf':
             dnlabel = 'All pdf plots on a single page'
         elif dn == 'alljoin.pdf':
             dnlabel = 'All pdf plots, each on a separate page'
         elif ext == 'info':
             dnlabel = '%s map data for Haploview input' % self.title
         elif ext == 'ped':
             dnlabel = '%s genotype data for Haploview input' % self.title
         elif dn.find('CEU') <> -1 or dn.find('YRI') <> -1 or dn.find(
                 'CHB_JPT') <> -1:  # is hapmap
             dnlabel = 'Hapmap data'
         if ext == 'TAGS' or ext == 'TESTS' or ext == 'CHAPS':
             dnlabel = dnlabel + ' Tagger output'
         outf.write('<li><a href="%s">%s - %s</a></li>\n' %
                    (dn, dn, dnlabel))
      outf.write('</ul><br>')
     outf.write("</div><div><hr>Job Log follows below (see %s)<pre>" %
                self.logfn)
     s = file(self.log_file, 'r').readlines()
     s = '\n'.join(s)
     outf.write('%s</pre><hr></div>' % s)
     outf.write('</body></html>')
     outf.close()
     if self.useTemp:
         try:
             os.unlink(self.tempMapName)
             os.unlink(self.tempPedName)
         except:
             pass
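doPlots above redirects Haploview's stderr into a mkstemp file ("the blather"), reads it back at the end, and unlinks it. A compact sketch of that capture-and-clean-up pattern, with illustrative names that are not from the original:

import os
import subprocess
import tempfile

def run_and_capture_stderr(cmd, cwd=None):
    # Send stderr to a named temporary file, read it back, always unlink it.
    fd, err_path = tempfile.mkstemp()
    try:
        with os.fdopen(fd, 'w') as err_file:
            retval = subprocess.call(cmd, shell=True, cwd=cwd, stderr=err_file)
        with open(err_path) as err_file:
            blather = err_file.read()
        return retval, blather
    finally:
        os.unlink(err_path)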
Example #39
File: crumb.py Project: da-x/crumb
    def handle_halt_queue(self, queue):
        for halt in queue:
            pid = halt.msg[0]
            mtype = halt.msg[1]
            if mtype == "VFS":
                func_effect, func_name, abspath = halt.msg[2:]
                abspathx = remove_trailing_slash(abspath)
                if not abspath.startswith(self._directory):
                    if func_effect == "MODIFY":
                        halt.target.created_paths.add(abspathx)
                    elif func_effect == "READ":
                        if not abspathx in halt.target.created_paths:
                            if os.path.isfile(abspathx):
                                # halt.target.log("TODO: external dep: " + abspathx)
                                pass
                    else:
                        raise Exception(parts)
                    halt.target.release(pid)
                    continue

                reltarget = abspath[len(self._directory)+1:]
                if not reltarget: # root dir stat
                    halt.target.release(pid)
                    continue

                resdef = get_crumb_def(os.path.dirname(reltarget), os.path.basename(reltarget))
                if not resdef:
                    # We don't know how to generate this target.
                    if func_effect == "READ":
                        # must be a leaf dep
                        if os.path.isfile(abspath):
                            # halt.target.log("TODO: internal leaf dep: " + reltarget)
                            if reltarget not in halt.target.leaf_deps:
                                halt.target.log("detected leaf dep: " + reltarget)
                            halt.target.leaf_deps.add(reltarget)
                            halt.target.release(pid)
                        else:
                            if not os.path.exists(abspath):
                                # halt.target.log("TODO: internal leaf dep (non-existing): " + reltarget)
                                pass
                            else:
                                halt.target.log("TODO: internal leaf dep (non-file): " + reltarget)
                            halt.target.release(pid)
                    elif func_effect == "MODIFY":
                        halt.target.log("detected internal tempfile: " + reltarget)
                        halt.target.release(pid)
                    else:
                        raise Exception(parts)
                else:
                    # We *do* know how to generate this target
                    if reltarget not in self.targets:
                        self.add_target(resdef)

                    target_path = resdef.target_path
                    dep_target = self.targets.get(target_path)
                    if func_effect == "MODIFY":
                        if dep_target is halt.target:
                            # It's us. Our output. Proceed.
                            halt.target.log("detected output: " + target_path)
                            halt.target.release(pid)
                        else:
                            # Nope, it's generating something else that doesn't belong to it
                            # but we know how to generate it ourselves, so something's amiss
                            halt.target.log("detected colliding side-effect: " + target_path)
                            raise Exception("can't handle it")
                    elif func_effect == "READ":
                        if dep_target.made:
                            # Already, done, proceed
                            if target_path not in halt.target.deps_already_done:
                                halt.target.log("dep target %s done already, proceeding" % (target_path, ))
                            halt.target.deps_already_done.add(target_path)
                            halt.target.release(pid)
                        else:
                            # Nope, we need to wait for it
                            if dep_target is halt.target:
                                # It's us. Our output. We try to probe it before generating it? No
                                # problem, make sure the old output isn't there, and proceed
                                if os.path.exists(abspath):
                                    os.unlink(abspath)
                                halt.target.release(pid)
                            else:
                                halt.target.log("waiting for dep target %s to be made" % (target_path, ))
                                self.deps_waiting.setdefault(target_path, []).append(halt)
                    else:
                        raise Exception(parts)
            else:
                raise Exception(parts)
Example #40
 def unlock(key: str) -> None:
     try:
         os.unlink(f"/var/run/blueman-{key}")
     except OSError:
         pass
Example #41
def run(test, params, env):
    """
    Test command: virsh update-device.

    Update device from an XML <file>.
    1.Prepare test environment, adding a cdrom to VM.
    2.Perform virsh update-device operation.
    3.Recover test environment.
    4.Confirm the test result.
    """

    # Before doing anything - let's be sure we can support this test
    # Parse flag list, skip testing early if flag is not supported
    # NOTE: "".split("--") returns [''] which messes up later empty test
    flag = params.get("updatedevice_flag", "")
    flag_list = []
    if flag.count("--"):
        flag_list = flag.split("--")
    for item in flag_list:
        option = item.strip()
        if option == "":
            continue
        if not bool(virsh.has_command_help_match("update-device", option)):
            raise error.TestNAError(
                "virsh update-device doesn't support --%s" % option)

    # As per RH BZ 961443 avoid testing before behavior changes
    if 'config' in flag_list:
        # SKIP tests using --config if libvirt is 0.9.10 or earlier
        if not libvirt_version.version_compare(0, 9, 10):
            raise error.TestNAError("BZ 961443: --config behavior change "
                                    "in version 0.9.10")
    if 'persistent' in flag_list:
        # SKIP tests using --persistent if libvirt 1.0.5 or earlier
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("BZ 961443: --persistent behavior change "
                                    "in version 1.0.5")

    # Prepare initial vm state
    vm_name = params.get("main_vm")
    vmxml = VMXML.new_from_dumpxml(vm_name, options="--inactive")
    vm = env.get_vm(vm_name)
    start_vm = "yes" == params.get("start_vm", "no")

    # Get the target bus/dev
    target_bus = params.get("updatedevice_target_bus", "ide")
    target_dev = params.get("updatedevice_target_dev", "hdc")

    # Prepare tmp directory and files.
    orig_iso = os.path.join(test.virtdir, "orig.iso")
    test_iso = os.path.join(test.virtdir, "test.iso")
    test_diff_iso = os.path.join(test.virtdir, "test_diff.iso")
    update_xmlfile = os.path.join(test.tmpdir, "update.xml")
    create_attach_xml(update_xmlfile, test_iso, target_bus, target_dev)

    # This test needs a cdrom attached first - attach a cdrom
    # to a shutdown vm. Then decide to restart or not
    if vm.is_alive():
        vm.destroy()
    create_cdrom(vm_name, orig_iso, target_dev)
    if start_vm:
        vm.start()
        domid = vm.get_id()
    else:
        domid = "domid invalid; domain is shut-off"

    # Get remaining parameters for configuration.
    twice = "yes" == params.get("updatedevice_twice", "no")
    diff_iso = "yes" == params.get("updatedevice_diff_iso", "no")
    vm_ref = params.get("updatedevice_vm_ref", "")
    status_error = "yes" == params.get("status_error", "no")
    extra = params.get("updatedevice_extra", "")

    # OK let's give this a whirl...
    try:
        if vm_ref == "id":
            vm_ref = domid
            if twice:
                # Don't pass in any flags
                virsh.update_device(domainarg=domid,
                                    filearg=update_xmlfile,
                                    ignore_status=True,
                                    debug=True)
            if diff_iso:
                # Swap filename of device backing file in update.xml
                os.remove(update_xmlfile)
                create_attach_xml(update_xmlfile, test_diff_iso, target_bus,
                                  target_dev)
        elif vm_ref == "uuid":
            vm_ref = vmxml.uuid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref.find("updatedevice_invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "name":
            vm_ref = "%s %s" % (vm_name, extra)

        cmdresult = virsh.update_device(domainarg=vm_ref,
                                        filearg=update_xmlfile,
                                        flagstr=flag,
                                        ignore_status=True,
                                        debug=True)
        status = cmdresult.exit_status

        active_vmxml = VMXML.new_from_dumpxml(vm_name)
        inactive_vmxml = VMXML.new_from_dumpxml(vm_name, options="--inactive")
    finally:
        vm.destroy(gracefully=False, free_mac_addresses=False)
        vmxml.undefine()
        vmxml.restore()
        vmxml.define()
        if os.path.exists(orig_iso):
            os.remove(orig_iso)
        if os.path.exists(test_iso):
            os.remove(test_iso)
        if os.path.exists(test_diff_iso):
            os.remove(test_diff_iso)

    # Result handling logic sets errmsg only on error
    errmsg = None
    if status_error:
        if status == 0:
            errmsg = "Run successfully with wrong command!"
    else:  # Normal test
        if status != 0:
            errmsg = "Run failed with right command"
        if diff_iso:  # Expect the backing file to have updated
            active_attached = is_attached(active_vmxml.devices, test_diff_iso,
                                          target_dev)
            inactive_attached = is_attached(inactive_vmxml.devices,
                                            test_diff_iso, target_dev)
        else:  # Expect backing file to remain the same
            active_attached = is_attached(active_vmxml.devices, test_iso,
                                          target_dev)
            inactive_attached = is_attached(inactive_vmxml.devices, test_iso,
                                            target_dev)

        # Check behavior of combination before individual!
        if "config" in flag_list and "live" in flag_list:
            if not active_attached:
                errmsg = ("Active domain XML not updated when "
                          "--config --live options used")
            if not inactive_attached:
                errmsg = ("Inactive domain XML not updated when "
                          "--config --live options used")

        elif "live" in flag_list and inactive_attached:
            errmsg = ("Inactive domain XML updated when " "--live option used")

        elif "config" in flag_list and active_attached:
            errmsg = ("Active domain XML updated when " "--config option used")

        # persistent option behavior depends on start_vm
        if "persistent" in flag_list:
            if start_vm:
                if not active_attached or not inactive_attached:
                    errmsg = ("XML not updated when --persistent "
                              "option used on active domain")

            else:
                if not inactive_attached:
                    errmsg = ("XML not updated when --persistent "
                              "option used on inactive domain")
        if len(flag_list) == 0:
            # Not specifying any flag is the same as specifying --current
            if start_vm:
                if not active_attached:
                    errmsg = "Active domain XML not updated"
                elif inactive_attached:
                    errmsg = ("Inactive domain XML updated when active "
                              "requested")

    # Log some debugging info before destroying instances
    if errmsg is not None:
        logging.debug("Active XML:")
        logging.debug(str(active_vmxml))
        logging.debug("Inactive XML:")
        logging.debug(str(inactive_vmxml))
        logging.debug("active_attached: %s", str(active_attached))
        logging.debug("inctive_attached: %s", str(inactive_attached))
        logging.debug("Device XML:")
        with open(update_xmlfile, "r") as xml_fd:
            logging.debug(xml_fd.read())

    # clean up tmp files
    del vmxml
    del active_vmxml
    del inactive_vmxml
    os.unlink(update_xmlfile)

    if errmsg is not None:
        raise error.TestFail(errmsg)
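The docstring above describes attaching a cdrom and then feeding virsh update-device an XML <file>, but the create_attach_xml() helper the test calls is not shown. A minimal sketch of what such a helper might look like, assuming it only needs to emit a cdrom <disk> element pointing at the given ISO (the helper body below is hypothetical, not the original implementation):

# Hedged sketch of a create_attach_xml()-style helper (hypothetical): write a
# cdrom <disk> element for the given ISO to the update XML file.
def create_attach_xml(update_xmlfile, source_iso, target_bus, target_dev):
    disk_xml = (
        '<disk device="cdrom" type="file">\n'
        '  <driver name="qemu" type="raw"/>\n'
        '  <source file="{}"/>\n'
        '  <target bus="{}" dev="{}"/>\n'
        '  <readonly/>\n'
        '</disk>\n'
    ).format(source_iso, target_bus, target_dev)
    with open(update_xmlfile, "w") as xml_fd:
        xml_fd.write(disk_xml)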
Example #42
0
    async def run(self) -> MigrationResult:
        await self.create_migration_table()

        app_modules = self.get_app_modules()

        migration_modules = {}

        for app_module in app_modules:
            app_config = getattr(app_module, "APP_CONFIG")
            if app_config.app_name == self.app_name:
                migration_modules = self.get_migration_modules(
                    app_config.migrations_folder_path)
                break

        ran_migration_ids = await Migration.get_migrations_which_ran(
            app_name=self.app_name)
        if len(ran_migration_ids) == 0:
            # Make sure a success is returned, as we don't want this
            # to appear as an error in automated scripts.
            message = "No migrations to reverse!"
            print(message)
            return MigrationResult(success=True, message=message)

        #######################################################################

        if self.migration_id == "all":
            earliest_migration_id = ran_migration_ids[0]
        elif self.migration_id == "1":
            earliest_migration_id = ran_migration_ids[-1]
        else:
            earliest_migration_id = self.migration_id

        if earliest_migration_id not in ran_migration_ids:
            message = ("Unrecognized migration name - must be one of "
                       f"{ran_migration_ids}")
            print(message, file=sys.stderr)
            return MigrationResult(success=False, message=message)

        #######################################################################

        latest_migration_id = ran_migration_ids[-1]

        start_index = ran_migration_ids.index(earliest_migration_id)
        end_index = ran_migration_ids.index(latest_migration_id) + 1

        subset = ran_migration_ids[start_index:end_index]
        reversed_migration_ids = list(reversed(subset))

        #######################################################################

        _continue = ("y" if self.auto_agree else input(
            "About to undo the following migrations:\n"
            f"{reversed_migration_ids}\n"
            "Enter y to continue.\n"))
        if _continue == "y":
            print("Undoing migrations")

            for migration_id in reversed_migration_ids:
                print(f"Reversing {migration_id}")
                migration_module = migration_modules[migration_id]
                response = await migration_module.forwards()

                if isinstance(response, MigrationManager):
                    await response.run_backwards()

                await Migration.delete().where(Migration.name == migration_id
                                               ).run()

                if self.clean:
                    os.unlink(migration_module.__file__)

            return MigrationResult(success=True)

        else:  # pragma: no cover
            message = "Not proceeding."
            print(message, file=sys.stderr)
            return MigrationResult(success=False, message=message)
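The comments above describe how the migration id passed in ("all", "1", or an explicit id) selects the earliest migration to undo, and how the already-run ids are then sliced and reversed. A small stand-alone illustration of that selection logic on made-up migration names:

# Illustration of the id-selection and reversal logic above, on made-up data.
ran_migration_ids = ["0001_initial", "0002_add_users", "0003_add_index"]

def ids_to_reverse(migration_id, ran_migration_ids):
    if migration_id == "all":
        earliest = ran_migration_ids[0]       # undo everything
    elif migration_id == "1":
        earliest = ran_migration_ids[-1]      # undo only the most recent one
    else:
        earliest = migration_id               # undo back to a named migration
    start = ran_migration_ids.index(earliest)
    return list(reversed(ran_migration_ids[start:]))

print(ids_to_reverse("all", ran_migration_ids))
# ['0003_add_index', '0002_add_users', '0001_initial']
print(ids_to_reverse("1", ran_migration_ids))
# ['0003_add_index']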
        """
        t = dbconn.run_query(q)
        for s in t:
            demno = str(s['id'])

        #clipped_solar_raster_dir = out_path + os.sep + 'SRR_' + str(row['id'] / 1000 * 1000).zfill(4) + os.sep
        clipped_solar_raster_dir = out_path + os.sep + demno + os.sep
        clipped_solar_raster = clipped_solar_raster_dir + str(
            row['id']) + '.img'  # what raster format do we want???

        if not os.path.exists(clipped_solar_raster_dir):
            os.mkdir(clipped_solar_raster_dir)

        if os.path.isfile(clipped_solar_raster):
            print "WARNING: Overwriting existing solar raster image at " + clipped_solar_raster
            os.unlink(clipped_solar_raster)

        # run solar analyst
        try:
            solar_raster = arcpy.sa.AreaSolarRadiation(
                in_surface_raster, latitude, sky_size, time_configuration,
                day_interval, hour_interval, each_interval, z_factor,
                slope_aspect_input_type, calculation_directions,
                zenith_divisions, azimuth_divisions, diffuse_model_type,
                diffuse_proportion, transmissivity,
                out_direct_radiation_raster, out_diffuse_radiation_raster,
                out_direct_duration_raster)
            # clip to feature extent and saves output
            envelope = "{0} {1} {2} {3}".format(int(row['xmin']),
                                                int(row['ymin']),
                                                int(row['xmax']),
Example #44
0
def generate_checksums(repo,
                       output,
                       prg_wrap=None,
                       dl_path=None,
                       dl_mirror=None,
                       dl_slug=None,
                       curses=False,
                       force_cache=False,
                       list_files=False,
                       remove_prefixes=[]):
    logging.info('Parsing repo...')

    mods = RepoConf(repo)
    mods.parse_includes()

    if len(mods.mods) == 0:
        logging.error('No mods found!')
        return False

    if not mods.validate():
        logging.error('Failed to parse the repo!')
        return False

    cache = {'generated': 0, 'mods': {}}
    start_time = time.time()
    failed = False
    results = {}

    if os.path.isfile(output):
        with open(output, 'r') as stream:
            stream.seek(0, os.SEEK_END)

            # Don't try to parse the file if it's empty.
            if stream.tell() != 0:
                stream.seek(0)
                cache = json.load(stream)

        c_mods = {}
        for mod in cache['mods']:
            if mod['id'] not in c_mods:
                c_mods[mod['id']] = {}

            c_mods[mod['id']][mod['version']] = mod

        cache['mods'] = c_mods

    # Thu, 24 Jul 2014 12:00:16 GMT
    locale.setlocale(locale.LC_TIME, 'C')
    tstamp = time.strftime('%a, %d %b %Y %H:%M:%S GMT',
                           time.gmtime(cache['generated']))

    items = []
    for mid, mvs in mods.mods.items():
        for ver, mod in mvs.items():
            c_mod = cache['mods'].get(mid, {}).get(str(ver), {})
            c_pkgs = [pkg['name'] for pkg in c_mod.get('packages', [])]

            for pkg in mod.packages:
                my_tstamp = 0

                if force_cache and pkg.name in c_pkgs:
                    if mid not in results:
                        results[mid] = {}

                    if ver not in results[mid]:
                        results[mid][ver] = {}

                    if pkg.name not in results[mid][ver]:
                        results[mid][ver][pkg.name] = {}

                    for name, file_ in pkg.files.items():
                        results[mid][ver][pkg.name][name] = {
                            'md5sum': 'CACHE',
                            'contents': [],
                            'size': 0
                        }

                    continue

                # Only download the files if we have no checksums or they changed.
                if pkg.name in c_pkgs:
                    my_tstamp = tstamp

                for name, file_ in pkg.files.items():
                    # id_, links, name, archive, tstamp
                    items.append(
                        ((mid, mod.version, pkg.name, name), file_.urls, name,
                         file_.is_archive, my_tstamp))

    if len(items) < 1:
        if not force_cache:
            logging.error('No files found!')
            failed = True

        file_info = []
    else:
        logging.info('Generating checksums...')

        init_app()
        task = ChecksumTask(items, dl_path, dl_mirror, dl_slug,
                            remove_prefixes)
        run_task(task, prg_wrap, curses)
        file_info = task.get_results()

    logging.info('Saving data...')

    logos = {}
    for id_, csum, content, size in file_info:
        mid, ver, pkg, name = id_

        if csum == 'FAILED':
            failed = True
            continue

        if name == 'logo.jpg':
            if mid not in logos:
                logos[mid] = {}

            logos[mid][ver] = csum
        else:
            if mid not in results:
                results[mid] = {}

            if ver not in results[mid]:
                results[mid][ver] = {}

            if pkg not in results[mid][ver]:
                results[mid][ver][pkg] = {}

            results[mid][ver][pkg][name] = {
                'md5sum': csum,
                'contents': content,
                'size': size
            }

    outpath = os.path.dirname(output)
    new_cache = {'generated': start_time, 'mods': []}

    # Well, this looks horrible...
    # This loop copies the Mod objects from mods, adds the data from results and places the objects in new_cache['mods'].
    # It also merges in data from cache['mods'] if necessary (whenever the md5sum of a file/archive is set to CACHE).
    for mid, mvs in mods.mods.items():
        for ver, mod in mvs.items():
            mod = mod.copy()
            c_pkgs = {}
            sver = str(ver)

            # Retrieve our packages from cache
            if mid in cache['mods'] and sver in cache['mods'][mid]:
                for pkg in cache['mods'][mid][sver]['packages']:
                    c_pkgs[pkg['name']] = pkg.copy()

            if mid in logos and ver in logos[mid] and mod.logo is None:
                fd, name = tempfile.mkstemp(dir=outpath,
                                            prefix='logo',
                                            suffix='.' +
                                            logos[mid][ver].split('.')[-1])
                os.close(fd)

                shutil.move(logos[mid][ver], name)
                mod.logo = os.path.basename(name)

            for pkg in mod.packages:
                c_files = {}
                files = {}

                # Retrieve our files from cache
                if pkg.name in c_pkgs:
                    for item in c_pkgs[pkg.name]['files']:
                        c_files[item['filename']] = item

                    for item in c_pkgs[pkg.name]['filelist']:
                        if item['orig_name'] is None:
                            continue

                        ar = c_files[item['archive']]
                        if 'contents' not in ar:
                            ar['contents'] = {}

                        ar['contents'][item['orig_name']] = item['md5sum']

                # Look for files which should keep their old checksum (from cache).
                if mid in results and ver in results[
                        mid] and pkg.name in results[mid][ver]:
                    files = results[mid][ver][pkg.name]
                    for name, info in files.items():
                        if info['md5sum'] == 'CACHE':
                            if name not in c_files:
                                logging.error(
                                    'Tried to retrieve a checksum from the cache but it wasn\'t there! (This is a bug!)'
                                )
                                failed = True
                            else:
                                info['md5sum'] = c_files[name]['md5sum']
                                info['contents'] = c_files[name]['contents']
                                info['size'] = c_files[name]['filesize']
                else:
                    logging.error('Checksums for "%s" are missing!', mid)
                    failed = True

                # Copy the information from results
                for name, info in pkg.files.items():
                    if name not in files:
                        logging.error(
                            'Missing information for file "%s" of package "%s".',
                            name, pkg.name)
                    else:
                        info.md5sum = files[name]['md5sum']
                        info.contents = files[name]['contents']
                        info.filesize = files[name]['size']

            mod.build_file_list()

            # Check files
            done = False
            for pkg in mod.packages:
                for item in pkg.filelist:
                    if item['filename'].endswith('mod.ini'):
                        if item['filename'] == 'mod.ini':
                            # All is well...
                            done = True
                            break
                        else:
                            prefix = item['filename'][:-7].strip('/')

                            logging.warn(
                                'Found mod.ini in folder %s. I assume you forgot the move action; I\'ll just add it myself.',
                                prefix)
                            mod.actions.append({
                                'type': 'move',
                                'paths': [prefix + '/*'],
                                'dest': '',
                                'glob': True
                            })
                            mod.build_file_list()

                            done = True
                            break

                if done:
                    break

            if list_files:
                file_list = [
                    'Please make sure the following is correct:',
                    'If a user has FS installed in C:\\Freespace2, your mod will install the following files:',
                    ''
                ]
                prefix = 'C:\\Freespace2\\' + mod.folder
                for pkg in mod.packages:
                    for item in pkg.filelist:
                        file_list.append(prefix + '\\' +
                                         item['filename'].replace('/', '\\'))

                logging.info('\n'.join(file_list))

            new_cache['mods'].append(mod.get())

    with open(output, 'w') as stream:
        json.dump(new_cache, stream, separators=(',', ':'))

    # Cleanup
    for vers in logos.values():
        for path in vers.values():
            if os.path.isfile(path):
                os.unlink(path)

    if failed:
        logging.error('Failed!')
        return False
    else:
        logging.info('Done')
        return True
Example #45
0
def do_start(verb, pid_file, server, args):
    if verb != 'Respawn' and pid_file == CONF.pid_file:
        for pid_file, pid in pid_files(server, pid_file):
            if os.path.exists('/proc/%s' % pid):
                print(
                    _("%(serv)s appears to already be running: %(pid)s") % {
                        'serv': server,
                        'pid': pid_file
                    })
                return
            else:
                print(_("Removing stale pid file %s") % pid_file)
                os.unlink(pid_file)

        try:
            resource.setrlimit(resource.RLIMIT_NOFILE,
                               (MAX_DESCRIPTORS, MAX_DESCRIPTORS))
            resource.setrlimit(resource.RLIMIT_DATA, (MAX_MEMORY, MAX_MEMORY))
        except ValueError:
            print(
                _('Unable to increase file descriptor limit.  '
                  'Running as non-root?'))
        os.environ['PYTHON_EGG_CACHE'] = '/tmp'

    def write_pid_file(pid_file, pid):
        with open(pid_file, 'w') as fp:
            fp.write('%d\n' % pid)

    def redirect_to_null(fds):
        with open(os.devnull, 'r+b') as nullfile:
            for desc in fds:  # close fds
                try:
                    os.dup2(nullfile.fileno(), desc)
                except OSError:
                    pass

    def redirect_to_syslog(fds, server):
        log_cmd = 'logger'
        log_cmd_params = '-t "%s[%d]"' % (server, os.getpid())
        process = subprocess.Popen([log_cmd, log_cmd_params],
                                   stdin=subprocess.PIPE)
        for desc in fds:  # pipe to logger command
            try:
                os.dup2(process.stdin.fileno(), desc)
            except OSError:
                pass

    def redirect_stdio(server, capture_output):
        input = [sys.stdin.fileno()]
        output = [sys.stdout.fileno(), sys.stderr.fileno()]

        redirect_to_null(input)
        if capture_output:
            redirect_to_syslog(output, server)
        else:
            redirect_to_null(output)

    @gated_by(CONF.capture_output)
    def close_stdio_on_exec():
        fds = [sys.stdin.fileno(), sys.stdout.fileno(), sys.stderr.fileno()]
        for desc in fds:  # set close on exec flag
            fcntl.fcntl(desc, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

    def launch(pid_file, conf_file=None, capture_output=False, await_time=0):
        args = [server]
        if conf_file:
            args += ['--config-file', conf_file]
            msg = (_('%(verb)sing %(serv)s with %(conf)s') % {
                'verb': verb,
                'serv': server,
                'conf': conf_file
            })
        else:
            msg = (_('%(verb)sing %(serv)s') % {'verb': verb, 'serv': server})
        print(msg)

        close_stdio_on_exec()

        pid = os.fork()
        if pid == 0:
            os.setsid()
            redirect_stdio(server, capture_output)
            try:
                os.execlp('%s' % server, *args)
            except OSError as e:
                msg = (_('unable to launch %(serv)s. Got error: %(e)s') % {
                    'serv': server,
                    'e': e
                })
                sys.exit(msg)
            sys.exit(0)
        else:
            write_pid_file(pid_file, pid)
            await_child(pid, await_time)
            return pid

    @gated_by(CONF.await_child)
    def await_child(pid, await_time):
        bail_time = time.time() + await_time
        while time.time() < bail_time:
            reported_pid, status = os.waitpid(pid, os.WNOHANG)
            if reported_pid == pid:
                global exitcode
                exitcode = os.WEXITSTATUS(status)
                break
            time.sleep(0.05)

    conf_file = None
    if args and os.path.exists(args[0]):
        conf_file = os.path.abspath(os.path.expanduser(args[0]))

    return launch(pid_file, conf_file, CONF.capture_output, CONF.await_child)
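The first block of the function illustrates the usual pid-file dance: if /proc/<pid> still exists the server is treated as already running, otherwise the pid file is stale and removed. A small stand-alone version of just that check (Linux-only, since it relies on /proc; the pid_files() iterator and CONF object from the original are not assumed here):

# Stand-alone sketch of the stale-pid-file check used above (Linux /proc only).
import os

def pid_is_running(pid):
    return os.path.exists('/proc/%s' % pid)

def clean_stale_pid_file(pid_file):
    """Return the live pid, or None after removing a stale pid file."""
    try:
        with open(pid_file) as fp:
            pid = int(fp.read().strip())
    except (OSError, ValueError):
        return None
    if pid_is_running(pid):
        return pid
    os.unlink(pid_file)
    return None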
Example #46
0
 def tearDown( self):
     os.unlink( self.SAMPLE_FILE)
Example #47
0
 def tearDown(self):
     try:
         os.unlink(TESTFN)
     except OSError:
         pass
Example #48
0
def tif2kmz(args):
    if args.infile is not None:
        base, ext = path.splitext(args.infile)
        kmltif = base + '_kml' + ext
        kmlpng = base + '_kml' + '.png'
        kmlfile = base + '.kml'
        kmzfile = base + '.kmz'

        in_ds = gdal.Open(args.infile, gdal.GA_ReadOnly)
        # warp original dataset to WGS84 used by google earth
        tmp_ds = gdal.Warp(base + '_kml' + ext, in_ds, dstSRS='EPSG:4326')

        # get geotransform for latlonbox
        gt = tmp_ds.GetGeoTransform()
        cols = tmp_ds.RasterXSize
        rows = tmp_ds.RasterYSize

        # convert image format for use by Icon, nodata will be transparent
        gdal.Translate(kmlpng, tmp_ds, format="PNG")
        in_ds = None
        tmp_ds = None

        data = {
            'folder': {
                'name': base,
                'description': args.description
            },
            'overlay': {
                'name': base,
                'description': args.description
            },
            'imgurl': kmlpng,
            'north': gt[3],
            'south': gt[3] + gt[5] * rows,
            'west': gt[0],
            'east': gt[0] + gt[1] * cols,
            'rotation': 0,
            'color': "{}ffffff".format(hex(int(args.alpha * 255))[2:])
        }

        t = Template("""<?xml version="1.0" encoding="UTF-8"?>
<kml>
    <Folder>
        <name>{{ folder.name }}</name>
        <visibility>1</visibility>
        <description>{{ folder.description }}</description>
        <GroundOverlay>
            <name>{{ overlay.name }}</name>
            <visibility>1</visibility>
            <description>{{ overlay.description }}</description>
            <color>{{ color }} </color>
            <Icon>
              <href>{{imgurl}}</href>
            </Icon>
            <LatLonBox>
              <north>{{north}}</north>
              <south>{{south}}</south>
              <east>{{east}}</east>
              <west>{{west}}</west>
              <rotation>{{rotation}}</rotation>
            </LatLonBox>
        </GroundOverlay>
    </Folder>
</kml>""")

        with open(kmlfile, "w") as fo:
            fo.write(t.render(data))

        if args.zip:
            zipf = zipfile.ZipFile(kmzfile, "w", zipfile.ZIP_DEFLATED)
            zipf.write(kmlfile)
            zipf.write(kmlpng)
            zipf.close()

        if args.removetemp:
            if path.exists(kmltif): os.unlink(kmltif)
            if path.exists(kmlfile): os.unlink(kmlfile)
            if path.exists(kmlpng): os.unlink(kmlpng)
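The LatLonBox corners in the KML template come straight from the warped raster's geotransform: gt[0]/gt[3] give the top-left corner and gt[1]/gt[5] the pixel sizes (gt[5] is negative for north-up rasters). A tiny sketch of that arithmetic, using a made-up geotransform instead of a real GDAL dataset:

# Compute the KML LatLonBox bounds from a GDAL-style geotransform, as above.
# The geotransform below is made up; a real one comes from ds.GetGeoTransform().
gt = (10.0, 0.001, 0.0, 50.0, 0.0, -0.001)  # (west, xres, 0, north, 0, -yres)
cols, rows = 2000, 1000

bounds = {
    'north': gt[3],
    'south': gt[3] + gt[5] * rows,
    'west': gt[0],
    'east': gt[0] + gt[1] * cols,
}
print(bounds)  # {'north': 50.0, 'south': 49.0, 'west': 10.0, 'east': 12.0}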
Example #49
0
    def extract(self, project, package, srcinfo, repo, arch):
            # fetch cpio headers
            # check file lists for library packages
            fetchlist, liblist = self.compute_fetchlist(project, package, srcinfo, repo, arch)

            if not fetchlist:
                msg = "no libraries found in %s/%s %s/%s"%(project, package, repo, arch)
                self.logger.info(msg)
                return None

            # mtimes in cpio are not the original ones, so we need to fetch
            # that separately :-(
            mtimes = self._getmtimes(project, package, repo, arch)

            self.logger.debug("fetchlist %s", pformat(fetchlist))
            self.logger.debug("liblist %s", pformat(liblist))

            debugfiles = set(['/usr/lib/debug%s.debug'%f for f in liblist])

            # fetch binary rpms
            downloaded = self.download_files(project, package, repo, arch, fetchlist, mtimes)

            # extract binary rpms
            tmpfile = os.path.join(CACHEDIR, "cpio")
            for fn in fetchlist:
                self.logger.debug("extract %s"%fn)
                with open(tmpfile, 'wb') as tmpfd:
                    if not fn in downloaded:
                        raise FetchError("%s was not downloaded!"%fn)
                    self.logger.debug(downloaded[fn])
                    r = subprocess.call(['rpm2cpio', downloaded[fn]], stdout=tmpfd, close_fds=True)
                    if r != 0:
                        raise FetchError("failed to extract %s!"%fn)
                    tmpfd.close()
                    cpio = CpioRead(tmpfile)
                    cpio.read()
                    for ch in cpio:
                        fn = ch.filename
                        if fn.startswith('./'): # rpm payload is relative
                            fn = fn[1:]
                        self.logger.debug("cpio fn %s", fn)
                        if not fn in liblist and not fn in debugfiles:
                            continue
                        dst = os.path.join(UNPACKDIR, project, package, repo, arch)
                        dst += fn
                        if not os.path.exists(os.path.dirname(dst)):
                            os.makedirs(os.path.dirname(dst))
                        self.logger.debug("dst %s", dst)
                        # the filehandle in the cpio archive is private so
                        # open it again
                        with open(tmpfile, 'rb') as cpiofh:
                            cpiofh.seek(ch.dataoff, os.SEEK_SET)
                            with open(dst, 'wb') as fh:
                                while True:
                                    buf = cpiofh.read(4096)
                                    if buf is None or buf == '':
                                        break
                                    fh.write(buf)
            os.unlink(tmpfile)

            return liblist
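The loop above notes that cpio payload paths are relative ("./usr/lib/..."), so the leading "." is stripped before the path is appended to the unpack directory. A short sketch of that path handling, independent of the CpioRead/rpm2cpio machinery (directory layout and names are illustrative only):

# Sketch of the cpio path handling above: strip the leading '.' of the payload
# path and place the file under an unpack directory, creating parents first.
import os

def dest_path(unpackdir, member_name):
    if member_name.startswith('./'):   # rpm payload paths are relative
        member_name = member_name[1:]  # keep the leading '/'
    dst = unpackdir + member_name
    os.makedirs(os.path.dirname(dst), exist_ok=True)
    return dst

# dest_path('/tmp/unpack', './usr/lib64/libfoo.so.1')
# -> '/tmp/unpack/usr/lib64/libfoo.so.1'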
Example #50
0
    def install(self, package, update=False):
        if package.source_type == "directory":
            self.install_directory(package)

            return

        if package.source_type == "git":
            self.install_git(package)

            return

        args = ["install", "--no-deps"]

        if package.source_type == "legacy" and package.source_url:
            parsed = urlparse.urlparse(package.source_url)
            if parsed.scheme == "http":
                self._io.write_error(
                    "    <warning>Installing from unsecure host: {}</warning>".format(
                        parsed.hostname
                    )
                )
                args += ["--trusted-host", parsed.hostname]

            auth = get_http_basic_auth(
                Config.create("auth.toml"), package.source_reference
            )
            if auth:
                index_url = "{scheme}://{username}:{password}@{netloc}{path}".format(
                    scheme=parsed.scheme,
                    username=auth[0],
                    password=auth[1],
                    netloc=parsed.netloc,
                    path=parsed.path,
                )
            else:
                index_url = package.source_url

            args += ["--index-url", index_url]

        if update:
            args.append("-U")

        if package.hashes and not package.source_type:
            # Format as a requirements.txt
            # We need to create a requirements.txt file
            # for each package in order to check hashes.
            # This is far from optimal but we do not have any
            # other choice since this is the only way for pip
            # to verify hashes.
            req = self.create_temporary_requirement(package)
            args += ["-r", req]

            try:
                self.run(*args)
            finally:
                os.unlink(req)
        else:
            req = self.requirement(package)
            if not isinstance(req, list):
                args.append(req)
            else:
                args += req

            self.run(*args)
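The comment block above explains that pip can only verify hashes through a requirements file, so a temporary one is written per package and deleted in the finally block. The create_temporary_requirement() helper itself is not shown; a hedged sketch of what such a helper could look like, assuming the caller passes a name, a version, and a list of "sha256:..." digests (these parameters are an assumption, not the original signature):

# Hypothetical sketch of a create_temporary_requirement()-style helper: write
# a one-package requirements file so "pip install -r" can verify its hashes.
import os
import tempfile

def create_temporary_requirement(name, version, hashes):
    fd, req_path = tempfile.mkstemp(suffix='.txt', prefix='requirement-')
    line = '{}=={}'.format(name, version)
    for digest in hashes:                    # e.g. "sha256:abc123..."
        line += ' --hash={}'.format(digest)
    with os.fdopen(fd, 'w') as req_file:
        req_file.write(line + '\n')
    return req_path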
Example #51
0
 def close(self):
     '''Close the workspace and delete the temporary measurements file'''
     if self.measurements is not None and self.__filename is not None:
         self.measurements.close()
         os.unlink(self.__filename)
Example #52
0
 def tearDown(self):
     os.close(self.db_fd)
     os.unlink(flaskr.app.config['DATABASE'])
Example #53
0
 def tearDown(self):
     "delete the file we have created"
     import os
     os.unlink("__unittests.xml")
Example #54
0
 def cleanup():
     if os.path.exists(old_dump):
         os.unlink(old_dump)
     if os.path.exists(new_dump):
         os.unlink(new_dump)
Example #55
0
def process(tsv, metric = None, query = None, enumsize = None):
    with open(tsv) as inf:
        lines = inf.readlines()
        colnames = lines[0][1:].strip().split('\t') # first line is a header that starts with '#'
        column = colnames.index # helper function to find column number by name
        data = [line.strip().split('\t') for line in lines[1:]]
        for i in xrange(len(data)):
            data[i][1] = long(data[i][1]) # store size is a long
            for j in xrange(5, len(colnames)):
                data[i][j] = float(data[i][j]) # all metrics have float values
        unique_vals = lambda colnum: list(OrderedDict.fromkeys(row[colnum] for row in data))
        ages = unique_vals(column('age class'))
        lengths = unique_vals(column('length class'))
        try:
            if metric is None:
                assert len(colnames) == 6
                metric = colnames[5]
            else:
                assert metric in colnames[5:]
        except AssertionError:
            raise AssertionError('-metric must be specified as one of %s' % ' '.join(colnames[5:]))
        try:
            all_queries = unique_vals(column('query'))
            if query is None:
                assert len(all_queries) == 1
                query = all_queries[0]
            else:
                assert query in all_queries
        except AssertionError:
            raise AssertionError('-query must be specified as one of %s' % ' '.join(all_queries))

    pdf_files = 'tmp-A%s-L%s.pdf'
    dat_file = 'tmp-data.tsv'
    gp_file = 'tmp.gp'
    tex_file = 'plot.tex'
    tex_aux_files = ['plot.aux', 'plot.log']
    all_files = [pdf_files % (age, length) for age in ages for length in lengths] + [dat_file, gp_file, tex_file] + tex_aux_files
    pdfs = defaultdict(dict)
    for age in ages:
        for length in lengths:
            have_data = False
            with open(dat_file, 'w') as outf:
                for row in data:
                    if row[column('query')] == query and row[column('age class')] == age and row[column('length class')] == length:
                        have_data = True
                        outf.write("%s\t%s\n" % (row[1], row[column(metric)]))
            if have_data:
                out_file = pdf_files % (age, length)
                pdfs[age][length] = out_file
                with open(gp_file, 'w') as outf:
                    if enumsize is not None:
                        normfact = float(enumsize)
                        plotx = 'set xrange [0:1]'
                    else:
                        normfact = 1
                        plotx = 'set autoscale x'
                    outf.write("""load "template.gp"
set term pdfcairo lw 8
unset grid
unset xtics
unset ytics
set output "{out_file}"
{plotx}
set yrange [0:100]
plot "{dat_file}" u ($1/{normfact}):($2*100) w lp ls 1 notitle
""".format(**locals()))
                gnuplot(gp_file)
    with open(tex_file, 'w') as outf:
        outf.write("""\documentclass[12pt]{article}
\usepackage{graphicx}
\usepackage{palatino}
\usepackage{diagbox}
\usepackage{pdflscape}

\\begin{document}
\\thispagestyle{empty}

\\begin{landscape}
\\begin{table}
\centering
\\begin{tabular}{l|%s}
%%\hline
\\diagbox{age}{length} & %s \\\\
\hline
""" % (\
        ''.join('l' for i in xrange(len(lengths))),\
        ' & '.join(lengths)))
        for age in ages:
            if age not in pdfs:
                continue
            outf.write("%s" % age)
            for length in lengths:
                if length not in pdfs[age]:
                    continue
                outf.write(" & \includegraphics[width=0.15\\textwidth]{%s}" % pdfs[age][length])
            outf.write("""\\\\
%\hline
""")
        if enumsize is not None:
            xaxis = 'sstore/enumeration bytes [0:1]'
        else:
            xaxis = '\# sstore windows'
        if metric[0] == 'p' and is_float(metric[1:]):
            yaxis = '%sth percentile' % metric[1:]
        else:
            yaxis = metric
        yaxis += ' percent error [0:100]'
        outf.write("""
\end{tabular}
\caption{%s query: x-axis %s, y-axis %s}
\end{table}
\end{landscape}

\end{document}""" % (query, xaxis, yaxis))
    pdflatex(tex_file)

    for fil in all_files:
        try:
            os.unlink(fil)
        except OSError: # file doesn't exist
            pass
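The parsing at the top of the function assumes a tab-separated file whose first line is a header prefixed with '#', with the store size in column 2 and metric values from column 6 onward. The example itself is Python 2 (xrange, long); a small Python 3 rendition of just the header and row parsing, on made-up data, makes the column handling clearer:

# Python 3 sketch of the TSV parsing above: '#'-prefixed header, integer store
# size in column 2, float metrics from column 6 onward (data is made up).
lines = [
    "#query\tstore size\tage class\tlength class\twindows\tp95\n",
    "q1\t1024\tA1\tL1\t16\t12.5\n",
]
colnames = lines[0][1:].strip().split('\t')
column = colnames.index                  # look up a column number by name
data = [line.strip().split('\t') for line in lines[1:]]
for row in data:
    row[1] = int(row[1])                 # store size is an integer
    for j in range(5, len(colnames)):
        row[j] = float(row[j])           # all metrics have float values
print(colnames[column('p95')], data[0][column('p95')])  # p95 12.5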
Example #56
0
    def handle(self, *args, **options):
        start = datetime.now()  # Measure the time it takes to run the script.
        day = options['date']
        if not day:
            day = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
        folder = options['folder_name']
        folder = path.join(settings.TMP_PATH, folder, day)
        sep = options['separator']
        filepath = path.join(folder, 'download_counts.hive')
        # Make sure we're not trying to update with mismatched data.
        if get_date_from_file(filepath, sep) != day:
            raise CommandError('%s file contains data for another day' %
                               filepath)
        # First, make sure we don't have any existing counts for the same day,
        # or it would just increment again the same data.
        DownloadCount.objects.filter(date=day).delete()

        # Memoize the files to addon relations and the DownloadCounts.
        download_counts = {}
        # Perf: preload all the files and slugs once and for all.
        # This builds two dicts:
        # - One where each key (the file_id we get from the hive query) has
        #   the addon_id as value.
        # - One where each key (the add-on slug) has the add-on_id as value.
        files_to_addon = dict(File.objects.values_list('id',
                                                       'version__addon_id'))
        slugs_to_addon = dict(Addon.objects.public().values_list('slug', 'id'))

        # Only accept valid sources, which are listed in the DownloadSource
        # model. The source must either be exactly one of the "full" valid
        # sources, or prefixed by one of the "prefix" valid sources.
        fulls = set(DownloadSource.objects.filter(type='full').values_list(
            'name', flat=True))
        prefixes = DownloadSource.objects.filter(type='prefix').values_list(
            'name', flat=True)

        with codecs.open(filepath, encoding='utf8') as count_file:
            for index, line in enumerate(count_file):
                if index and (index % 1000000) == 0:
                    log.info('Processed %s lines' % index)

                splitted = line[:-1].split(sep)

                if len(splitted) != 4:
                    log.debug('Badly formatted row: %s' % line)
                    continue

                day, counter, id_or_slug, src = splitted
                try:
                    # Clean up data.
                    id_or_slug = id_or_slug.strip()
                    counter = int(counter)
                except ValueError:
                    # Ignore completely invalid data.
                    continue

                if id_or_slug.strip().isdigit():
                    # If it's a digit, then it should be a file id.
                    try:
                        id_or_slug = int(id_or_slug)
                    except ValueError:
                        continue

                    # Does this file exist?
                    if id_or_slug in files_to_addon:
                        addon_id = files_to_addon[id_or_slug]
                    # Maybe it's an add-on ?
                    elif id_or_slug in files_to_addon.values():
                        addon_id = id_or_slug
                    else:
                        # It's an integer we don't recognize, ignore the row.
                        continue
                else:
                    # It's probably a slug.
                    if id_or_slug in slugs_to_addon:
                        addon_id = slugs_to_addon[id_or_slug]
                    else:
                        # We've exhausted all possibilities, ignore this row.
                        continue

                if not is_valid_source(src, fulls=fulls, prefixes=prefixes):
                    continue

                # Memoize the DownloadCount.
                if addon_id in download_counts:
                    dc = download_counts[addon_id]
                else:
                    dc = DownloadCount(date=day, addon_id=addon_id, count=0)
                    download_counts[addon_id] = dc

                # We can now fill the DownloadCount object.
                dc.count += counter
                dc.sources = update_inc(dc.sources, src, counter)

        # Create in bulk: this is much faster.
        DownloadCount.objects.bulk_create(download_counts.values(), 100)
        for download_count in download_counts.values():
            save_stats_to_file(download_count)
        log.info('Processed a total of %s lines' % (index + 1))
        log.debug('Total processing time: %s' % (datetime.now() - start))

        # Clean up file.
        log.debug('Deleting {path}'.format(path=filepath))
        unlink(filepath)
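The comments above explain that a download row is only kept when its source string is either exactly one of the "full" valid sources or starts with one of the "prefix" sources; is_valid_source() itself is not shown. A minimal sketch of such a check (the name matches the call above, but this body is an assumption):

# Hedged sketch of an is_valid_source()-style check as described above:
# accept an exact match against the "full" sources or a prefix match.
def is_valid_source(src, fulls, prefixes):
    return src in fulls or any(src.startswith(prefix) for prefix in prefixes)

# is_valid_source('search', fulls={'search'}, prefixes=['homepage-'])          -> True
# is_valid_source('homepage-promo', fulls={'search'}, prefixes=['homepage-'])  -> True
# is_valid_source('unknown', fulls={'search'}, prefixes=['homepage-'])         -> False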
Example #57
0
    def handle_func(self, func, data):
        '''
        Execute this method in a multiprocess or thread
        '''
        if salt.utils.is_windows():
            self.functions = salt.loader.minion_mods(self.opts)
            self.returners = salt.loader.returners(self.opts, self.functions)
        ret = {
            'id': self.opts.get('id', 'master'),
            'fun': func,
            'schedule': data['name'],
            'jid': '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now())
        }

        proc_fn = os.path.join(salt.minion.get_proc_dir(self.opts['cachedir']),
                               ret['jid'])

        # Check to see if there are other jobs with this
        # signature running.  If there are more than maxrunning
        # jobs present then don't start another.
        # If jid_include is False for this job we can ignore all this
        # NOTE--jid_include defaults to True, thus if it is missing from the data
        # dict we treat it like it was there and is True
        if 'jid_include' not in data or data['jid_include']:
            jobcount = 0
            for basefilename in os.listdir(
                    salt.minion.get_proc_dir(self.opts['cachedir'])):
                fn_ = os.path.join(
                    salt.minion.get_proc_dir(self.opts['cachedir']),
                    basefilename)
                with salt.utils.fopen(fn_, 'r') as fp_:
                    job = salt.payload.Serial(self.opts).load(fp_)
                    if job:
                        if 'schedule' in job:
                            log.debug(
                                'schedule.handle_func: Checking job against '
                                'fun {0}: {1}'.format(ret['fun'], job))
                            if ret['schedule'] == job[
                                    'schedule'] and os_is_running(job['pid']):
                                jobcount += 1
                                log.debug(
                                    'schedule.handle_func: Incrementing jobcount, now '
                                    '{0}, maxrunning is {1}'.format(
                                        jobcount, data['maxrunning']))
                                if jobcount >= data['maxrunning']:
                                    log.debug(
                                        'schedule.handle_func: The scheduled job {0} '
                                        'was not started, {1} already running'.
                                        format(ret['schedule'],
                                               data['maxrunning']))
                                    return False
                    else:
                        try:
                            log.info('Invalid job file found.  Removing.')
                            os.remove(fn_)
                        except OSError:
                            log.info('Unable to remove file: {0}.'.format(fn_))

        salt.utils.daemonize_if(self.opts)

        ret['pid'] = os.getpid()

        if 'jid_include' not in data or data['jid_include']:
            log.debug('schedule.handle_func: adding this job to the jobcache '
                      'with data {0}'.format(ret))
            # write this to /var/cache/salt/minion/proc
            with salt.utils.fopen(proc_fn, 'w+') as fp_:
                fp_.write(salt.payload.Serial(self.opts).dumps(ret))

        args = None
        if 'args' in data:
            args = data['args']

        kwargs = None
        if 'kwargs' in data:
            kwargs = data['kwargs']

        try:
            if args and kwargs:
                ret['return'] = self.functions[func](*args, **kwargs)

            if args and not kwargs:
                ret['return'] = self.functions[func](*args)

            if kwargs and not args:
                ret['return'] = self.functions[func](**kwargs)

            if not kwargs and not args:
                ret['return'] = self.functions[func]()

            data_returner = data.get('returner', None)
            if data_returner or self.schedule_returner:
                if 'returner_config' in data:
                    ret['ret_config'] = data['returner_config']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, str):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                rets = OrderedDict.fromkeys(rets).keys()
                for returner in rets:
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        ret['success'] = True
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job {0} using invalid returner: {1}. Ignoring.'.
                            format(func, returner))
        except Exception:
            log.exception("Unhandled exception running {0}".format(ret['fun']))
            # Although catch-all exception handlers are bad, the exception here
            # is to let the exception bubble up to the top of the thread context,
            # where the thread will die silently, which is worse.
        finally:
            try:
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.ENOENT:
                    # ENOENT is OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '{0}': {1}".format(
                        proc_fn, exc.errno))
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
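The returner handling above collects names from both the job data and the scheduler, then applies a "simple de-duplication with order retained" before dispatch. That idiom is worth isolating; a tiny stand-alone version:

# The order-preserving de-duplication used above, shown on its own.
from collections import OrderedDict

rets = ['mysql', 'smtp', 'mysql', 'redis']
rets = list(OrderedDict.fromkeys(rets))
print(rets)  # ['mysql', 'smtp', 'redis']

On Python 3.7+ a plain dict.fromkeys() gives the same order-preserving result.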
Example #58
0
 def _clean_up(self):
     try:
         BlockDev.loop_teardown(self.loop)
     except:
         pass
     os.unlink(self.dev_file)
Example #59
0
	def source(self):
		download("https://github.com/cpputest/cpputest/releases/download/v3.8/cpputest-3.8.zip", "cpputest-3.8.zip")
		unzip("cpputest-3.8.zip")
		os.unlink("cpputest-3.8.zip")
Example #60
0
def extract_zips(unzip_dir, files):

    # Handles de-duping; if there are multiple files with the same data,
    # we will use only one of the equivalent set. If there is an entry in
    # the filename dictionary, that takes precedence.
    md5sums = [md5(fn) for fn in files]
    mdict = defaultdict(list)
    for md, fn in zip(md5sums, files):
        mdict[md].append(fn)

    # Try to parse out at least the kit number by trying a series of regular
    # expressions. Adding regular expressions at the end of this list is safer
    # than at the beginning. Order is important - rules at top are matched
    # first.

    # constants used in filename regular expressions
    # groupings (?:xxx) are ignored
    ws = r'^[_]?'
    nam1 = r"[a-z]{0,20}|O\'[a-z]{3,20}|O['][a-z]{3,20}"
    cname = r'([\w]{1,20})'  #matches unicode chars; also matches digits though
    pnam = r'\(' + nam1 + r'\)'
    nam2 = r'(?:' + nam1 + '|' + pnam + r')'
    ndate = r'(?:(201[1-8][\d]{4}|201[1-8]-\d\d-\d\d|\d{4}201[1-8]))'
    sep = r'[\-\s\._]'
    seps = r'[\-\s\._]?'
    sepp = r'[\-\s\._]+'
    sepp = r'_'
    sept = r'[\-\s\._]{3}'
    bigy = r'(?:big' + seps + r'y(?:data)?|ydna)'
    rslt = r'(?:results|data|rawdata|vcfdata|raw data|csvexport|raw_data|raw|bigyrawdata)'
    name = r'((?:' + nam2 + seps + '){1,3})'
    kit = r'(?:(?:kit|ftdna)?[ #]?)?([enhb1-9][0-9]{3,6})'
    rzip = r'zip(?:.zip)?'
    snps = r'(?:[\-_]{,3}(?:r[\-_]?)?(?:cts\d{3,6}|fcg\d{4,5}|fgc\d{3,5}x?|p312|z\d{3,5}|df\d{2,3}x?|l\d{2,3}x?|u152|rs\d{4}|d27|sry\d{4}|m222|l\d{4}|s\d{4,6}|mc14|a\d{3,5}|zz\d{2}|zp\d{2}|z\d{2,3}|s\d{3}|pf\d{4}|by\d{3,5}|u106|l2|y\d{4,5}|yp\d{4,5})){1,3}'
    plac = r'(?:Germany|England|UnknownOrigin|Sweden|France|United_?Kingdom|Scotland|Ireland|Netherlands|Europe|Luxembour?g|Wales|Poland|Italy|CzechRepublic|Russia|Puerto-Rico|Switzerland|Algeria|Denmark|Slovakia|US|USA)?'
    name_re = [
        #0 e.g. bigy-Treece-N4826.zip
        (re.compile(ws + sepp.join([bigy, name, kit, rzip]),
                    re.I), 'name', 'kit'),
        #1 e.g. bigy-N4826-Treece.zip
        (re.compile(ws + sepp.join([bigy, kit, name, rzip]),
                    re.I), 'kit', 'name'),
        #2 e.g. N4826-bigy-Treece.zip
        (re.compile(ws + sepp.join([kit, bigy, name, rzip]),
                    re.I), 'kit', 'name'),
        #3 e.g. Treece - N4826 - bigy.zip
        (re.compile(ws + name + sept + kit + sept + bigy + sep + r'?\.zip',
                    re.I), 'name', 'kit'),
        #4 e.g. Treece N4826 bigy.zip
        (re.compile(ws + sepp.join([name, kit, bigy, rzip]),
                    re.I), 'name', 'kit'),
        #5 e.g. Treece N4826 bigy results 20140808.zip
        (re.compile(ws + sepp.join([name, kit, bigy, rslt, ndate, rzip]),
                    re.I), 'name', 'kit'),
        #6 e.g. bigy-Treece-N4826-FGC1233.zip
        (re.compile(ws + sepp.join([bigy, name, kit, snps, rzip]),
                    re.I), 'name', 'kit'),
        #7 e.g. FGC1234-N4826-Treece-England-bigy-rawdata-20140708.zip
        (re.compile(
            ws + sepp.join([snps, kit, name, plac, bigy, rslt, ndate, rzip]),
            re.I), 'kit', 'name'),
        #8 e.g. FGC1234-N4826-Treece-bigy-rawdata-20140708.zip
        (re.compile(ws + sepp.join([snps, kit, name, bigy, rslt, ndate, rzip]),
                    re.I), 'kit', 'name'),
        #9 e.g. FGC1234-Treece-N4826-bigy-rawdata-20140708.zip
        (re.compile(ws + sepp.join([snps, name, kit, bigy, rslt, ndate, rzip]),
                    re.I), 'name', 'kit'),
        #10 e.g. FGC1234-N4826-Treece-bigy-rawdata.zip
        (re.compile(ws + sepp.join([snps, kit, name, bigy, rslt, rzip]),
                    re.I), 'kit', 'name'),
        #11 e.g. FGC1234-N4826-Treece-bigy-rawdata.zip
        (re.compile(ws + sepp.join([snps, kit, name, bigy, rzip]),
                    re.I), 'kit', 'name'),
        #12 e.g. FGC1234-N4826-Treece-England-bigy-rawdata.zip
        (re.compile(ws + sepp.join([snps, kit, name, plac, rslt, ndate, rzip]),
                    re.I), 'kit', 'name'),
        #13 e.g. N4826_Treece_US_BigY_RawData_2018-01-03.zip
        (re.compile(
            ws + sepp.join([kit, name, plac, bigy, rslt, ndate]) + '.zip',
            re.I), 'kit', 'name'),
        #14 e.g. FGC1234-N4826-Treece-bigy-rawdata.zip
        (re.compile(ws + sepp.join([snps, kit, name, rslt, ndate, rzip]),
                    re.I), 'kit', 'name'),
        #15 e.g. FGC1234-N4826-Treece-bigy-20140708.zip
        (re.compile(ws + sepp.join([snps, kit, name, bigy, ndate, rzip]),
                    re.I), 'kit', 'name'),
        #16 e.g. FGC1234-N4826-Treece-20140708.zip
        (re.compile(ws + sepp.join([snps, kit, name, ndate, rzip]),
                    re.I), 'kit', 'name'),
        #17 e.g. N4826-Treece-bigy-data-20140708.zip
        (re.compile(ws + sepp.join([kit, name, bigy, rslt, ndate, rzip]),
                    re.I), 'kit', 'name'),
        #18 e.g. N4826-bigy-Treece-20140708.zip
        (re.compile(ws + sepp.join([kit, bigy, name, ndate, rzip]),
                    re.I), 'kit', 'name'),
        #19 e.g. FGC1234-Treece-N4826.zip
        (re.compile(ws + sepp.join([snps, name, kit, rzip]),
                    re.I), 'name', 'kit'),
        #20 e.g. FGC1234-Treece-N4826-bigy-rawdata.zip
        (re.compile(ws + sepp.join([snps, name, kit, bigy, rslt, rzip]),
                    re.I), 'name', 'kit'),
        #21 e.g. bigy-Lindström-548872.zip
        (re.compile(ws + sepp.join([bigy, cname, kit, rzip]),
                    re.I), 'name', 'kit'),
    ]

    trace(2, 'File names mapped, according to which regular expression:')
    # track counts - only for diagnostics
    cnt = defaultdict(int)
    # list of non-matching files
    nomatch = []
    # all of the file names we could parse
    fname_dict = {}

    bed_re = re.compile(r'(\b(?:\w*[^_/])?regions(?:\[\d\])?\.bed)')
    vcf_re = re.compile(r'(\b(?:\w*[^_/])?variants(?:\[\d\])?\.vcf)')
    zip_re = re.compile(r'(\b(?:\w*[^_/])?bigy.*\.zip)')
    mgroup = []
    for md, fnames in mdict.items():
        if md in rename_dict:
            kkit, nname = rename_dict[md]
            pathname = fnames[0]
            fname = os.path.split(pathname)[-1]
            rule = 'dm'
        else:
            for pathname in fnames:
                fname = os.path.split(pathname)[-1]
                if fname in rename_dict:
                    kkit, nname = rename_dict[fname]
                    rule = 'df'
                    break
            else:
                kkit = nname = None
        if not keepfile(md):
            trace(2, '{} skipped because of subsetting'.format(fname))
            continue

        if kkit:
            if kkit in ('None', '') and nname in ('None', ''):
                kkit = nname = None
                trace(
                    2, '{} skipped because of entry in mappings dictionary'.
                    format(fname))
            else:
                trace(
                    2, '{3:>2} {0:<50s} {1:<15s} {2:<10s}'.format(
                        fname, nname, kkit, rule))
                cnt[rule] += 1
            fname_dict[pathname] = kkit, nname
        else:
            pathname = fnames[0]  # only use one of the equivalent set
            if os.path.splitext(pathname)[-1].lower() != '.zip':
                trace(
                    2,
                    'Found foreigner hanging out in zip directory: {0}'.format(
                        fname))
                continue
            d = {}
            fname = os.path.split(pathname)[-1]
            for ii, (r, k1, k2) in enumerate(name_re):
                s = r.search(fname)
                if s:
                    d[k1] = s.groups()[0]
                    if k2:
                        d[k2] = s.groups()[1]
                    else:
                        d['name'] = 'Unknown'
                    d['name'] = name_preference(d['name'])
                    try:
                        trace(
                            2, '{3:>2} {0:<50s} {1:<15s} {2:<10s}'.format(
                                fname, d['name'], d['kit'], ii))
                        cnt[ii] += 1
                        fname_dict[pathname] = d['kit'], d['name']
                    except:
                        trace(0, 'FAILURE on filename:', fname)
                    break
            else:
                nomatch.append(pathname)
        if len(fnames) > 1:
            for eq in [p for p in fnames if p != pathname]:
                trace(2, '  -same: {}'.format(os.path.split(eq)[-1]))

    trace(2, 'Number of filenames not matched: {0}'.format(len(nomatch)))
    trace(2, 'Which expressions were matched:')

    def keyfunc(v):
        return '{0!s:0>2}'.format(v)

    for nn in sorted(cnt, key=keyfunc):
        trace(2, '{0:>2}: {1:>4}'.format(nn, cnt[nn]))

    if nomatch:
        trace(1, 'Files that did not match:')
        for ll in nomatch:
            trace(1, ll.strip())
    else:
        trace(1, 'All files matched a rule')

    zipcount = 0

    # keep track of what needs to be cleaned up
    emptydirs = []

    import zipfile

    for fname in fname_dict:
        kitnumber, kitname = fname_dict[fname]
        if kitnumber is None:
            continue
        if keep_files:
            vcffile = os.path.join(unzip_dir,
                                   '%s-%s.vcf' % (kitname, kitnumber))
            bedfile = os.path.join(unzip_dir,
                                   '%s-%s.bed' % (kitname, kitnumber))
            # no checking to see if the contents are good, but that's what was asked for
            if os.path.isfile(vcffile) and os.path.isfile(bedfile):
                trace(
                    1, '%s-%s already exists and keep flag - skipping' %
                    (kitname, kitnumber))
                continue
        try:
            zf = zipfile.ZipFile(fname)
        except (zipfile.BadZipFile, OSError):
            trace(
                0,
                'WARN: not a zip file: {} from {}'.format(fname, os.getcwd()))
            continue
        listfiles = zf.namelist()
        bedfile = vcffile = zipfname = None
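        # Scan the archive members for the BED, the VCF, and any nested
        # bigy zip; remember member directories so they can be cleaned up.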
        for ff in listfiles:
            dirname, basename = os.path.split(ff)
            if bed_re.search(basename):
                bedfile = ff
            elif vcf_re.search(basename):
                vcffile = ff
            elif zip_re.search(basename):
                zipfname = ff
            if dirname and (dirname not in emptydirs):
                emptydirs.append(dirname)
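        # Fall back to a nested bigy zip when the BED/VCF are not directly
        # in this archive: extract the inner zip and read them from there.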
        if (not bedfile) or (not vcffile):
            if not zipfname:
                trace(0, 'WARN: missing data in ' + fname)
                continue
            else:
                try:
                    zf.extractall(unzip_dir, [zipfname])
                    emptydirs.append(os.path.join(unzip_dir, zipfname))
                    bedfile = 'regions.bed'
                    vcffile = 'variants.vcf'
                    zf = zipfile.ZipFile(os.path.join(unzip_dir, zipfname))
                except (zipfile.BadZipFile, OSError, KeyError):
                    trace(0, 'WARN: missing data in ' + fname)
                    continue
        base = '%s-%s' % (kitname, kitnumber)
        try:
            zf.extractall(unzip_dir, [bedfile, vcffile])
        except RuntimeError:
            trace(0, 'WARN: {} would not extract - encrypted?'.format(base))

        fpath = os.path.join(unzip_dir, '%s')
        trace(3, fpath % base)
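        # With the rename option the original zip is hard-linked (or copied)
        # to <base>.zip and the extracted BED/VCF are queued for deletion;
        # otherwise the extracted files are renamed to <base>.bed / <base>.vcf.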
        if namespace.rename:
            try:
                os.link(fname, (fpath % base) + '.zip')
                trace(1, 'ln {} {}.zip'.format(fname, (fpath % base)))
            except OSError:
                shutil.copy2(fname, (fpath % base) + '.zip')
                trace(1, 'cp -p {} {}.zip'.format(fname, (fpath % base)))
            emptydirs.append(fpath % bedfile)
            emptydirs.append(fpath % vcffile)
        else:
            try:
                os.rename(fpath % bedfile, (fpath % base) + '.bed')
                os.rename(fpath % vcffile, (fpath % base) + '.vcf')
            except OSError:
                trace(
                    0,
                    'WARN: could not identify VCF and/or BED file for ' + base)
        zipcount += 1

    trace(0, '%d new files extracted' % zipcount)

    # clean up any empty dirs unzip created
    if emptydirs:
        trace(3, 'Trying to remove droppings:')
        for path in emptydirs:
            if os.path.isfile(path):
                os.unlink(path)
        for path in emptydirs:
            if os.path.isfile(path):
                continue
            dp = os.path.join(unzip_dir, path)
            try:
                os.removedirs(dp)
                trace(3, '  {0}'.format(dp))
            except FileNotFoundError:
                pass
            except OSError:
                trace(3, '  W! could not remove {0}'.format(dp))

    # list of file names we unzipped
    files = os.listdir(unzip_dir)
    return files