Example #1
def sqlize(rdf_file, kb_name, binary=True):
    """Convert an RDF file into a SQL dump.

    Arguments:
        rdf_file (str): Path to the input RDF file.
        kb_name (str): The name of the KB. Used as the stem for the output filename.
        binary (bool): Whether to save to a binary SQLite file. Defaults to True.

    Returns:
        str: Filename of the output file.

    Raises:
        FileExistsError: If any intermediate files already exist.
    """
    rdf_file = realpath(expanduser(rdf_file))
    directory = dirname(rdf_file)
    sql_file = join_path(directory, kb_name + '.sql')
    if file_exists(sql_file):
        raise FileExistsError(sql_file)
    RDFSQLizer().sqlize(rdf_file, kb_name, sql_file)
    if binary:
        rdfsqlite_file = join_path(directory, kb_name + '.rdfsqlite')
        if file_exists(rdfsqlite_file):
            raise FileExistsError(rdfsqlite_file)
        read_dump(sql_file, rdfsqlite_file)
        remove(sql_file)
        return rdfsqlite_file
    else:
        return sql_file
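A hypothetical call, assuming an RDF dump at ~/data/mykb.rdf and no existing mykb.sql or mykb.rdfsqlite in that directory:

out_path = sqlize('~/data/mykb.rdf', 'mykb')                 # -> .../mykb.rdfsqlite
sql_path = sqlize('~/data/mykb.rdf', 'mykb', binary=False)   # -> .../mykb.sql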
Example #2
def cli_args_helper(arguments, parser):
    from os.path import exists as file_exists

    if (not (arguments.help)):
        if (not (arguments.sample)):
            exiting("The path to the sample file is empty!", -1)
        else:
            if (not (file_exists(arguments.sample))):
                exiting("The file < " + arguments.sample + " > doesn't exist!",
                        -1)

        if (not (arguments.out_file)):
            exiting("The path to the payload output file is empty!", -1)
        else:
            if (file_exists(arguments.out_file)):
                exiting(
                    "The file < " + arguments.out_file + " > already exists!",
                    -1)

        if (arguments.out_stage1):
            if (file_exists(arguments.out_stage1)):
                exiting(
                    "The file <" + arguments.out_stage1 + "> already exists!",
                    -1)

    # Help message and exit.
    if ((arguments.help) or (len(argv) <= 1)):
        parser.print_help()
        exit(0)

    return (arguments)
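A minimal sketch of the argparse setup this helper appears to assume; the option names below are inferred from the attribute accesses above and are not taken from the original source:

import argparse

parser = argparse.ArgumentParser(add_help=False)  # help is handled manually by the helper
parser.add_argument('-h', '--help', action='store_true')
parser.add_argument('-s', '--sample', help='path to the sample file')
parser.add_argument('-o', '--out-file', dest='out_file', help='payload output file')
parser.add_argument('--out-stage1', dest='out_stage1', help='optional stage-1 output file')
arguments = cli_args_helper(parser.parse_args(), parser)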
Example #3
    def test_episodes_and_examples_to_json(self):
        episode_1 = Episode(('a', 'b'))
        episode_2 = Episode(('b', ))
        examples_1 = EventSequences(event_sequences=(EventSequence(
            sequence_of_events=[Event('a', 1), Event('b', 2)]
        ), EventSequence(
            sequence_of_events=[Event('a', 2), Event('b', 3)])))
        examples_2 = EventSequences(
            event_sequences=(EventSequence(sequence_of_events=[Event('b', 2)]),
                             EventSequence(
                                 sequence_of_events=[Event('b', 3)])))
        episode_1.examples = examples_1
        episode_2.examples = examples_2
        episodes = Episodes((episode_1, episode_2))

        episodes.to_json('episodes.txt')
        self.assertTrue(file_exists('episodes.txt'))
        # TODO: test 'episodes.txt' contents
        file_remove('episodes.txt')

        episodes.examples_to_json('examples.txt')
        self.assertTrue(file_exists('examples.txt'))

        with open('examples.txt') as f:
            result = f.read()
        expected = '''[[["a", 1], ["b", 2]], [["a", 2], ["b", 3]]]\n[[["b", 2]], [["b", 3]]]\n'''
        self.assertEqual(expected, result)
        file_remove('examples.txt')

        result = episodes.examples_to_json()
        self.assertEqual(expected, result)
Example #4
    def test_restore_with_relative_path(self):
        self.fake_trash_dir.add_trashed_file(
            "file1", pj(self.curdir, "path", "to", "file1"), "contents")
        self.assertEqual(
            True, file_exists(pj(self.trash_dir, "info", "file1.trashinfo")))
        self.assertEqual(True, file_exists(pj(self.trash_dir, "files",
                                              "file1")))

        result = self.run_command(
            "trash-restore", ["%(curdir)s" % {
                'curdir': "."
            }, '--sort=path'],
            input='0')

        self.assertEqual(
            """\
   0 2000-01-01 00:00:01 %(curdir)s/path/to/file1
What file to restore [0..0]: """ % {'curdir': self.curdir}, result.stdout)
        self.assertEqual("", result.stderr)
        self.assertEqual("contents", read_file(pj(self.curdir,
                                                  "path/to/file1")))
        self.assertEqual(
            False, file_exists(pj(self.trash_dir, "info", "file1.trashinfo")))
        self.assertEqual(False,
                         file_exists(pj(self.trash_dir, "files", "file1")))
Example #5
    def get_sales_train_val(self, memopt=True) -> pd.DataFrame:
        """
        Get sales_train_validation.csv dataframe
        Args:
            memopt (bool, optional): do memory optimization. Defaults to True.
        Returns:
            pd.DataFrame: sales train validation dataframe
        """

        save_path = self.data_root + "/processed/stv_memopt.bin"

        read_file = "/extracted/sales_train_validation.csv"

        if file_exists(save_path):
            print(f"Loading {save_path}")
            df = pd.read_pickle(save_path)
        else:
            df = pd.read_csv(self.data_root + read_file)
            if memopt:
                df = self.util_handle.reduce_memory_usage(df)

                if not file_exists(save_path):
                    print(f"Saving {save_path}")
                    df.to_pickle(save_path)

            else:
                pass

        return df
Example #6
    def test_episodes_and_examples_to_json(self):
        episode_1 = Episode(('a', 'b'))
        episode_2 = Episode(('b',))
        examples_1 = EventSequences(event_sequences=(EventSequence(sequence_of_events=[Event('a', 1), Event('b', 2)]), 
                                                     EventSequence(sequence_of_events=[Event('a', 2), Event('b', 3)])))
        examples_2 = EventSequences(event_sequences=(EventSequence(sequence_of_events=[Event('b', 2)]), 
                                                     EventSequence(sequence_of_events=[Event('b', 3)])))
        episode_1.examples = examples_1
        episode_2.examples = examples_2
        episodes = Episodes((episode_1, episode_2))
        
        episodes.to_json('episodes.txt')
        self.assertTrue(file_exists('episodes.txt'))
        # TODO: test 'episodes.txt' contents
        file_remove('episodes.txt')

        
        episodes.examples_to_json('examples.txt')        
        self.assertTrue(file_exists('examples.txt'))

        with open('examples.txt') as f:
            result = f.read()
        expected='''[[["a", 1], ["b", 2]], [["a", 2], ["b", 3]]]\n[[["b", 2]], [["b", 3]]]\n'''        
        self.assertEqual(expected, result)
        file_remove('examples.txt')
        
        result = episodes.examples_to_json()
        self.assertEqual(expected, result)
Example #7
def read_catalog(filebase=None, return_dtype=np.float):
    """
    Reads a galaxy/randoms catalog and returns 3 XYZ arrays.

    Parameters
    -----------

    filebase: string (optional)
        The fully qualified path to the file. If omitted, reads the
        theory galaxy catalog under ../theory/tests/data/

    return_dtype: numpy dtype for returned arrays. Default ``numpy.float``
        Specifies the datatype for the returned arrays. Must be in
        {np.float, np.float32}

    Returns
    --------

    ``x y z`` - Unpacked numpy arrays compatible with the installed
    version of ``Corrfunc``.


    .. note:: If the filename is omitted, then first the fast-food file
        is searched for, and then the ascii file. End-users should always
        supply the full filename.


    """

    if filebase is None:
        filename = pjoin(dirname(abspath(__file__)), "../theory/tests/data/",
                         "gals_Mr19")
        allowed_exts = {
            '.ff': read_fastfood_catalog,
            '.txt': read_ascii_catalog,
            '.dat': read_ascii_catalog,
            '.csv': read_ascii_catalog
        }

        for e in allowed_exts:
            if file_exists(filename + e):
                f = allowed_exts[e]
                x, y, z = f(filename + e, return_dtype)
                return x, y, z

        raise IOError("Could not locate {0} with any of these extensions \
        = {1}".format(filename, allowed_exts.keys()))
    else:
        # Likely an user-supplied value
        if file_exists(filebase):
            extension = splitext(filebase)[1]
            f = read_fastfood_catalog if '.ff' in extension else read_ascii_catalog

            # default return is double
            x, y, z = f(filebase, return_dtype)
            return x, y, z

        raise IOError("Could not locate file {0}".format(filebase))
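Typical calls; the explicit path in the second line is purely illustrative:

x, y, z = read_catalog()                                        # bundled gals_Mr19 catalog
x, y, z = read_catalog('/path/to/galaxies.ff', return_dtype=np.float32)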
Example #8
def main(command, arg):
    if command == 'list':
        history = HistoryHandler()
        history.apply_file(arg)
    elif command == 'download':
        if len(glob(arg + ".*")) == 0:
            CatChangeset.download(int(arg))
            sleep(apidelay)
    elif command == 'process':
        fn = arg.replace('.osm', '.osc')
        if len(glob(fn + '*')) == 0:
            try:
                cs = CatChangeset(arg)
                log.info(f"{cs.id} has {len(cs.buildings)} buildings and "
                         f"{len(cs.parts)} parts")
                cs.get_missing_parts()
                if cs.error > 0 and not DEBUG:
                    log.error(f"{cs.id} has errors")
                elif len(cs.osc.ways) + len(cs.osc.relations) > 0:
                    cs.osc.write(DEBUG)
                else:
                    log.warning(f"{cs.id} has no missing building parts")
            except RuntimeError:
                if file_exists(fn):
                    os.remove(fn)
                log.error(f"{arg.replace('.osm', '')} runtime error")
        if file_exists(arg):
            os.remove(arg)
    elif command == 'upload':
        if DEBUG:
            print("This option is intentionally deactivated")
        elif not file_exists(arg + '.gz'):
            csid = arg.replace('.osc', '')
            upload = UploadHandler()
            upload.apply_file(arg)
            cs = api.ChangesetCreate({
                'comment': cscomment + csid,
                'source': sourcetext,
                'type': 'bot',
                'url': csurl,
            })
            try:
                api.ChangesetUpload(upload.data)
                api.ChangesetClose()
                log.info(f"{csid} fixed in changeset {cs}")
                with open(arg, 'rb') as f_in:
                    with gzip.open(arg + '.gz', 'wb') as f_out:
                        shutil.copyfileobj(f_in, f_out)
                os.remove(arg)
            except osmapi.ApiError as e:
                log.error(f"{csid} {str(e)}")
                os.rename(arg, arg + '.failed')
            sleep(apidelay)
    else:
        help()
Example #9
    def test_dot_dot_argument_is_skipped_even_in_subdirs(self):

        self.run_trashput("trash-put", "sandbox/..", "other_argument")

        # the dot directory shouldn't be operated, but a diagnostic message
        # shall be written to stderr
        self.stderr_should_be(
            "trash-put: cannot trash '..' directory 'sandbox/..'\n")

        # the remaining arguments should be processed
        assert not file_exists('other_argument')
        assert file_exists('sandbox')
Example #10
  def lookup(self):
    filename = self.template.filename
    path = '%%s/%s'%filename
    file = None

    if file_exists(filename) and isfile(filename):
      file = open(filename)
    else:
      for dir in self.template.directories:
        if file_exists(path%dir) and isfile(path%dir):
          file = open(path%dir)
          break
    return file
Example #11
 def __init__(self, username):
     user_file = open(
         'Save Location.txt').read() + 'UserData/' + username.lower()
     history_file = readCSVFile(user_file + '/history.csv') if file_exists(
         user_file + '/history.csv') else []
     datafile = readCSVFile(user_file + '/data.csv') if file_exists(
         user_file + '/data.csv') else []
     self.username = username
     self.historyDict = history_file
     self.historyPath = 'UserData/' + self.username + '/GamesHistory/GamesHistory.csv'
     data = None if len(datafile) < 1 else datafile[0]
     self.firstName = data['FirstName']
     self.lastName = data['LastName']
     self.registered = self.username in GetRegisteredUsers()
     del data
Example #12
    def try_load_or_create_new(session_user_id):
        """Loads a saved session_user_id.session or creates a new one.
           If session_user_id=None, later .save()'s will have no effect.
        """
        if session_user_id is None:
            return Session(None)
        else:
            path = '{}.session'.format(session_user_id)
            result = Session(session_user_id)
            if not file_exists(path):
                return result

            try:
                with open(path, 'r') as file:
                    data = json.load(file)
                    result.port = data.get('port', result.port)
                    result.salt = data.get('salt', result.salt)
                    result.layer = data.get('layer', result.layer)
                    result.server_address = \
                        data.get('server_address', result.server_address)

                    # FIXME We need to import the AuthKey here or otherwise
                    # we get cyclic dependencies.
                    from ..crypto import AuthKey
                    if data.get('auth_key_data', None) is not None:
                        key = b64decode(data['auth_key_data'])
                        result.auth_key = AuthKey(data=key)

                    for e_mid, e_hash in data.get('entities', []):
                        result._input_entities[e_mid] = e_hash

            except (json.decoder.JSONDecodeError, UnicodeDecodeError):
                pass

            return result
Example #13
def cache_calc(filename, func, *args, **kwargs):
    """ Cache calculations, so that the first call to this function performs the
        calculations, and caches them to a file. Future calls to this function
        simply load up the data from the cached file.

    Args:
        filename:(str)
            the file path you want to save the cached file as
        func: (callable)
            The function to call to perform the calculation.
        *args:
            ordered arguments to be passed on to func()
        **kwargs:
            keyword arguments to be passed on to func()

    Returns:
        Returns whatever func() returns.
    
    Examples:
        cache_calc("myCachedFile", myFunc)
    """
    # ==========================================================================
    if file_exists(filename):
        print("Loading the cached version of " + filename)
        with open(filename, mode="rb") as fileObj:
            x = pickle_load(fileObj)
    else:
        print("Caching the calculation to the file " + filename)
        x = func(*args, **kwargs)
        # Cache the calculation so future calls to this function load the cached
        # object instead.
        with open(filename, mode="wb") as fileObj:
            pickle_dump(x, fileObj)
    return x
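A small usage sketch; slow_square stands in for any expensive computation:

def slow_square(n):
    from time import sleep
    sleep(5)                  # simulate an expensive calculation
    return n * n

result = cache_calc('square_12.pkl', slow_square, 12)   # first call: computes and caches
result = cache_calc('square_12.pkl', slow_square, 12)   # second call: loads the cached value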
Example #14
    def test_save_different_tracebacks(self):
        path_1 = join(LOG_DIRECTORY, exc_utils.EXCEPTION_FILE_NAME_1)
        path_2 = join(LOG_DIRECTORY, exc_utils.EXCEPTION_FILE_NAME_2)

        exception_info = exc_utils.get_exception_info_1()
        storage = FileStorage(LOG_DIRECTORY)
        storage.save("diff:first", exception_info)
        self.assertTrue(file_exists(path_1))

        exception_info = exc_utils.get_exception_info_2()
        storage = FileStorage(LOG_DIRECTORY)
        storage.save("diff:second", exception_info)
        self.assertTrue(file_exists(path_2))

        self.assertEqual("diff:first", self._get_file_contents(path_1))
        self.assertEqual("diff:second", self._get_file_contents(path_2))
Example #15
def PutPQR(molecule, filename, standard=False, title='Created by Python chemistry package'):
   """ Writes a PQR based on the PQR version 3 spec """

   # define an array with location of TER cards and call the function to initialize it
   ter_locations = []
   _needTer(molecule, ter_locations)

   # Define a counter for which atom we're printing out
   atom_counter = 1

   # Keep track of whether an atom is ATOM or HETATM so we know what to do for CONECTs
   rectypes = []

   # Keep counters for MASTER record
   numters   = 0
   numconect = 0

   # Make sure we aren't overwriting this file if we haven't said it's OK yet
   if file_exists(filename) and not overwrite:
      raise(exceptions.FileError('Cannot open %s for writing. It already exists.' % filename))

   # Open the file for writing and print out the title
   file = open(filename,'w',0)
   print >> file, "REMARK %s" % title
   print >> file, "REMARK %s" % molecule.title

   # Now print out each line for atom
   for i in range(len(molecule.atoms)):
      record, resname = _resname(molecule.residues[molecule.residue_container[i]], standard)
      rectypes.append(record)
      line = "%-6s %4d %4s %3s  %4i    %8.3f %7.3f %7.3f %7.4f %7.4f      %2s" % \
             (record, atom_counter % 10000, _format(molecule.atoms[i]), resname,
              molecule.residue_container[i]+1, molecule.coords[3*i], molecule.coords[3*i+1], 
              molecule.coords[3*i+2], molecule.charges[i], molecule.radii[i], molecule.elements[i])
      print >> file, line
      atom_counter += 1
      try:
         if molecule.residue_container[i] != molecule.residue_container[i+1] and \
               molecule.residue_container[i] in ter_locations:
            print >> file, 'TER'
            numters += 1
      except IndexError:
         pass

   atom_counter -= 1 # decrement atom counter, since it was incremented after the last atom
   # Now print out the CONECT cards if they are needed
   for i in range(len(rectypes)):
      if rectypes[i] == 'HETATM' and molecule.residues[molecule.residue_container[i]] != 'WAT':
         line = 'CONECT%5d' % (i + 1)
         for j in range(len(molecule.bonds[i])):
            line += '%5d' % (molecule.bonds[i][j] + 1)
         print >> file, line
         numconect += 1

   # Now print out the MASTER record
   print >> file, 'MASTER        1    0    0    0    0    0    0    0 %4d %4d %4d    0' % \
            (atom_counter, numters, numconect)

   # End the PQR
   print >> file, 'END'
Example #16
    def test_save_different_tracebacks(self):
        path_1 = join(LOG_DIRECTORY, exc_utils.EXCEPTION_FILE_NAME_1)
        path_2 = join(LOG_DIRECTORY, exc_utils.EXCEPTION_FILE_NAME_2)

        exception_info = exc_utils.get_exception_info_1()
        storage = FileStorage(LOG_DIRECTORY)
        storage.save("diff:first", exception_info)
        self.assertTrue(file_exists(path_1))

        exception_info = exc_utils.get_exception_info_2()
        storage = FileStorage(LOG_DIRECTORY)
        storage.save("diff:second", exception_info)
        self.assertTrue(file_exists(path_2))

        self.assertEqual("diff:first", self._get_file_contents(path_1))
        self.assertEqual("diff:second", self._get_file_contents(path_2))
Example #17
    def keys_registration(self):

        filepath = control.transPath(
            control.join(control.addon('plugin.video.youtube').getAddonInfo('profile'), 'api_keys.json'))

        setting = control.addon('plugin.video.youtube').getSetting('youtube.allow.dev.keys') == 'true'

        if file_exists(filepath):

            f = open(filepath)

            jsonstore = json.load(f)

            try:
                old_key_found = jsonstore['keys']['developer'][control.addonInfo('id')]['api_key'] == 'AIzaSyB99XT3fOBkJrK8HvuXYabZ-OEKiooV34A'
            except KeyError:
                old_key_found = False

            no_keys = control.addonInfo('id') not in jsonstore.get('keys', 'developer').get('developer') or old_key_found

            if setting and no_keys:

                keys = json.loads(decompress(b64decode(self.scramble)))
                register_api_keys(control.addonInfo('id'), keys['api_key'], keys['id'], keys['secret'])

            f.close()
Example #18
    def check_inputstream_addon():

        try:
            addon_enabled = control.addon_details('inputstream.adaptive').get('enabled')
        except KeyError:
            addon_enabled = False

        leia_plus = control.kodi_version() >= 18.0

        first_time_file = control.join(control.dataPath, 'first_time')

        if not addon_enabled and not file_exists(first_time_file) and leia_plus:

            try:

                yes = control.yesnoDialog(control.lang(30014))

                if yes:
                    control.enable_addon('inputstream.adaptive')
                    control.infoDialog(control.lang(30402))

                with open(first_time_file, 'a'):
                    pass

            except Exception:

                pass
Example #19
def get_chromedriver_location():
    """ Solve chromedriver access issues """
    CD = Settings.chromedriver_location

    if OS_ENV == "windows":
        if not CD.endswith(".exe"):
            CD += ".exe"

    if not file_exists(CD):
        workspace_path = slashen(WORKSPACE["path"], "native")
        assets_path = "{}{}assets".format(workspace_path, native_slash)
        validate_path(assets_path)

        # only import from this package when necessary
        from instapy_chromedriver import binary_path

        CD = binary_path
        chrome_version = pkg_resources.get_distribution(
            "instapy_chromedriver").version
        message = "Using built in instapy-chromedriver executable (version {})".format(
            chrome_version)
        highlight_print(Settings.profile["name"], message, "workspace", "info",
                        Settings.logger)

    # save updated path into settings
    Settings.chromedriver_location = CD
    return CD
Example #20
def main():
    """Main method executed when run"""

    if len(argv) != 2:
        print("Usage: flymirror.py [rules_file]")
        return

    if not file_exists(argv[1]):
        print("Error: rules file", argv[1], "does not exist.")
        return

    config = read_config(argv[1])
    URLS.put(config.start)

    # Start the loops in another thread
    # include 2 extra threads for the loopers
    pool = ThreadPoolExecutor(int(config.workers) + 2)
    perfprint("[START]")
    pool.submit(download_loop, pool)
    pool.submit(handle_response_loop, pool, config)

    # Join on both the queues at once (Yeah, this is hacky -- may break in later versions)
    while URLS.unfinished_tasks or RESPONSES.unfinished_tasks:
        sleep(0.3)

    # Shut everything down (may take 1 second)
    DONE.put(True)
    pool.shutdown()
    perfprint("[END]")
Example #21
 def test_file_write(self):
     # Check that the file exists after writing it
     write_text_file(file_name="test", file_format=".txt", file_lines="test string only", verbose=True)
     self.assertTrue(file_exists("test.txt"))
     # Check if the one string we passed is written correctly
     with open("test.txt", "r") as file:
         file_data = file.readline().strip()
     self.assertEqual("test string only", file_data)
     # Pass an empty list and see if it fails as expected
     state = write_text_file(file_name="test", file_format=".txt", file_lines=[], verbose=True)
     self.assertFalse(state)
     # Write one line, but passed as a list
     write_text_file(file_name="test", file_format=".txt", file_lines=["One line given"], verbose=True)
     with open("test.txt", "r") as file:
         file_data = file.readline().strip()
     self.assertEqual("One line given", file_data)
     # write a list of strings
     test_lines = ["hello", "my", "name", "is", "Simon"]
     write_text_file(file_name="test", file_format=".txt", file_lines=test_lines, verbose=True)
     # Read the lines and compare with the lines written
     with open("test.txt", "r") as file:
         file_data = file.readlines()
     for line_number in range(len(test_lines)):
         self.assertEqual(test_lines[line_number], file_data[line_number].strip())
     # Clean up after the test
     remove_file("test.txt")
Example #22
def read_last_session(
    file: Union[str, Path]) -> Optional[List[List[Dict[str, Any]]]]:
   if not file_exists(file):
      return None

   with open_lz4(file) as fd:
      data = json.load(fd)

   schema_version = data['version']
   if schema_version != ['sessionrestore', 1]:
      raise util.UnsupportedSchema(file, schema_version)

   windows = []
   for window in data['windows']:
      tabs = []
      for tab in window['tabs']:
         # current entry in the tab, others are history
         current_entry = tab['entries'][0]

         tabs.append({
             'index': tab['index'],
             'title': current_entry['title'],
             'url': current_entry['url'],
             'container': tab['userContextId'],
             'last-accessed': tab['lastAccessed']
         })

      windows.append(tabs)

   return windows
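A hypothetical call; Firefox keeps the last session as compressed JSON under the profile directory, so a path along these lines would be expected (shown for illustration only):

from pathlib import Path

profile = Path.home() / '.mozilla' / 'firefox' / 'abcd1234.default'
windows = read_last_session(profile / 'sessionstore-backups' / 'recovery.jsonlz4')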
Example #23
 def file_open(self):
     while True:
         path = QtGui.QFileDialog.getOpenFileName(
             self, 'Open MIDI sound set',
             QtGui.QDesktopServices.storageLocation(
                 QtGui.QDesktopServices.HomeLocation),
             'MIDI files (*.mid);;All files (*)')
         if not path: return
         if not file_exists(str(path)):
             QtGui.QMessageBox.warning(
                 self, 'File does not exist',
                 'The selected file does not exist.\nCheck the file name and path.'
             )
         else:
             try:
                 res = self.midi_load(path)
                 if not res:
                     retry = QtGui.QMessageBox.information(
                         self, 'No sounds found',
                         'It looks like the selected file does not contain any sound.\nTry with another file?',
                         QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
                     if retry != QtGui.QMessageBox.Yes: return
                 break
             except:
                 QtGui.QMessageBox.warning(
                     self, 'Unexpected error',
                     'Something is wrong with the selected file...\nTry with another one.'
                 )
     self.setSource(res, path)
Example #24
def create_conf():
    config_f = configparser.ConfigParser(allow_no_value=True)
    config_f["autospec"] = {}
    for fname, comment in sorted(config_options.items()):
        config_f.set("autospec", "# {}".format(comment))
        if file_exists(fname):
            config_f["autospec"][fname] = "true"
            os.remove(fname)
        else:
            config_f["autospec"][fname] = "false"

    # renamed options need special care
    if file_exists("skip_test_suite"):
        config_f["autospec"]["skip_tests"] = "true"
        os.remove("skip_test_suite")
    write_config(config_f)
Example #25
 def __init__(self, filepath, new_contents=None):
     self._filepath = filepath
     # Allow initializing new file from provided contents
     if file_exists(filepath):
         self._contents = self._read()
     elif new_contents:
         self._contents = new_contents
Example #26
def cache_exists(cache_metadata, function_name, *args, **kwargs):
    if function_name not in cache_metadata:
        return False, None
    new_caches_for_function = []
    cache_changed = False
    for function_cache in cache_metadata[function_name]:
        if function_cache['args'] == str(args) and (function_cache['kwargs']
                                                    == str(kwargs)):
            max_age_days = int(function_cache['max_age_days'])
            file_name = join_path(DISK_CACHE_DIR, function_cache['file_name'])
            if file_exists(file_name):
                if get_age_of_file(
                        file_name) > max_age_days != UNLIMITED_CACHE_AGE:
                    os.remove(file_name)
                    cache_changed = True
                else:
                    function_value = unpickle_big_data(file_name)
                    return True, function_value
            else:
                cache_changed = True
        else:
            new_caches_for_function.append(function_cache)
    if cache_changed:
        if new_caches_for_function:
            cache_metadata[function_name] = new_caches_for_function
        else:
            cache_metadata.pop(function_name)
        write_cache_file(cache_metadata)
    return False, None
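The cache_metadata layout this function appears to expect, reconstructed from the lookups above (the key names come from the code; the concrete values are illustrative):

cache_metadata = {
    'load_dataset': [
        {
            'args': "('train',)",             # str(args) recorded when the cache was written
            'kwargs': '{}',                   # str(kwargs) recorded when the cache was written
            'max_age_days': '7',
            'file_name': 'load_dataset_0.pkl',
        },
    ],
}
hit, value = cache_exists(cache_metadata, 'load_dataset', 'train')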
Example #27
def cache_calc(filename, func, *args, **kwargs):
    """
    Cache calculations, so that the first call to this function performs the
    calculations, and caches them to a file. And future calls to this function
    simply load up the data from the cached file.

    :param filename:(str) the file path you want to save the cached file as
    :param func: The function to call to perform the calculation.
    :param *args: ordered arguments to be passed on to func()
    :param **kwargs: keyword arguments to be passed on to func()
    :return: whatever func() returns.

    :examples:
        cache_calc("myCachedFile", myFunc)
    """
    # ==========================================================================
    if file_exists(filename):
        print("Loading the cached version of " + filename)
        with open(filename, mode="rb") as fileObj:
            x = pickle_load(fileObj)
    else:
        print("Caching the calculation to the file " + filename)
        x = func(*args, **kwargs)
        # Cache the calculation so future calls to this function load the cached
        # object instead.
        with open(filename, mode="wb") as fileObj:
            pickle_dump(x, fileObj)
    return x
Example #28
def get_state_vars():
    curl_file = 'curl.sh'
    assert file_exists(curl_file), '`{}` file not found'.format(curl_file)
    with open(curl_file) as fd:
        data = dict(arg.split('=', maxsplit=1) for arg in re.search("--data '([^']*)'", fd.read()).group(1).split('&'))
    assert data, 'could not parse curl arguments'
    return unquote(data['__VIEWSTATE'].strip()), unquote(data['__EVENTVALIDATION'].strip())
Example #29
def delete_old_disk_caches():
    cache_metadata = load_cache_metadata_json()
    new_cache_metadata = deepcopy(cache_metadata)
    cache_changed = False
    for function_name, function_caches in cache_metadata.items():
        if function_name == _TOTAL_NUMCACHE_KEY:
            continue
        to_keep = []
        for function_cache in function_caches:
            max_age_days = int(function_cache['max_age_days'])
            file_name = join_path(DISK_CACHE_DIR, function_cache['file_name'])
            if not file_exists(file_name):
                cache_changed = True
                continue
            if not get_age_of_file(
                    file_name) > max_age_days != UNLIMITED_CACHE_AGE:
                to_keep.append(function_cache)
                continue
            logger.info('Removing stale cache file %s, > %d days', file_name,
                        max_age_days)
            cache_changed = True
            os.remove(file_name)
        if to_keep:
            new_cache_metadata[function_name] = to_keep
    if cache_changed:
        write_cache_file(new_cache_metadata)
Example #30
def keys_registration():

    filepath = control.transPath(
        control.join(
            control.addon('plugin.video.youtube').getAddonInfo('profile'),
            'api_keys.json'))

    setting = control.addon('plugin.video.youtube').getSetting(
        'youtube.allow.dev.keys') == 'true'

    if file_exists(filepath):

        f = open(filepath)

        jsonstore = json.load(f)

        try:
            old_key_found = jsonstore['keys']['developer'][control.addonInfo(
                'id')]['api_key'] == 'AIzaSyCE6qoV77uQMWR6g2mIVzjQs8wtqqa_KyM'
        except KeyError:
            old_key_found = False

        no_keys = control.addonInfo('id') not in jsonstore.get(
            'keys', 'developer').get('developer') or old_key_found

        if setting and no_keys:

            keys = json.loads(decompress(b64decode(scramble)))

            register_api_keys(control.addonInfo('id'), keys['api_key'],
                              keys['id'], keys['secret'])

            control.sleep(200)

        f.close()
Example #31
    def is_valid_profile(path: Union[str, Path]) -> bool:
        # checking if any of the files does not exist
        for file in [PLACES, COOKIES, EXTENSIONS]:
            if not file_exists(join_path(path, file)):
                return False

        return True
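PLACES, COOKIES, and EXTENSIONS are module-level constants defined elsewhere; for a Firefox profile they would plausibly name files such as:

PLACES = 'places.sqlite'
COOKIES = 'cookies.sqlite'
EXTENSIONS = 'extensions.json'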
Example #32
def read_bookmarks(file: Union[str, Path]) -> Optional[Bookmark]:
    if not file_exists(file):
        return None

    with open(file) as fd:
        data = json.load(fd)

    schema_version = data['version']
    if schema_version != 1:
        raise util.UnsupportedSchema(file, schema_version)

    def recursive(bookmark):  # type: ignore
        title = bookmark['name']
        date_added = dt_from_webkit_epoch(bookmark['date_added'])

        if bookmark['type'] == 'folder':
            return Bookmark.new_folder(
                title,
                date_added, [recursive(i) for i in bookmark['children']],
                date_modified=dt_from_webkit_epoch(bookmark['date_modified']))

        return Bookmark.new(bookmark['url'], title, date_added)

    roots = data['roots']

    toolbar = recursive(roots['bookmark_bar'])
    other = recursive(roots['other'])
    synced = recursive(roots['synced'])

    # NOTE when changing keep the order in sync with firefox/reader.py
    return Bookmark.new_folder('root', datetime.datetime.now(),
                               [toolbar, other, synced])
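A hypothetical call against a Chromium profile; the exact path varies by platform and profile name:

from pathlib import Path

bookmarks = read_bookmarks(Path.home() / '.config' / 'chromium' / 'Default' / 'Bookmarks')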
Example #33
def dev():
    # Allow this to persist, since we aren't as rigorous about keeping state clean
    if not file_exists('.denv'):
        local('virtualenv .denv')
 
    with virtualenv(DEV_ENV_DIR):
        local('pip install -r requirements.txt')
Example #34
    def __init__(self, model, device, max_dist=0.2, max_iou_distance=0.7, max_age=70, n_init=3, nn_budget=100):
        # models trained on: market1501, dukemtmcreid and msmt17
        if is_model_in_factory(model):
            # download the model
            model_path = join('deep_sort/deep/checkpoint', model + '.pth')
            if not file_exists(model_path):
                gdown.download(get_model_link(model), model_path, quiet=False)

            self.extractor = FeatureExtractor(
                # strip the dataset suffix from the DeepSort model name
                model_name=model.rsplit('_', 1)[:-1][0],
                model_path=model_path,
                device=str(device)
            )
        else:
            if is_model_type_in_model_path(model):
                model_name = get_model_type(model)
                self.extractor = FeatureExtractor(
                    model_name=model_name,
                    model_path=model,
                    device=str(device)
                )
            else:
                print('Cannot infer model name from provided DeepSort path, should be one of the following:')
                show_supported_models()
                exit()

        self.max_dist = max_dist
        metric = NearestNeighborDistanceMetric(
            "cosine", self.max_dist, nn_budget)
        self.tracker = Tracker(
            metric, max_iou_distance=max_iou_distance, max_age=max_age, n_init=n_init)
Example #35
def load_VIS_LSTM(model_save_file=model_save_file):
    if file_exists(model_save_file):
        print("Model already exists. Loading...")
        vis_lstm_model = load_model(model_save_file)
    else:
        vis_lstm_model = VIS_LSTM()
    return vis_lstm_model
Example #36
def data_preprocess():
    """Preprocess the dataset and cache the result."""
    if file_exists(data_preprocess_file):
        with open(data_preprocess_file, 'rb+') as f:
            data_tup = pickle.load(f)
    else:
        ## Extract the sparse matrix data
        f = h5py.File('hidden_oxford_mscoco.h5', 'r', encoding='bytes')
        data = f['hidden7_data'][:]
        shape = f['hidden7_shape'][:]
        indices = f['hidden7_indices'][:]
        ptr = f['hidden7_indptr'][:]

        ## Create the matrix by passing the data
        mat = sparse.csr_matrix((data, indices, ptr), shape=shape)
        img_array = mat.toarray()

        ## Load the dictionary from the vocabulary
        vocab_dict = np.load('cocoqa/' + 'vocab-dict.npy', encoding='bytes')
        quest_dict = vocab_dict[0]
        quest_word_arr = vocab_dict[1]
        ans_dict = vocab_dict[2]
        ans_word_arr = vocab_dict[3]

        ## Create the tuple
        data_tup = (img_array, quest_dict, quest_word_arr, ans_dict,
                    ans_word_arr)
        with open(data_preprocess_file, 'wb+') as f:
            pickle.dump(data_tup, f)

    return data_tup
Example #37
def env():
    if file_exists(VENV_DIR):
        local('rm -rf {env}'.format(env=VENV_DIR))
    local('python -m virtualenv {env}'.format(env=VENV_DIR))
    with virtualenv(VENV_DIR):
        local('python -m pip install --upgrade setuptools')
        local('python -m pip install -r requirements.txt')
        local('python -m pip install pytest')
Example #38
def programming_exercise_1():
    from os.path import exists as file_exists
    numbers_file = 'numbers.txt'
    if not file_exists(numbers_file):
        create_numbers_file(numbers_file)
    with open(numbers_file, 'r') as nfh:
        for line in nfh:
            print(line.rstrip('\n'))
Example #39
    def test_save(self):
        exception_info = exc_utils.get_exception_info_1()

        storage = FileStorage(LOG_DIRECTORY)
        storage.save("test_save", exception_info)

        path = join(LOG_DIRECTORY, exc_utils.EXCEPTION_FILE_NAME_1)
        self.assertTrue(file_exists(path))
Example #40
 def _import_pkgs(self):
     if re_get is not None:
         self.requests_installed = True
     if StreamData is not None:
         self.livestreamer_installed = True
         for path in self.possible_paths:
             if file_exists(path):
                 self.livestreamer_path = path
Example #41
def env():
    if file_exists(VENV_DIR):
        local('rm -rf {env}'.format(env=VENV_DIR))
    local('virtualenv {env}'.format(env=VENV_DIR))
    with virtualenv(VENV_DIR):
        local('pip install --upgrade setuptools')
        local('pip install -r requirements.txt')
        local('pip install pytest')
Example #42
def render_page(renderer, template, **context):
    load_data()
    for lang, context in data_contexts.items():
        outfile = join(SITE_DIR, context['lang_dir'], template.name)
        head = dirname(outfile)
        if head and not file_exists(head):
            makedirs(head)
        print(PROMPT_FMT_HTML % (context['lang'], outfile))
        template.stream(context).dump(outfile, "utf-8")
Example #43
    def setup_netrc(self, opener):
        """Setup netrc file

        :param opener: :class:`pycurl.Curl` object
        """
        if self._netrc:
            opener.setopt(pycurl.NETRC, 1)

        if self._netrc_file and file_exists(self._netrc_file):
            opener.setopt(pycurl.NETRC_FILE, self._netrc_file)
Example #44
    def _write_pid_file(self, pidfile):
        self._create_dir(dirname(pidfile))

        if file_exists(pidfile):
            raise UnixSetupError('Error - \'{:}\' exists. Process already running ?.'.format(pidfile))

        try:
            with open(pidfile, 'w') as fd:
                fd.write(str(getpid()))
        except IOError as e:
            raise UnixSetupError('Error - Couldn\'t write pidfile \'{:}\'. Details : {:}.'.format(pidfile, e))
Example #45
    def try_load_or_create_new(session_user_id):
        """Loads a saved session_user_id session, or creates a new one if none existed before.
           If the given session_user_id is None, we assume that it is for testing purposes"""
        if session_user_id is None:
            return Session(None)
        else:
            path = '{}.session'.format(session_user_id)

            if file_exists(path):
                with open(path, 'rb') as file:
                    return pickle.load(file)
            else:
                return Session(session_user_id)
Example #46
def load_data(dataFrame="Feat_normalized.csv") :
    '''
    Load training data and labels from a CSV file.
    Return the feature matrix, training labels, and the label encoder.
    label_encoder converts textual labels to integers and vice versa:
    label_encoder.inverse_transform(0) => 'Mammal_melanosome_0'
    label_encoder.transform('Mammal_melanosome_0') => 0
    '''
    label_exists = False
    #Tracer()() #TO REMOVE!!!
    if type(dataFrame) == type(''):
        if len(dataFrame) > 120 or not file_exists(dataFrame):
            #If it a string in format of csv and not a filename
            dataFrame = StringIO(dataFrame)
        #Load file
        df = pd.read_csv(dataFrame, delimiter='\t', header=0)
        try:
            df.set_index(keys = ['accession', 'classname'], inplace=True)
            #df = pd.read_csv(dataFrame, delimiter='\t', header=0, index_col=['accession', 'classname'])
            label_exists = True
        #When not labeled
        except KeyError:
            print('Features file does not contain labels')
            #df = pd.read_csv(dataFrame, delimiter='\t', header=0, index_col='accession')
            df.set_index(keys = 'accession', inplace=True)
    else:
        df = dataFrame

    features = df.values
    # M: creates numpy array
    feature_names=df.columns.values
    print("%s features" % (len(feature_names)))
    # print("feature_names: %s" %(feature_names))

    if label_exists:
        # create an object of scikit label encoder that transforms strings to ints
        label_encoder = LabelEncoder()
        # M: check if works with multiindex
        # M: take only label, not protein name

        s = df.index.get_level_values('classname')
        #print(s.value_counts)

        labels = label_encoder.fit_transform((df.index.get_level_values('classname').values))
        #print ("labels: %s %s" %(type(labels),labels))
        print("labels List: ",list(label_encoder.classes_))
        # M: creates numpy matrix (or array)
        return (features, labels, label_encoder, feature_names)
    accessions = df.index.get_level_values('accession')
    #To change the order!! (and in the calling functions)
    return features, accessions, feature_names
Example #47
    def do_GET(s): # my function. reem
        """Respond to a GET request."""
        img_string = s.path[1:]
        # If someone went to "http://something.somewhere.net/foo/bar/",
        # then s.path equals "/foo/bar/".

        if img_string: # if not empty string
            img_path = to_real_path(img_string)
            if file_exists(img_path):
                MyHandler.send_file(s, img_path)
                return

        # reached here? there is a problem with the file
        MyHandler.send_nothing(s)
Example #48
 def test_calling_method_dump_should_pickle_the_index_object(self):
     fp = NamedTemporaryFile(delete=False)
     fp.close()
     self.filename = fp.name
     index = Index()
     index.add_document('coffee', 'I liked it')
     index.add_document('water', 'I need it')
     index.dump(self.filename)
     self.assertTrue(file_exists(self.filename))
     fp = open(self.filename)
     retrieved_index = cPickle.load(fp)
     self.assertEquals(len(retrieved_index), 2)
     self.assertEquals(set(retrieved_index._index.keys()),
                       set(['i', 'liked', 'need', 'it']))
Example #49
    def _write_pid_file(self, pidfile, user, group):
        self._create_dir(dirname(pidfile))

        if file_exists(pidfile):
            raise UnixSetupError('Error - \'{:}\' exists. Process already running ?.'.format(pidfile))

        try:
            with open(pidfile, 'w') as fd:
                fd.write(u'{:}'.format(getpid()))

            chown(pidfile, getpwnam(user).pw_uid, getgrnam(group).gr_gid)
        except IOError as e:
            raise UnixSetupError('Error - Couldn\'t write pidfile \'{:}\'. Details : {:}.'.format(pidfile, e))
        except OSError as e:
            raise UnixSetupError('Error - Couldn\'t change permissions for pidfile \'{:}\'. Details : {:}.'.format(pidfile, e))
Example #50
    def test_override_duplicate_traceback(self):
        """Only the freshest traceback is stored."""
        path = join(LOG_DIRECTORY, exc_utils.EXCEPTION_FILE_NAME_1)
        exception_info = exc_utils.get_exception_info_1()

        storage = FileStorage(LOG_DIRECTORY)
        storage.save("duplicate:first", exception_info)
        self.assertTrue(file_exists(path))

        # duplicated exception
        exception_info = exc_utils.get_exception_info_1("arg", 25.3, self)
        storage = FileStorage(LOG_DIRECTORY)
        storage.save("duplicate:second", exception_info)

        self.assertEqual("duplicate:second", self._get_file_contents(path))
Example #51
def GetPDB(filename):
   """ Read in a PDB file and return a molecule object """
   if not file_exists(filename):
      raise(exceptions.FileError('%s does not exist' % filename))

   raise(exceptions.FileError("PDB Reading not yet implemented"))

   # number of atoms in the system
   natom = 0

   # Open the file and parse it
   file = open(filename,'r')
   for line in file:
      if line[0:6] != 'ATOM  ' or line[0:6] != 'HETATM':
         natom += 1
         pass
Example #52
def cli(carto, file=None):
	if file:
		text = "load {}".format(file)
	else:
		text = ""
	while not text[:4] in ("exit", "quit"):
		print_state = True
		if text and " " in text:
			cmd, args = text.strip().split(" ", 1)
			if cmd.isdigit():
				if args == '""':
					args = ''
				carto.expand(int(cmd), args)
			else:
				if cmd == "load":
					if file_exists(args):
						with open(args, "r") as fd:
							carto.from_dot(fd.read())
						print("Cartographer loaded from {}".format(args))
				elif cmd == "new":
					qid = carto.qid
					carto.qid += 1
					carto.expand(qid, args)
					carto.questions[""] = qid
				elif cmd == "save":
					with open(args, "w") as fd:
						fd.write(carto.to_dot())
					print("Cartographer saved to {}".format(args))
					print_state = False
				else:
					print("Command unknown")
					print_state = False
		else:
			print("Command unknown")
			print_state = False
		if print_state:
			if file:
				with open(file, "w") as fd:
					fd.write(carto.to_dot())
					fd.flush()
					fsync(fd.fileno())
			else:
				print("\n".join("{}\t{}".format(qid, node.question) for qid, node in carto.nodes.items() if not node.answer))
		print("")
		text = input("> ")
Example #53
def read_config_opts():
    global config_opts
    if not file_exists("options.conf"):
        create_conf()

    config_f = configparser.ConfigParser()
    config_f.read("options.conf")
    if "autospec" not in config_f.sections():
        print("Missing autospec section in options.conf")
        sys.exit(1)

    for key in config_f["autospec"]:
        config_opts[key] = config_f["autospec"].getboolean(key)

    # Rewrite the configuration file in case of formatting changes since a
    # configuration file may exist without any comments (either due to an older
    # version of autospec or if it was user-created)
    rewrite_config_opts()
Example #54
def prebuild(build_dir='/tmp/build_spacy'):
    if file_exists(build_dir):
        shutil.rmtree(build_dir)
    os.mkdir(build_dir)
    spacy_dir = path.dirname(__file__)
    wn_url = 'http://wordnetcode.princeton.edu/3.0/WordNet-3.0.tar.gz'
    build_venv = path.join(build_dir, '.env')
    with lcd(build_dir):
        local('git clone %s .' % spacy_dir)
        local('virtualenv ' + build_venv)
        with prefix('cd %s && PYTHONPATH=`pwd` && . %s/bin/activate' % (build_dir, build_venv)):
            local('pip install cython fabric fabtools pytest')
            local('pip install -r requirements.txt')
            local('fab clean make')
            local('cp -r %s/corpora/en/wordnet corpora/en/' % spacy_dir)
            local('cp %s/corpora/en/freqs.txt.gz corpora/en/' % spacy_dir)
            local('PYTHONPATH=`pwd` python bin/init_model.py en lang_data corpora spacy/en/data')
            local('fab test')
            local('python setup.py sdist')
Example #55
def collect_tp(graph, folder):
    tps = []
    gap = 1000
    min_start = sys.maxint
    min_end = sys.maxint
    for client in graph['nodes']:
        tp_file = folder + 'throughput_client_conservative_overall_' + str(client) + '.log'
        if not file_exists(tp_file): continue
        f = open(tp_file)
        reduce_factor = 1
        records = []
        start = None
        end = None
        for line in f:
            if line.startswith('#'):
                if "(ns)" in line:
                    reduce_factor = 1000000
                elif "(cps)" in line:
                    reduce_factor = 1000
                continue
            tmp = [float(field) for field in line.strip().split()]
            if start == None: start = tmp[0]
            end = tmp[0]
            records.append([tmp[0], (tmp[2] / reduce_factor)])
        f.close()

        if min_start > start: min_start = start
        if min_end > end: min_end = end
        tps.append(records)
    ret = []
    t = min_start
    while t < min_end:
        sum = 0
        for records in tps:
            for record in records:
                if (record[0] <= t + gap):
                    sum += record[1]
                    records.remove(record)
                else:
                    break
        ret.append([(t - min_start) / 1000, sum])
        t += gap
    return ret
Example #56
    def try_load_or_create_new(session_user_id):
        """Loads a saved session_user_id.session or creates a new one.
           If session_user_id=None, later .save()'s will have no effect.
        """
        if session_user_id is None:
            return JsonSession(None)
        else:
            path = '{}.session'.format(session_user_id)
            result = JsonSession(session_user_id)
            if not file_exists(path):
                return result

            try:
                with open(path, 'r') as file:
                    data = json.load(file)
                    result.id = data['id']
                    result.port = data['port']
                    result.salt = data['salt']
                    result._sequence = data['sequence']
                    result.time_offset = data['time_offset']
                    result.server_address = data['server_address']

                    # FIXME We need to import the AuthKey here or otherwise
                    # we get cyclic dependencies.
                    from ..crypto import AuthKey
                    if data['auth_key_data'] is not None:
                        key = b64decode(data['auth_key_data'])
                        result.auth_key = AuthKey(data=key)

            except (json.decoder.JSONDecodeError, UnicodeDecodeError):
                # TODO Backwards-compatibility code
                old = Session.try_load_or_create_new(session_user_id)
                result.id = old.id
                result.port = old.port
                result.salt = old.salt
                result._sequence = old.sequence
                result.time_offset = old.time_offset
                result.server_address = old.server_address
                result.auth_key = old.auth_key
                result.save()

            return result
Example #57
def read_history():
    history = set()
    if file_exists(SHELL_HISTORY):
        for shistory in listdir(SHELL_HISTORY):
            if not shistory.endswith('.shistory'):
                continue
            with open(join_path(SHELL_HISTORY, shistory)) as fd:
                for entry in fd.readlines():
                    # only take COMMAND from 'DATE HOST PWD COMMAND'
                    entry = entry.strip().split("\t", maxsplit=3)[-1].strip()
                    for line in entry.split(";"):
                        for conjunction in line.split("|"):
                            for command in conjunction.split("&&"):
                                if not is_weird(command) and len(command.split()) > 0:
                                    history.add(command.strip())
        # filter out commands that begin with shell keywords
        history = set(command for command in history if command.split()[0] not in KEYWORDS)
        # filter out commands that begin with non-alphabet characters
        history = set(command for command in history if re.match("[a-z]", command))
    return history
Example #58
    def _check_migrate_json(self):
        if file_exists(self.filename):
            try:
                with open(self.filename, encoding='utf-8') as f:
                    data = json.load(f)
                self.delete()  # Delete JSON file to create database

                self._port = data.get('port', self._port)
                self._server_address = \
                    data.get('server_address', self._server_address)

                if data.get('auth_key_data', None) is not None:
                    key = b64decode(data['auth_key_data'])
                    self._auth_key = AuthKey(data=key)

                rows = []
                for p_id, p_hash in data.get('entities', []):
                    if p_hash is not None:
                        rows.append((p_id, p_hash, None, None, None))
                return rows
            except UnicodeDecodeError:
                return []  # No entities