Example #1
from contextlib import contextmanager

@contextmanager
def find_one(path, jobname=None):
  """ Returns an executabe job-folder as a context. 

      The goal is to retrieve an executable job-folder from disk, while using a
      lock-file to prevent other processes from reading/writing the job-folder
      at an inopportune time. The lock-file is not held throughout the existence
      of the context. It is only held for specific operations when entering and
      leaving the context. The function is a context_. As such, it is possible
      to modify the job-folder. The modifications will be saved when leaving
      the context.

      .. _context: http://docs.python.org/reference/datamodel.html#context-managers
  """ 
  from ..misc import LockFile

  found = False
  # acquire lock. Yield context outside of lock!
  with LockFile(path) as lock:
    # Loads pickle.
    with open(path, 'r') as file: jobfolder = pickle_load(file)
    # Finds first untagged job.
    if jobname is None:
      for job in jobfolder.itervalues():
        if not job.is_tagged:
          found = True
          break
    else:
      job = jobfolder[jobname]
      found = True
    # tag the job before surrendering the lock.
    if found and jobname is None:
      job.tag()
      with open(path, 'w') as file: dump(jobfolder, file)

  # Check we found an untagged job. Otherwise, we are done.
  if not found:
    yield None
    return
    
  # context returns jobs.
  yield job

  # saves job since it might have been modified
  # acquire a lock first.
  with LockFile(path) as lock:
    # Loads pickle.
    with open(path, 'r') as file: jobfolder = pickle_load(file)
    # modifies job.
    jobfolder[job.name] = job
    # save jobs.
    with open(path, 'w') as file: dump(jobfolder, file)
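A minimal usage sketch follows; the "jobs.pkl" path and the compute() call are placeholders, and modifications made inside the block are written back when it exits:

# Hypothetical caller: grab the first untagged job, run it, let the context re-save it.
with find_one("jobs.pkl") as job:
    if job is not None:
        job.compute()  # placeholder for whatever work the caller performs on the job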
Example #2
    def an_092b_anpassen(self):
        if self.mb.debug:
            log(inspect.stack)

        # add Tags_time entries to the sidebar dict
        try:
            from pickle import load as pickle_load

            pfad = os.path.join(self.mb.pfade["files"], "sidebar_content.pkl")
            with open(pfad, "rb") as f:
                self.mb.dict_sb_content = pickle_load(f)

            dict_sb_content = self.mb.dict_sb_content

            zeit_dict = {"zeit": None, "datum": None}

            for ordn in dict_sb_content["ordinal"]:
                if dict_sb_content["ordinal"][ordn]["Tags_time"] == "":
                    self.mb.dict_sb_content["ordinal"][ordn]["Tags_time"] = dict

            self.mb.class_Sidebar.speicher_sidebar_dict()
            self.version = "0.9.2b"
        except:
            log(inspect.stack, tb())
Example #3
def load_run(model, run_path):
    """
    Load a saved model and (if existing) additional data from disk.
    :param model: the model to be updated with the saved state-dict
    :param run_path: the path to the folder containing the files to load
    :return: the model and, optionally, additional data
    """
    if not run_path.is_dir():
        raise NotADirectoryError(f"Invalid run-directory path.")

    model_files = [
        file for file in run_path.iterdir() if file.suffix == ".pkt"
    ]
    data_files = [file for file in run_path.iterdir() if file.suffix == ".pkl"]

    if len(model_files) == 0:
        raise ValueError(
            f"There are no model files to load in {run_path.name}.")

    if len(model_files) > 1 or len(data_files) > 1:
        raise ValueError(
            f"There exists more than one model- and/or data file in {run_path.name}."
        )

    model.load_state_dict(load(model_files[0]))
    print(f"Successfully loaded model data from {model_files[0].name}")

    if data_files:
        with open(data_files[0], 'rb') as data_file:
            data = pickle_load(data_file)
        print(f"Successfully loaded data from {data_files[0].name}")
        return model, data

    return model
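A hedged call sketch for load_run; the model class and run directory are illustrative, and the load_state_dict call above suggests a torch-style model:

from pathlib import Path

model = MyModel()  # hypothetical torch.nn.Module subclass with a load_state_dict method
result = load_run(model, Path("runs/example_run"))
# A tuple comes back only when a .pkl data file sits next to the model file.
model, data = result if isinstance(result, tuple) else (result, None)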
Example #4
def usePickleToMeta():
    if azienda:
        meta_pickle = azienda + "-meta.pickle" + sys.version[:1]
        promogestDir = os.path.expanduser("~") + os.sep + "promogest2" + os.sep + azienda + os.sep
        meta_pickle_da_conf = main_conf.Database.azienda + "-meta.pickle" + sys.version[:1]
        promogestDir_da_conf = (
            os.path.expanduser("~") + os.sep + "promogest2" + os.sep + main_conf.Database.azienda + os.sep
        )
    else:
        meta_pickle = None
        meta_pickle_da_conf = None
        promogestDir_da_conf = None
    if (
        tipo_eng != "sqlite"
        and azienda
        and os.path.exists(str(os.path.join(promogestDir.replace("_", ""), meta_pickle.replace("_", "")).strip()))
    ):
        print(" CONTROLLO DELL'ESISTENZA DEL FILE PICKLE", str(os.path.join(promogestDir.replace("_", ""))))
        meta = None
        with open(str(os.path.join(promogestDir.replace("_", ""), meta_pickle.replace("_", "")).strip()), "rb") as f:
            try:
                meta = pickle_load(f)
                meta.bind = engine
            except:
                print("DEVO CANCELLARE IL PICKLE PERCHé NON RIESCO A TROVARLO O LEGGERLO")
                delete_pickle()
            print("USO META PICKLE FAST")
            if not meta:
                meta = MetaData(engine)
    else:
        print("USO META NORMALE")
        meta = MetaData(engine)
    return meta
Example #5
def init_session(remote_address, cookie_data=None, cookie=None):
    if cookie_data is not None:
        cookie = SessionCookie.load(cookie_data)

    # Default sid for the session
    sid = sha224(('%s-%s' % (remote_address, datetime.utcnow())).encode()).hexdigest()
    if cookie is None:
        cookie = SessionCookie(sid)
    else:
        try:
            cookie.get_sid()
        except KeyError:
            # For some reason the cookie did not contain a SID, set to default
            cookie.set_sid(sid)

    # Set the session singleton (there can be only one!)
    global CURRENT_SESSION
    ppath = get_session_pickle_path(cookie.get_sid())
    if isfile(ppath):
        # Load our old session data and initialise the cookie
        try:
            with open(ppath, 'rb') as session_pickle:
                CURRENT_SESSION = pickle_load(session_pickle)
            CURRENT_SESSION.init_cookie(CURRENT_SESSION.get_sid())
        except Exception as e:
            # On any error, just create a new session
            CURRENT_SESSION = Session(cookie)
    else:
        # Create a new session
        CURRENT_SESSION = Session(cookie)
Example #6
    def lade_Backup(self,fehlende = 'all'): 
        if self.mb.debug: log(inspect.stack,None,'### Attention ###, sidebar_content.pkl.backup loaded!')
        
        pfad_Backup = os.path.join(self.mb.pfade['files'],'sidebar_content.pkl.Backup')
        
        if fehlende == 'all':
            fehlende = list(self.mb.props['Projekt'].dict_bereiche['ordinal'])
        
        try:
            with open(pfad_Backup, 'rb') as f:
                backup = pickle_load(f)
        except:
            log(inspect.stack,tb())

            for f in fehlende:
                self.lege_dict_sb_content_ordinal_an(f)
            return

        helfer = fehlende[:]  
        
        if backup is not None:
            for f in fehlende:
                if f in backup['ordinal']:
                    self.mb.dict_sb_content['ordinal'].update(backup['ordinal'][f])
                    helfer.remove(f)
        
        fehlende = helfer      
        for f in fehlende:
            self.lege_dict_sb_content_ordinal_an(f)
Example #7
def cache_calc(filename, func, *args, **kwargs):
    """
    Cache calculations, so that the first call to this function performs the
    calculations, and caches them to a file. And future calls to this function
    simply load up the data from the cached file.

    :param filename:(str) the file path you want to save the cached file as
    :param func: the function to call to compute the result
    :param *args: ordered arguments to be passed on to func()
    :param **kwargs: keyword arguments to be passed on to func()
    :return: whatever func() returns.

    :examples:
        cache_calc("myCachedFile", myFunc)
    """
    # ==========================================================================
    if file_exists(filename):
        print("Loading the cached version of " + filename)
        with open(filename, mode="rb") as fileObj:
            x = pickle_load(fileObj)
    else:
        print("Caching the calculation to the file " + filename)
        x = func(*args, **kwargs)
        # Cache the calculation so future calls to this function load the cached
        # object instead.
        with open(filename, mode="wb") as fileObj:
            pickle_dump(x, fileObj)
    return x
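A short usage sketch along the lines of the docstring example; the function and cache filename are illustrative:

def expensive_sum(n):
    return sum(i * i for i in range(n))

first = cache_calc("sum_cache.pkl", expensive_sum, 10000)   # computed and written to disk
second = cache_calc("sum_cache.pkl", expensive_sum, 10000)  # loaded straight from the pickle
assert first == second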
Example #8
    def load(self):
        fileName = config.plugins.birthdayreminder.file.value
        print("[Birthday Reminder] reading from file", fileName)

        tmpList = []
        if isfile(fileName):
            try:
                f = open(fileName, "rb")
                tmpList = pickle_load(f)
                f.close()
            except IOError as error:
                (error_no, error_str) = error.args
                print(
                    "[Birthday Reminder] ERROR reading from file %s. Error: %s, %s"
                    % (fileName, error_no, error_str))
                text = _("Error reading file %s.\n\nError: %s, %s") % (
                    fileName, error_no, error_str)
                Notifications.AddNotification(MessageBox,
                                              text,
                                              type=MessageBox.TYPE_ERROR)

            print("[Birthday Reminder] read %s birthdays" % len(tmpList))
        else:
            print("[Birthday Reminder] File %s not found." % fileName)

        self.bDayList = tmpList
Example #9
    def lade_sidebar_dict(self):
        if self.mb.debug: log(inspect.stack)
        
        pfad = os.path.join(self.mb.pfade['files'],'sidebar_content.pkl')
        
        dict_exists, backup_exists = False, False

        if os.path.exists(pfad):
            dict_exists = True
        if os.path.exists(pfad+'.Backup'):
            backup_exists = True
        
        
        try:  
            if dict_exists:          
                with open(pfad, 'rb') as f:
                    self.mb.dict_sb_content =  pickle_load(f)
                self.ueberpruefe_dict_sb_content(not backup_exists)
                
            elif not backup_exists:
                self.lege_dict_sb_content_an()
                self.lade_Backup()
        except:
            log(inspect.stack,tb())
            
            self.lege_dict_sb_content_an()
            if not backup_exists:
                self.lade_Backup()
            
        if not dict_exists:
            self.speicher_sidebar_dict()

        self.erzeuge_dict_sb_content_Backup()
Example #10
def _get(cache_key, file_name, override_expire_secs=None, pickle=False):

	expire_datetime = None
	cache_path = xbmc_helper().get_file_path(CONST['CACHE_DIR'], file_name)

	if (override_expire_secs is not None):
		expire_datetime = datetime.now() - timedelta(seconds=override_expire_secs)
	elif 'expires' in CONST['CACHE'][cache_key].keys() and CONST['CACHE'][cache_key]['expires'] is not None:
		expire_datetime = datetime.now() - timedelta(seconds=CONST['CACHE'][cache_key]['expires'])

	cache_data = {
	        'data': None,
	        'is_expired': True,
	}

	if path.exists(cache_path):

		filectime = datetime.fromtimestamp(path.getctime(cache_path))
		filemtime = datetime.fromtimestamp(path.getmtime(cache_path))

		if filemtime is None or filectime > filemtime:
			filemtime = filectime
		if pickle is False:
			with io_open(file=cache_path, mode='r', encoding='utf-8') as cache_infile:
				cache_data.update({'data': cache_infile.read()})
		else:
			with io_open(file=cache_path, mode='rb') as cache_infile:
				cache_data.update({'data': pickle_load(cache_infile)})

		if expire_datetime is None or filemtime >= expire_datetime:
			cache_data.update({'is_expired': False})

	return cache_data
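A hedged caller sketch for _get; the cache key, file name, and the assumption that some matching writer created the file beforehand are all illustrative:

cached = _get('epg', 'epg.json', override_expire_secs=3600)
if cached['data'] is not None and not cached['is_expired']:
    payload = cached['data']   # still fresh, reuse it
else:
    payload = None             # the caller would re-fetch and rewrite the cache file here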
Example #11
  def __enter__( self ):

    if self._mode == "r":
      with open( self._fn, "rb" ) as f:
        state = pickle_load( f );
        self._len_c = state[ "c" ];
        self._len_b = state[ "b" ];
        self._len_x = state[ "x" ];
        self._lenrow = self._len_c + self._len_b + self._len_x;
        self._ic = state[ "ic" ];
        self._icbp = state[ "icbp" ];

    if self._mode == "w":

      with NamedTemporaryFile() as tmpfn:
        self._kdbfn = tmpfn.name + '.kch';
      self._kdb = KDB();
      try:
        assert self._kdb.open( self._kdbfn, KDB.OWRITER | KDB.OCREATE );
      except:
        print( str( self._kdb.error() ) );
        raise;

      with TemporaryDirectory() as tmpdirname:
        self._ldbdn = tmpdirname;
      self._ldb = LDB( self._ldbdn, create_if_missing=True );

    return self;
Example #12
def init_session(remote_address, cookie_data=None):
    if cookie_data is not None:
        cookie = SessionCookie.load(cookie_data)
    else:
        cookie = None

    # Default sid for the session
    sid = sha224('{}-{}'.format(
        remote_address, datetime.utcnow()).encode('utf-8')).hexdigest()
    if cookie is None:
        cookie = SessionCookie(sid)
    else:
        try:
            cookie.get_sid()
        except KeyError:
            # For some reason the cookie did not contain a SID, set to default
            cookie.set_sid(sid)

    # Set the session singleton (there can be only one!)
    global CURRENT_SESSION
    ppath = get_session_pickle_path(cookie.get_sid())
    if isfile(ppath):
        # Load our old session data and initialise the cookie
        try:
            with open(ppath, 'rb') as session_pickle:
                CURRENT_SESSION = pickle_load(session_pickle)
            CURRENT_SESSION.init_cookie(CURRENT_SESSION.get_sid())
        except Exception as e:
            # On any error, just create a new session
            CURRENT_SESSION = Session(cookie)
    else:
        # Create a new session
        CURRENT_SESSION = Session(cookie)
Example #13
def random_walk(seed_node, n, folNm):  # in graph G
    neg_comp = nx_Graph()
    neg_comp.add_node(seed_node)
    node_num = 1
    pres_node = seed_node
    extra = 10
    while node_num < n + extra:
        with open(folNm + "/" + pres_node, 'rb') as f:
            neig_list = pickle_load(f)
        if not neig_list:
            logging_debug("No neighbours")
            break
        if len(neig_list) != 1:
            new_node = rand_choice(list(neig_list.keys()))
        else:
            new_node = list(neig_list.keys())[0]
        wt = neig_list[new_node]
        wt_edge = wt['weight']
        neg_comp.add_edge(pres_node, new_node, weight=wt_edge)
        if len(neg_comp.nodes()) == n:
            break
        pres_node = new_node
        node_num = node_num + 1
        # print(pres_node)
    return neg_comp
Example #14
def loaddump(filename, silent=False):
    """Unserializes data from a file that was written with
    the built-in cPickle module.

    .. Seealso::
        :func:`savedump`
    """
    assert filename is not None and filename != "" and filename != "-", filename

    if not exists(filename):
        if not silent:
            print("loaddump(): File '" + filename +
                  "' does not exist. Returning None.")
        return None

    try:
        if not silent:
            print("loaddump(): Loading data from '" + filename + "'..."),
        with open(filename, "rb") as f:
            data = pickle_load(f)
        if not silent:
            print("Done")
        return data
    except Exception as ex:
        print("ERROR: loaddump(): Could not load data from '" + filename +
              "'.")
        print("       Passing exception to caller.")
        print(str(ex))
        raise ex
Example #15
def getRelationDict():
	if os_path.exists(CONFIG_FILE):
		pkl_file = open(CONFIG_FILE, 'rb')
		if pkl_file:
			volumedict = pickle_load(pkl_file)
			pkl_file.close()
			return volumedict
	return {}
Example #16
def search_metropolis(seed_node, scaler, par_inputs_fn):
    # Picks out of a subset of its neighbors and adds the best node
    with open(par_inputs_fn, 'rb') as f:
        inputs = pickle_load(f)
    with open(inputs['modelfname'], 'rb') as f:
        model = pickle_load(f)
    folNm = inputs['folNm']
    folNm_out = inputs['folNm_out']

    cd, g1 = starting_edge(folNm, seed_node)
    if cd == 0:
        return

    a, b = met(g1, model, scaler, inputs, 0)

    with open(folNm_out + "/" + seed_node, 'wb') as f:
        pickle_dump((a, b), f)
Example #17
def file_pickle_load(path: str):
    if file_test(path):
        with open(path, "rb") as f:
            t = pickle_load(f)
        return t

    print(f"Pickle-File {path} could not be found!")
    return None
Example #18
def pickle_restore(pickle_file_path):
    '''Load data from pickle file'''
    try:
        with open(pickle_file_path, "rb") as f:
            last_session_data = pickle_load(f)
    except Exception:
        return None
    return last_session_data
Example #19
def getVolumeDict():
    if os_path.exists(CONFIG_FILE_VOLUME):
        pkl_file = open(CONFIG_FILE_VOLUME, 'rb')
        if pkl_file:
            volumedict = pickle_load(pkl_file)
            pkl_file.close()
            return volumedict
    return {}
Example #20
  def __enter__( self ):

    self._cdata = DB();
    self._ddata = DB();

    try:
      if self._mode == "r":
        assert self._cdata.open( self._fn_cdata, DB.OREADER );
      elif self._mode == "w":
        if isfile( self._fn_cdata ):
          remove( self._fn_cdata );
        assert self._cdata.open( self._fn_cdata, DB.OWRITER | DB.OCREATE );
      else:
        assert False;
    except:
      if self._cdata is not None:
        print( str( self._cdata.error() ) );
      raise;

    try:
      if self._mode == "r":
        assert self._ddata.open( self._fn_ddata, DB.OREADER );
      elif self._mode == "w":
        if isfile( self._fn_ddata ):
          remove( self._fn_ddata );
        assert self._ddata.open( self._fn_ddata, DB.OWRITER | DB.OCREATE );
      else:
        assert False;
    except:
      if self._ddata is not None:
        print( str( self._ddata.error() ) );
      raise;

    if self._mode == "r":

      with open( self._fn_meta, 'rb' ) as f:
        r = pickle_load( f );
        self._len_c = r[ "c" ];
        self._len_b = r[ "b" ];
        self._len_x = r[ "x" ];
        self._co = r[ "co" ];

      with open( self._fn_icov, 'rb' ) as f:
        self._icov = pickle_load( f );

    return self;
Example #21
def fill_json(apps, schema_editor):
    for lwemail in apps.get_model('emails', 'LightWeightEmail').objects.all():
        body = lwemail.body

        if body:
            # lwemail.body = b''
            lwemail.body_tmp = json_dump(
                pickle_load(body.encode(), encoding='utf-8'))
            lwemail.save()
Example #22
def search_metropolis_clique_start(scaler,par_inputs_fn,G_clique): # Picks out of a subset of its neighbors and adds the best node 
    #print(seed_clique) 
    with open(par_inputs_fn,'rb') as f:                
        inputs = pickle_load(f)      
    with open(inputs['modelfname'],'rb') as f:
        model = pickle_load(f)       
    g1=nx_Graph(G_clique)
    
    # Finding score 
    (score_prev,comp_bool) = get_score(g1,model,scaler,inputs['model_type']) 
                
    # Removing starting points which are not complexes    
    if comp_bool == 0:
        return ([],0)
              
    g1 = met(g1,model,scaler,inputs,score_prev)
    
    return g1
Example #23
 def load(self, path):
     with open(path, 'rb') as f:
         try:
             odata = pickle_load(f)
             self.data = odata['data']
             self._mc = odata['mc']
             self.classifier = odata['classifier']
         except Exception as e:
             print("Invalid data: %s" % (str(e)))
Example #25
    def _get(self):
        """
        Retrieves the data set model
        :return: map, [instrument][composer][piece][tempo] = abs_download_path
        """
        mapping = {}
        urls = []

        website_dump = OneClassicalDataSet.__name__ + ".dump.pkl"
        if exists("dump.pkl"):
            pp("Restoring Website Information...")
            urls, mapping = pickle_load(open(website_dump, "rb"))
        else:
            pp("Retrieving Website Information for " + self.url)
            for ref_instrument in self._get_ref_links(self.url +
                                                      self.by_instrument):
                instrument = ref_instrument.split("=")[1]
                # print ("Instrument: ", ref_instrument, instrument)
                mapping[instrument] = {}
                for ref_composer in self._get_ref_links(self.url +
                                                        ref_instrument):
                    composer = ref_composer.split("=")[1].split("-")[1].strip()
                    # print ("Composer: ", ref_composer, composer)
                    mapping[instrument][composer] = {}
                    for ref_piece in self._get_ref_links(self.url +
                                                         ref_composer):

                        piece = ref_piece.split("=")[1].split("-")[1].strip()
                        downloads = self._get_downloads(self.url + ref_piece)
                        print(instrument, composer, piece, downloads)
                        mapping[instrument][composer][piece] = downloads
                        urls += list(
                            map(lambda tempo: downloads[tempo], downloads))

            pickle_dump((urls, mapping), open(website_dump, "wb"))
        # Download Data Set
        content = self._download(urls=list(set(urls)))
        # Mapping File Paths to Data Set
        to_remove = []
        for i in mapping:
            for c in mapping[i]:
                for p in mapping[i][c]:
                    for t in mapping[i][c][p]:
                        if mapping[i][c][p][t] in content:
                            mapping[i][c][p][t] = content[mapping[i][c][p][t]]
                        else:
                            to_remove.append([i, c, p, t])

        # Remove all mapping where audio could not be downloaded
        for item in to_remove:
            del (mapping[item[0]][item[1]][item[2]][item[3]])

        # Remove website dump
        remove(website_dump)

        return mapping
Example #26
 def get_resource(self, name: str) -> Any:
     """Get the resource registered under a given name."""
     if name not in self._instance_registry:
         raise ResourceException(f"Resource {name} not found.")
     if self._cache_dir:
         cache_path = self._name_cache_path(name)
         if cache_path.is_file():
             with bz2_open(cache_path, "rb") as fh:
                 return pickle_load(fh)
     return self._instance_registry[name]
Example #27
def search_metropolis_clique_start(scaler, par_inputs_fn, G_clique):
    # Picks out of a subset of its neighbors and adds the best node
    # print(seed_clique)
    with open(par_inputs_fn, 'rb') as f:
        inputs = pickle_load(f)
    with open(inputs['modelfname'], 'rb') as f:
        model = pickle_load(f)
    folNm_out = inputs['folNm_out']
    g1 = nx_Graph(G_clique)

    # Finding score
    score_prev, comp_bool = get_score(g1, model, scaler, inputs['model_type'])

    # Removing starting points which are not complexes
    if score_prev < inputs["classi_thresh"]:
        return
    a, b = met(g1, model, scaler, inputs, score_prev)
    name = " ".join([str(n) for n in g1.nodes()])
    with open(folNm_out + "/" + name, 'wb') as f:
        pickle_dump((a, b), f)
Example #28
def load_pickle(name, raise_exception=False):
    location = join(conf.DIRECTORY, 'pickles', '{}.pickle'.format(name))
    try:
        with open(location, 'rb') as f:
            return pickle_load(f)
    except (FileNotFoundError, EOFError):
        if raise_exception:
            raise FileNotFoundError
        else:
            return None
Example #29
    def load(filename):
        """Load a template from ``filename``, return ``Templater`` object.

        This method must be used in pair with ``Templater.dump`` - it loads
        the template definition from a file using cPickle, creates a
        ``Templater`` object with the definition and returns it.
        """
        with open(filename, 'rb') as loaded_file:
            processed_template = pickle_load(loaded_file)
        return processed_template
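A round-trip sketch of the dump/load pairing the docstring describes; construction details and the file name are assumptions:

template = Templater()                 # construction arguments omitted; they are an assumption
template.dump('page.tpl')              # counterpart method mentioned in the docstring
restored = Templater.load('page.tpl')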
Example #30
 def _load(self):
     if not self._cache_filename:
         return {}
     try:
         with open(self._cache_filename, 'rb') as f:
             return pickle_load(f)
     except IOError:
         #print('Cannot load cache from file {0}'
         #      .format(self._cache_filename))
         return {}
Example #32
 def __init__(self, conn_string: str) -> None:
     super().__init__(conn_string)
     try:
         with open(self.conn_string, 'rb') as f:
             self.values = pickle_load(f)
     except OSError as e:
         self.logger.warning(
             'Can\'t open file "%s": "%s". Cache unavailable.',
             self.conn_string, e)
         self.values = {}
Example #34
    def _load_pion_function(self):
        """Returns the universal function on a fixed energy range
        """
        from pickle import load as pickle_load
        from scipy.interpolate import UnivariateSpline

        uf_file = join(config.data_dir, 'pion_spline.pkl')
        with open(uf_file, 'rb') as f:
            tck = pickle_load(f, encoding='latin1')

        self.pion_spl = UnivariateSpline._from_tck(tck)
Example #35
    def _init_mainloop(self) -> None:
        try:
            with open(self._SERVER_CACHE_FILE,
                      "rb") as cache_file, self._cache_lock:
                self._dns_cache = pickle_load(cache_file)
                self._expiration_dict = pickle_load(cache_file)
                self._last_check_time = pickle_load(cache_file)

            os_remove(self._SERVER_CACHE_FILE)
        except OSError:
            self._dns_cache = dict()
            self._expiration_dict = dict()
            self._last_check_time = int(time_time())

        console_listener = Thread(target=self._console_listening, daemon=True)
        console_listener.start()

        expiration_checker = Thread(target=self._expiration_checking,
                                    daemon=True)
        expiration_checker.start()
Example #36
    def _load_pion_function(self):
        """Returns the universal function on a fixed energy range
        """
        from pickle import load as pickle_load
        from scipy.interpolate import UnivariateSpline

        uf_file = join(global_path, 'data/pion_spline.pkl')
        with open(uf_file, 'r') as f:
            tck = pickle_load(f)

        self.pion_spl = UnivariateSpline._from_tck(tck)
Example #37
def search_max_neig(seed_node, scaler, par_inputs_fn):
    with open(par_inputs_fn, 'rb') as f:
        inputs = pickle_load(f)
    with open(inputs['modelfname'], 'rb') as f:
        model = pickle_load(f)  # Seed node
    logging_debug("Seed node is", seed_node)
    folNm = inputs['folNm']
    folNm_out = inputs['folNm_out']
    max_nodes = inputs["max_size"]
    score_curr = 0
    cd, g1 = starting_edge(folNm, seed_node)
    if cd == 0:
        return
    while len(g1) < max_nodes:
        # print(len(g1))
        logging_debug("Adding next node")

        neig_list = read_neigs(g1.nodes(), folNm)
        if not neig_list:  # Checking if empty
            logging_debug("No more neighbors to add")
            break

        node_to_add = max(neig_list.items(),
                          key=lambda elem: elem[1]['weight'])[0]
        g1 = add_newnode(g1, node_to_add,
                         neig_list[node_to_add]['graph_neigs'])

        score_prev = score_curr

        (score_curr, comp_bool) = get_score(g1, model, scaler,
                                            inputs['model_type'])

        if score_curr < inputs["classi_thresh"]:
            logging_debug("Complex found")

            # Remove the node last added
            g1.remove_node(node_to_add)
            score_curr = score_prev
            break
    with open(folNm_out + "/" + seed_node, 'wb') as f:
        pickle_dump((frozenset(g1.nodes()), score_curr), f)
Example #38
 def register_resource(self, name: str, resource: Any) -> None:
     if name in self._instance_registry:
         raise ResourceException(f"Resource name {name} already in use.")
     if self._cache_dir:
         cache_path = self._name_cache_path(name)
         if cache_path.is_file():
             self._logger.debug(
                 f"Using the cached version of the {name} resource")
             with bz2_open(cache_path, "rb") as fh:
                 resource = pickle_load(fh)
     self._instance_registry[name] = resource
     self._name_registry[id(resource)] = name
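Read together with Example #26, a hedged sketch of the register/get flow; the manager object and the resource factory are placeholders:

manager.register_resource("embeddings", build_embeddings())  # pickled into the bz2 cache when _cache_dir is set
embeddings = manager.get_resource("embeddings")              # later lookups can be served from that cache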
Example #39
def unpickle_info(filename):
    """
    @param filename: filename
    @type filename: str
    @return: unpickled info
    @rtype: T
    """
    from pickle import load as pickle_load
    with open(filename, 'rb') as f:
        info = pickle_load(f)

    return info
Example #40
    def __init__(self, login, password, cookies=None):
        self._login = login
        self._password = password
        self._session = Session()
        self._cookies = cookies

        if cookies is not None:
            try:
                with open(cookies, 'rb') as f:
                    self._session.cookies = cookiejar_from_dict(pickle_load(f))
            except IOError:
                _logger.error('Could not load cookies from {0}'.format(self._cookies))
Example #41
def load_result(filename):
    """
    Load (un-pickle) a Result object (or any other object)
    """
    # if npz:
    #     npd = numpy_load(filename)
    #     result = Result(Response(npd['input_matrix'], npd['corr_names'], npd['mon_names'], list(npd['line']),
    #                       'd_jw' in result, assume_sorted=True))
    # else:
    with open(filename, 'rb') as f:
        result = pickle_load(f)
    return result
Example #42
def get_prot_list(test_complex_path):
    with open(test_complex_path, 'rb') as f:
        test_complex_list = pickle_load(f)

    test_complex_nodes = [
        item for sublist in test_complex_list for item in sublist
    ]
    test_prot_list = set(test_complex_nodes)

    with open(test_complex_path + "_prot_list", 'wb') as f:
        pickle_dump(test_prot_list, f)
    return test_prot_list
Example #43
def init_data():
    global data
    try:
        db_file = open("backend/db.p", "rb")
        data = pickle_load(db_file)
        db_file.close()
        print('load success!')
        print(data)
    except FileNotFoundError:
        reset_data()
        print('database reset!')
    Thread(target=commit_data_timer).start()
Example #44
def load(path):
    """Load variable from Pickle file
    
    Args:
        path (str): path of the file to load

    Returns:
        variable read from path
    """
    with open(path, 'rb') as f:
        variable = pickle_load(f)
    return variable
Example #45
def step15( datadir, subsample ):

  data_pos = [];
  data_neg = [];
  for i in range( 0, len(DIMCLUSTERs) ):
    data_pos.append( [] );
    data_neg.append( [] );
  print( data_pos, data_neg );

  with open( datadir+'/step07_'+subsample+'.pickle', 'rb' ) as f:
    data = pickle_load( f );

  rowcount = 0;

  for ( y, x ) in data:
    
    rowcount += 1;
    if rowcount > 10000:
      break;

    for ( i, dims ) in enumerate( DIMCLUSTERs ):
      xval = 0.0;
      nx = 0.0;
      for dim in dims:
        if dim < 0:
          xval += -x[ abs(dim)-1 ];
        else:
          xval += x[ abs(dim)-1 ];
        nx += 1.0;        
      if y == '0':
        data_neg[ i ].append( xval/nx );
      elif y == '1':
        data_pos[ i ].append( xval/nx );

  data_pos = np.array( data_pos );
  data_neg = np.array( data_neg );

  print( np.cov( data_neg ) );

  n = len(DIMCLUSTERs);
  ( fig, ax ) = plt.subplots( nrows = n, ncols = n, figsize = ( 3*n, 3*n ) );

  for i in range( 0, n ):
    for j in range( 0, n ):
      if j >= i:
        continue;

      ax[i,j].plot( data_neg[i], data_neg[j], marker='o', color='b', linestyle='', alpha=0.66 );
      ax[i,j].plot( data_pos[i], data_pos[j], marker='o', color='r', linestyle='', alpha=0.66 );
      ax[i,j].set_title( str((i,j)) );

  fig.savefig( datadir+'/step15.png' );
Example #46
    def setUpClass(cls):
        """
        Setup!
        """
        here = dirname(__file__).replace('/', '\\')
        here = str(here)

        cls.raw_logger_settings_input = here + "\\input\\input_raw_logger_settings.cfg"
        cls.raw_logger_settings_output = here + "\\output\\output_raw_logger_settings.pickle"
        cls.raw_to_pynames_output = here + '\\output\\output_raw_to_pynames.pickle'
        cls.pynames_to_rvars_output = here + '\\output\\output_pyname_to_recipevar.pickle'

        with open(cls.raw_logger_settings_output, 'rb') as f:
            cls.exp_raw_logger_vars = pickle_load(f)
        cls.raw_to_pynames_input = cls.exp_raw_logger_vars

        with open(cls.raw_to_pynames_output, 'rb') as f:
            cls.exp_raw_to_pynames = pickle_load(f)
        cls.pynames_to_rvars_input = cls.exp_raw_to_pynames

        with open(cls.pynames_to_rvars_output, 'rb') as f:
            cls.exp_rvar_dict = pickle_load(f)
Example #47
	def load(self):
		fileName = config.plugins.birthdayreminder.file.value
		print "[Birthday Reminder] reading from file", fileName
		
		tmpList = []
		if isfile(fileName):
			try:
				f = open(fileName, "r")
				tmpList = pickle_load(f)
				f.close()
			except IOError, (error_no, error_str):
				print "[Birthday Reminder] ERROR reading from file %s. Error: %s, %s" % (fileName, error_no, error_str)
				text = _("Error reading file %s.\n\nError: %s, %s") % (fileName, error_no, error_str)
				Notifications.AddNotification(MessageBox, text, type = MessageBox.TYPE_ERROR)
				
			print "[Birthday Reminder] read %s birthdays" % len(tmpList)
Example #48
 def __init__(self, folder, file, reset=False, lazy_update=False):
     self._file = os_path_join(folder, file)
     self._cache = cache = {}
     self._last = None
     if reset:
         return
     # If cache file already exists
     try:
         with open(self._file, 'rb') as file:
             for filepath, checksum in pickle_load(file).items():
                 # If file still exists
                 if os_path_isfile(filepath):
                     cache[filepath] = checksum
             if lazy_update:
                 self._lcache = cache.copy()
     except (FileNotFoundError, EOFError):
         pass
Example #49
def stop_stats(uri):
    from pickle import load as pickle_load
    from os.path import join as path_join

    cb = get_gfs2_callbacks()
    opq = cb.volumeStartOperations(uri, 'w')

    stats_obj = path_join(
        "/var/run/sr-private",
        cb.getUniqueIdentifier(opq),
        'stats.obj'
    )

    proc = None

    with open(stats_obj, 'r') as f:
        proc = pickle_load(f)

    proc.kill()
    force_unlink(stats_obj)
Example #50
def usePickleToMeta():
    if azienda and os.path.exists(str(os.path.join(promogestDir.replace("_",""),meta_pickle.replace("_","")).strip())):
        print " CONTROLLO DELL'ESISTENZA DEL FILE PICKLE", str(os.path.join(promogestDir.replace("_","")))
        with open(str(os.path.join(promogestDir.replace("_",""),meta_pickle.replace("_","")).strip()), 'rb') as f:
            try:
                meta = pickle_load(f)
                meta.bind = engine
                #try:
                    #meta.tables[azienda+".articolo"]
                #except:
                    #delete_pickle()
            except:
                print "DEVO CANCELLARE IL PICKLE PERCHé NON RIESCO A TROVARLO O LEGGERLO"
                delete_pickle()
            print "USO META PICKLE FAST"
            #meta = MetaData(engine)
    else:
        print "USO META NORMALE"
        meta = MetaData(engine)
    return meta
Example #51
def step09( datadir ):

  with open( datadir+'/step07_data.pickle', 'rb' ) as f:
    data = pickle_load( f );

  neg_x1 = {};
  neg_x2 = {};
  pos_x1 = {};
  pos_x2 = {};

  for (y,x) in data:

    for (dim1,dim2) in INTERESTING_COMBINATIONs:

      dimpair = "{}_{}".format( dim1, dim2 );

      x1 = x[ dim1 ];
      x2 = x[ dim2 ];
      if y == '0':
        neg_x1[ dimpair ] = neg_x1.get( dimpair, [] ) + [ x1 ];
        neg_x2[ dimpair ] = neg_x2.get( dimpair, [] ) + [ x2 ];
      elif y == '1':
        pos_x1[ dimpair ] = pos_x1.get( dimpair, [] ) + [ x1 ];
        pos_x2[ dimpair ] = pos_x2.get( dimpair, [] ) + [ x2 ];
      else:
        assert False;

  clusters1 = set();
  clusters2 = set();
  clusters3 = set();

  for (dim1,dim2) in INTERESTING_COMBINATIONs:

    dimpair = "{}_{}".format( dim1, dim2 );    

    ( fig, ax ) = plt.subplots( nrows=1, ncols=1, figsize=(6,6) );

    ax.plot( neg_x1[ dimpair ], neg_x2[ dimpair ], marker='o', color='b', linestyle='', alpha=0.66 );
    ax.plot( pos_x1[ dimpair ], pos_x2[ dimpair ], marker='o', color='r', linestyle='', alpha=0.66 );

    fig.savefig( datadir+'/step09.png' );
Example #52
def step17( datadir, subsample ):

  data_pos = [];
  data_neg = [];

  with open( datadir+'/step07_'+subsample+'.pickle', 'rb' ) as f:
    data = pickle_load( f );

  rowcount = 0;

  for ( y, x ) in data:
    
    rowcount += 1;
    if rowcount > 2500:
      break;

    row = [];
    for dim in DIMs:
      row.append( x[ abs(dim)-1 ] );

    if y == '0':
      data_neg.append( row );
    elif y == '1':
      data_pos.append( row );

  print( len(data_pos), len(data) );
  
  ratio = float( len(data_pos) ) / float( len(data) );
  
  pthresholds \
    = [ ratio * ( float(i) / 10.0 ) for i in range(5,26) ];
  dthresholds \
    = [ ( 0.5 + 0.1417 * float(len(DIMs)-3) ) * float(i)/10.0 \
        for i in range(5,16) ];

  print( ", ".join( [ "{:1.4f}".format(p) for p in pthresholds ] ) );
  print( ", ".join( [ "{:1.4f}".format(d) for d in dthresholds ] ) );

  cov = np.cov( np.array( data_pos+data_neg ).T );
  cov_inv = LA.inv( cov );

  print( cov );
  print( cov_inv );

  data_pos = np.array( data_pos );
  data_neg = np.array( data_neg );

  with open( datadir+'/step17_'+subsample+'.csv', 'wt' ) as out:

    for p_threshold in pthresholds:

      for d_threshold in dthresholds:

        print( '  {:1.2f} {:1.2f}'.format( p_threshold, d_threshold ) );

        total_covered = set();
        pos_covered = set();
        
        for (i,ref_row) in enumerate( data_pos ):

          pos_in_vicinity = set();
          neg_in_vicinity = set();

          ref_row_vec = ref_row.reshape(1,len(ref_row)).T;

          for (j,pos_row) in enumerate( data_pos ):

            pos_row_vec = pos_row.reshape(1,len(pos_row)).T;
            diff = pos_row_vec - ref_row_vec;
            if USE_MAHALANOBIS:
              dist = np.sqrt( np.dot( np.dot( diff.T, cov_inv ), diff ) );
            else:
              dist = LA.norm( diff );

            if dist <= d_threshold:
              pos_in_vicinity.add( j );             

          for (j,neg_row) in enumerate( data_neg ):

            neg_row_vec = neg_row.reshape(1,len(neg_row)).T;
            diff = neg_row_vec - ref_row_vec;
            if USE_MAHALANOBIS:
              dist = np.sqrt( np.dot( np.dot( diff.T, cov_inv ), diff ) );
            else:
              dist = LA.norm( diff );

            if dist <= d_threshold:
              neg_in_vicinity.add( j );
          
          total_in_vicinity = len( pos_in_vicinity ) + len( neg_in_vicinity );

          if total_in_vicinity < 7:
            continue;

          p = float( len( pos_in_vicinity ) ) / float( total_in_vicinity );

          if p > p_threshold:
            total_covered |= pos_in_vicinity;
            total_covered |= neg_in_vicinity;
            pos_covered |= pos_in_vicinity;
            # print( '  ', d_threshold, *stats );

        if ( len(total_covered) > 1 ):
          recall = float( len(pos_covered) ) / float( len(data_pos) );
          precision = float( len(pos_covered) ) / float( len(total_covered) );
          f1 = 2.0 * ( precision * recall ) / ( precision + recall );
          print( '-> {:1.2f} {:1.2f} {:1.4f} {:1.4f} {:1.4f}'.format( p_threshold, d_threshold, f1, precision, recall ) );
          print( '{:1.2f};{:1.2f};{:1.4f};{:1.4f};{:1.4f}'.format( p_threshold, d_threshold, f1, precision, recall ), file=out );
Example #53
  def __enter__( self ):

    if self._mode == "r":
      with open( self._fn, "rb" ) as f:
        self._state = pickle_load( f );
    return self;
Example #54
def step08( datadir, subsample ):

  with open( datadir+'/step07_'+subsample+'.pickle', 'rb' ) as f:
    data = pickle_load( f );

  values_by_dim = {};
  values_by_dim_pos = {};
  values_by_dim_neg = {};

  for ( y, x ) in data:

    for ( i, x_val ) in enumerate( x ):

      values_by_dim[ i ] = values_by_dim.get( i, [] ) + [ x_val ];
      if y == '0':
        values_by_dim_neg[ i ] = values_by_dim_neg.get( i, [] ) + [ x_val ];
      elif y == '1':
        values_by_dim_pos[ i ] = values_by_dim_pos.get( i, [] ) + [ x_val ];
      else:
        assert False;

  septiles_by_dim = {};
  septiles_by_dim_pos = {};
  septiles_by_dim_neg = {};

  for i in values_by_dim:

    values_by_dim[ i ].sort();
    values_by_dim_pos[ i ].sort();
    values_by_dim_neg[ i ].sort();

    N = float( len( values_by_dim[ i ] ) );
    Np = float( len( values_by_dim_pos[ i ] ) );
    Nn = float( len( values_by_dim_neg[ i ] ) );

    septiles_by_dim[ i ] \
      = ( values_by_dim[ i ][ int( (0.0*N)/7.0 ) ],
          values_by_dim[ i ][ int( (1.0*N)/7.0 ) ],
          values_by_dim[ i ][ int( (2.0*N)/7.0 ) ],
          values_by_dim[ i ][ int( (3.0*N)/7.0 ) ],
          values_by_dim[ i ][ int( (4.0*N)/7.0 ) -1 ],
          values_by_dim[ i ][ int( (5.0*N)/7.0 ) -1 ],
          values_by_dim[ i ][ int( (6.0*N)/7.0 ) -1 ],
          values_by_dim[ i ][ int( (7.0*N)/7.0 ) -1 ] );

    septiles_by_dim_pos[ i ] \
      = ( values_by_dim_pos[ i ][ int( (0.0*Np)/7.0 ) ],
          values_by_dim_pos[ i ][ int( (1.0*Np)/7.0 ) ],
          values_by_dim_pos[ i ][ int( (2.0*Np)/7.0 ) ],
          values_by_dim_pos[ i ][ int( (3.0*Np)/7.0 ) ],
          values_by_dim_pos[ i ][ int( (4.0*Np)/7.0 ) -1 ],
          values_by_dim_pos[ i ][ int( (5.0*Np)/7.0 ) -1 ],
          values_by_dim_pos[ i ][ int( (6.0*Np)/7.0 ) -1 ],
          values_by_dim_pos[ i ][ int( (7.0*Np)/7.0 ) -1 ] );

    septiles_by_dim_neg[ i ] \
      = ( values_by_dim_neg[ i ][ int( (0.0*Nn)/7.0 ) ],
          values_by_dim_neg[ i ][ int( (1.0*Nn)/7.0 ) ],
          values_by_dim_neg[ i ][ int( (2.0*Nn)/7.0 ) ],
          values_by_dim_neg[ i ][ int( (3.0*Nn)/7.0 ) ],
          values_by_dim_neg[ i ][ int( (4.0*Nn)/7.0 ) -1 ],
          values_by_dim_neg[ i ][ int( (5.0*Nn)/7.0 ) -1 ],
          values_by_dim_neg[ i ][ int( (6.0*Nn)/7.0 ) -1 ],
          values_by_dim_neg[ i ][ int( (7.0*Nn)/7.0 ) -1 ] );

  stats_by_dimpair = {};

  for ( y, x ) in data:

    for dim1 in septiles_by_dim:

      x1 = discretize( x[dim1], septiles_by_dim[dim1] );

      for dim2 in septiles_by_dim:

        if dim1 >= dim2:
          continue;

        x2 = discretize( x[dim2], septiles_by_dim[dim2] );

        if (dim1,dim2) not in stats_by_dimpair:
          stats_by_dimpair[ (dim1,dim2) ] = {};

        ( total, pos ) = stats_by_dimpair[ (dim1,dim2) ].get( (x1,x2), (0,0) );

        total += 1;
        if y == '1':
          pos += 1;

        stats_by_dimpair[ (dim1,dim2) ][ (x1,x2) ] = ( total, pos );

  maxprop_dimpair = [];

  for ( dimpair, stats_by_valuepair ) in stats_by_dimpair.items():
    maxprop = None;
    maxtotal = 0;
    for ( total, pos ) in stats_by_valuepair.values():
      if total < 10:
        continue;
      prop = float(pos) / float(total);
      if maxprop is None:
        maxprop = prop;
      else:
        if prop > maxprop:
          maxprop = prop;
          maxtotal = total;
    maxprop_dimpair.append( (-maxprop,dimpair,maxtotal) );

  with open( datadir+"/step08_"+subsample+"_median_shift_by_dim.txt", "wt" ) as out:

    median_shift_dim = [];
    for i in values_by_dim:
      median_pos = ( septiles_by_dim_pos[ i ][ 3 ] + septiles_by_dim_pos[ i ][ 4 ] ) / 2.0;
      median_neg = ( septiles_by_dim_neg[ i ][ 3 ] + septiles_by_dim_neg[ i ][ 4 ] ) / 2.0;
      median_shift_dim.append( ( abs(median_pos - median_neg), i, median_pos - median_neg ) );
    for ( abs_median_shift, dim, median_shift ) in sorted( median_shift_dim, reverse=True ):
      print( dim, median_shift, sep=';' );
      print( dim, median_shift, sep=';', file=out );

  with open( datadir+"/step08_"+subsample+"_interesting_combinations.txt", "wt" ) as out:

    for (maxprop,dimpair,maxtotal) in sorted( maxprop_dimpair ):
      print( dimpair[0], dimpair[1], -maxprop, maxtotal, sep=';' );
      print( dimpair[0], dimpair[1], -maxprop, maxtotal, sep=';', file=out );
Example #55
    def lade_tags(self):
        if self.mb.debug: log(inspect.stack)
        
        try:
            pfad = os.path.join(self.mb.pfade['settings'],'tags.pkl')
  
            with open(pfad, 'rb') as f:
                self.mb.tags =  pickle_load(f)
                
            self.mb.class_Sidebar.offen = {nr:1 for nr in self.mb.tags['abfolge']} 
        except:
            log(inspect.stack,tb())
            p = Popup(self.mb, zeit=3)
            p.text = LANG.TAGS_NICHT_GEFUNDEN
            self.lege_tags_an()
            self.mb.class_Sidebar.offen = {nr:1 for nr in self.mb.tags['abfolge']} 
        
        
        # check the tags
        try:
            
            ordinale = list(self.mb.props['ORGANON'].dict_bereiche['ordinal'])
            ordinale_tags = list(self.mb.tags['ordinale'])
            
            o_nicht_in_tags = [o for o in ordinale if o not in ordinale_tags]
            ueberfluessige_tags = [o for o in ordinale_tags if o not in ordinale]
            
            
            # 1) recreate missing tags
            if o_nicht_in_tags:
                
                root = self.mb.props['ORGANON'].xml_tree.getroot()
                namen = [root.find('.//' + o).attrib['Name'] for o in o_nicht_in_tags ]
                
                p = Popup(self.mb, zeit=3)
                p.text = LANG.LEGE_TAGS_FUER_DATEI_AN + '\n'.join(namen)
        
                for o in o_nicht_in_tags:
                    self.erzeuge_tags_ordinal_eintrag(o)
                    
                    
            # 2) delete superfluous tags
            if ueberfluessige_tags:
                                
                pfade = { o : os.path.join(self.mb.pfade['odts'], o + '.odt') for o in ueberfluessige_tags}
                existierende = { o : pf for o,pf in pfade.items() if os.path.exists(pf)}
                nicht_existierende = [o for o in ueberfluessige_tags if o not in existierende]
                
                # non-existent files are deleted without notifying the user
                for o in nicht_existierende:
                    del self.mb.tags['ordinale'][o]
                
                if existierende:

                    text = []
                    for i,e in enumerate(existierende):
                        if i < 20:
                            text.append(e)
                        else:
                            zaehler = i % 20
                            text[zaehler] = text[zaehler] + '  ' + e
                        
                    
                    nachricht = LANG.DATEIEN_NICHT_IM_PROJEKT_ABER_AUF_FP.format(self.mb.pfade['odts']) + '\n'.join(text)
                    
                    entscheidung = self.mb.entscheidung(nachricht,"warningbox",16777216)
                    # 3 = No or Cancel, 2 = Yes
                    if entscheidung == 3:
                        return
                    elif entscheidung == 2:
                        for o,pfad in existierende.items():
                            del self.mb.tags['ordinale'][o]
                            os.remove(pfad)
                            self.mb.class_Bereiche.plain_txt_loeschen(o)

        
        except:
            log(inspect.stack,tb())
Example #56
def collect(infolder,
            line  = comment_LINE,
            block = comment_BLOCK,
            tags  = WORDS,
            marks = MARKS,
            include=INCLUDE,
            exclude=EXCLUDE,
            overwrite=False):
    # Process block comment marks
    blocks_open, blocks_close = comment_block_comments(block)

    # TODO: Make hidden files OS independent, probably using
    #       https://docs.python.org/3.4/library/tempfile.html ?

    # FIXME: for some reason, if a comment-type ever existed in the TODO
    #        file, but after a while its posts are all gone, the keyword
    #        still remains there, according to the current TODO file,
    #        which still have the "QUESTIONS" keyword, and comment

    # TODO: Add explicit-remove/browsing capabilities of the .*_cache files
    #       (for example: if git reverted changes --> remove hash from cache file)
    #       The best solution would be a complete CLI tool, to read and manage
    #       and use the cutils command line tools

    # Compile regular expression patterns
    pattern1 = re_compile(_COMMENT.format(r'|'.join(map(comment_escape, line)),
                                          blocks_open,
                                          r'|'.join(map(comment_escape, tags)),
                                          r'|'.join(map(comment_escape, marks)),
                                          blocks_close),
                         flags=re_IGNORECASE | re_DOTALL | re_MULTILINE | re_VERBOSE)
    pattern2 = re_compile(r'\n')

    # Get previously generated collection of all posts
    COLLECTED = os_path_join(infolder, '.ccom_todo')
    try:
        with open(COLLECTED, 'rb') as file:
            collected = pickle_load(file)
    except (FileNotFoundError, EOFError):
        collected = table_Table(row=OrderedDict)

    # Clear cache -- remove all non-existing files
    for filepath in collected.rows():
        if not os_path_isfile(filepath):
            del collected[filepath]

    # Exception containers
    except_dirs  = []  # relative path to dir from root
    except_files = []  # relative path to file from root
    except_names = []  # filename (with extension) anywhere
    except_exts  = []  # extension anywhere

    # If 'exclude' is dictionary like object
    try:
        _empty = ()
        # Exceptions relative to root
        for key, container in zip(('folders', 'files'),
                                  (except_dirs, except_files)):
            container.extend(os_path_join(infolder, p) for p in exclude.get(key, _empty))
        # Exceptions anywhere
        for key, container in zip(('names', 'extensions'),
                                  (except_names, except_exts)):
            container.extend(exclude.get(key, _empty))
    # If 'exclude' is an iterable object
    except AttributeError:
        except_names = exclude

    # Include containers
    permit_names = []  # filename (with extension) anywhere
    permit_exts  = []  # extension anywhere

    # If 'include' is dictionary like object
    try:
        _empty = ()
        # Includes anywhere
        for key, container in zip(('names', 'extensions'),
                                  (permit_names, permit_exts)):
            container.extend(include.get(key, _empty))
    # If 'include' is an iterable object
    except AttributeError:
        permit_names = include

    # Scan through all files and folders
    with check_Checker(infolder, file='.ccom_cache') as checker:
        for root, dirs, filenames in os_walk(infolder):
            # If skip this folder and all subfolders
            if root in except_dirs:
                dirs.clear()
                continue
            # Check all files in folder
            for filename in filenames:
                filepath = os_path_join(root, filename)[2:]
                # If skip this exact file
                if filepath in except_files:
                    continue
                name, extension = os_path_splitext(filename)
                # If file or extension is not banned and it is on the
                # white-list and it changed since last time checked and
                # this is not an overwrite-call
                if (filename not in except_names and
                    extension not in except_exts and
                    (extension in permit_exts or filename in permit_names) and
                    checker.ischanged(filepath) and
                    not overwrite):
                    with open(filepath, encoding='utf-8') as file:
                        _search(collected, pattern1, pattern2,
                                file.read(), filepath, marks)

    # Save collection of all posts
    with open(COLLECTED, 'wb') as file:
        pickle_dump(collected, file, pickle_HIGHEST_PROTOCOL)

    # Open the todo file and write out the results
    with open('TODO', 'w', encoding='utf-8') as todo:
        # Make it compatible with cver.py
        todo.write('## INFO ##\n'*2)
        # Format TODO file as yaml
        for key in itertools_chain(tags, marks.values()):
            KEY = key.upper()
            try:
                types = collected[KEY].items()
                len_pos = todo.tell()
                # Offset for separator comment and
                # leading and trailing new lines
                todo.write(' '*82)
                todo.write('{}:\n'.format(KEY))
                index = 1
                for filename, posts in types:
                    for i, (linenumber, content) in enumerate(posts, start=index):
                        todo.write(_ITEM.format(msg='\n'.join(content),
                                                index=i,
                                                short=_SHORT,
                                                long=_SHORT*2,
                                                sep='- '*38,
                                                file=filename,
                                                line=linenumber))
                    index = i + 1
                todo.write('\n')
                # Move back to tag separator comment
                todo.seek(len_pos)
                todo.write('\n#{:-^78}#\n'.format(
                    ' {} POSTS IN {} FILES '.format(index - 1, len(types))))
                # Move back to the end
                todo.seek(0, 2)
            except KeyError:
                continue
        print('CCOM: placed {!r}'.format(os_path_join(infolder, 'TODO')))
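A hedged call sketch for collect; the folder and filter values are illustrative, and the real defaults come from the module-level constants named in the signature:

collect('./myproject',
        include={'extensions': ('.py', '.c')},
        exclude={'folders': ('build',), 'extensions': ('.pyc',)})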
Example #57
    def an_09984b_anpassen(self):
        if self.mb.debug: log(inspect.stack) 
        
        try:
            # new date formatting
            
            if self.mb.language == 'de':
                datum_format = ['dd','mm','yyyy']
            else:
                datum_format = ['mm','dd','yyyy']
                
            self.mb.settings_proj.update({'datum_trenner' : '.',
                                          'datum_format' : datum_format })
            self.mb.speicher_settings("project_settings.txt", self.mb.settings_proj)  
            
            message = u'''This project was created by an older version of Organon.
The settings of the tagging category date / time have changed.

If your project uses date tags, check whether the formatting of dates is in the right order.
The standard Organon date formatting is day/month/year for the German version of Organon and
month/day/year for any other language version of Organon.

The formatting can be set under: Organon menu / File / Settings / Tags / Date Format

A backup of your project with the old settings will be created in the backup folder of your project.
            '''
            
            Popup(self.mb, 'info').text = message
            self.mb.erzeuge_Backup()
            
            # transfer the sidebar_content dict into the tags dict
            pfad = os.path.join(self.mb.pfade['files'],'sidebar_content.pkl')
            pfad3 = os.path.join(self.mb.pfade['files'],'sidebar_content.pkl.Backup')
     
            from pickle import load as pickle_load     
            from pickle import dump as pickle_dump
            with open(pfad, 'rb') as f:
                dict_sb_content =  pickle_load(f)
                
            sb_panels = {
                'Synopsis':LANG.SYNOPSIS,
                'Notes':LANG.NOTIZEN,
                'Images':LANG.BILDER,
                'Tags_general':LANG.ALLGEMEIN,
                'Tags_characters':LANG.CHARAKTERE,
                'Tags_locations':LANG.ORTE,
                'Tags_objects':LANG.OBJEKTE,
                'Tags_time':LANG.ZEIT,
                'Tags_user1':LANG.BENUTZER1,
                'Tags_user2':LANG.BENUTZER2,
                'Tags_user3':LANG.BENUTZER3
                }
            
            sb_panels_tup = (
                'Synopsis',
                'Notes',
                'Images',
                'Tags_general',
                'Tags_characters',
                'Tags_locations',
                'Tags_objects',
                'Tags_time',
                'Tags_user1',
                'Tags_user2',
                'Tags_user3'
                )
            
            tags = {
                    'nr_name' : {
                        0 : [u'SYNOPSIS','txt'],
                        1 : [u'NOTIZEN','txt'],
                        2 : [u'BILDER','img'],
                        3 : [u'ALLGEMEIN','tag'],
                        4 : [u'CHARAKTERE','tag'],
                        5 : [u'ORTE','tag'],
                        6 : [u'OBJEKTE','tag'],
                        7 : [u'DATUM','date'],
                        8 : [u'ZEIT','time'],
                        9 : [u'BENUTZER1','tag'],
                        10 : [u'BENUTZER2','tag'],
                        11 : [u'BENUTZER3','tag']
                       },}
            
            
            alte_kats = list(dict_sb_content['ordinal'][ list(dict_sb_content['ordinal'])[0] ])
            
            name_index = { k : sb_panels_tup.index(k) for k in alte_kats if k in sb_panels_tup }
            index_name = { sb_panels_tup.index(k) : k for k in alte_kats if k in sb_panels_tup }
            
            tags['ordinale'] = { ordin: {name_index[k] : i2
                    for k,i2 in i.items()}
                   for ordin,i in dict_sb_content['ordinal'].items() if isinstance(i, dict)}
                  
            tags['sichtbare'] = [name_index[k] for k in dict_sb_content['sichtbare'] ]
            tags['sammlung'] = {name_index[k]:i for k,i in dict_sb_content['tags'].items() }
            tags['nr_name'] = { i : [ getattr(LANG,k[0]) , k[1] ] for i,k in tags['nr_name'].items()}
            tags['name_nr'] = {  k[0] : i for i,k in tags['nr_name'].items()}
            tags['abfolge'] = list(range(len(tags['nr_name'])))
            
            tags['nr_breite'] = {i:2 for i in range(12)}
            tags['nr_breite'].update({
                                        0 : 5,
                                        1 : 5,
                                        2 : 3
                                      })
            
            # remove tags from Tags_general that already exist in other tag panels
            from itertools import chain
            alle_tags_in_anderen_panels = list(chain.from_iterable(
                                            [v for i,v in tags['sammlung'].items() if i != 3 ]
                                            ))
            
            for ordi in tags['ordinale']:
                for t in alle_tags_in_anderen_panels:
                    if t in tags['ordinale'][ordi][3]:
                        tags['ordinale'][ordi][3].remove(t)
                    if t in tags['sammlung'][3]:
                        tags['sammlung'][3].remove(t)
            
            
            # split time and date into separate panels
            for ordi in tags['ordinale']:
                
                for i in range(11,8,-1):
                    tags['ordinale'][ordi].update({i:tags['ordinale'][ordi][i-1]})
                
                if 'zeit' in tags['ordinale'][ordi][7]:  
                    tags['ordinale'][ordi][8] = tags['ordinale'][ordi][7]['zeit']
                else:
                    tags['ordinale'][ordi][8] = None
                
                if 'datum' not in tags['ordinale'][ordi][7]: 
                    tags['ordinale'][ordi][7] = None
                elif tags['ordinale'][ordi][7]['datum'] is None:
                    tags['ordinale'][ordi][7] = None
                else:
                    dat_split = tags['ordinale'][ordi][7]['datum'].split('.')
                    tags['ordinale'][ordi][7] = {
                                                 datum_format[0] : dat_split[0],
                                                 datum_format[1] : dat_split[1],
                                                 datum_format[2] : dat_split[2],
                                                 }
             
            for i in range(11,8,-1):
                tags['sammlung'].update({i:tags['sammlung'][i-1]})
                
            del tags['sammlung'][8]
            
            
            # new time formatting
            panel_nr = [i for i,v in tags['nr_name'].items() if v[1] == 'time'][0]
            
            for ordi in tags['ordinale']:
                
                zeit = tags['ordinale'][ordi][panel_nr]
                if zeit is None:
                    continue
                    
                zeit_str = str(zeit)
        
                if len(zeit_str) == 7:
                    zeit_str = '0' + zeit_str
        
                std = int(zeit_str[0:2])
                minu = int(zeit_str[2:4])
                
                tags['ordinale'][ordi][panel_nr] = '{0}:{1}'.format(std,minu)    
                
                
            # save the migrated tags dict with pickle protocol 2
            pfad2 = os.path.join(self.mb.pfade['settings'], 'tags.pkl')
            with open(pfad2, 'wb') as f:
                pickle_dump(tags, f, 2)
            
            try:
                os.remove(pfad)
            except:
                pass
            try:
                os.remove(pfad3)
            except:
                pass
        except:
            log(inspect.stack,tb())
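A minimal sketch of the read side of the migration above; lade_tags is a hypothetical helper (not Organon's actual loader) and assumes the same settings directory and protocol-2 pickle used when tags.pkl was written.

# Hypothetical helper, not part of Organon: load the migrated tags dict
# ('nr_name', 'ordinale', 'sammlung', 'sichtbare', ...) written above.
import os
from pickle import load as pickle_load

def lade_tags(settings_dir):
    pfad = os.path.join(settings_dir, 'tags.pkl')
    if not os.path.exists(pfad):
        return None
    with open(pfad, 'rb') as f:
        return pickle_load(f)

# usage (inside the class above): tags = lade_tags(self.mb.pfade['settings'])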
Пример #58
0
# Module-level imports assumed by this snippet; DIMCLUSTERs is a constant defined
# elsewhere in the original module and is not reproduced here.
import numpy as np
import matplotlib.pyplot as plt
from pickle import load as pickle_load
from sklearn.decomposition import KernelPCA

def step15( datadir, subsample ):

  data_ = [];
  classlabels = [];

  with open( datadir+'/step07_'+subsample+'.pickle', 'rb' ) as f:
    data__ = pickle_load( f );

  posdims = [];
  negdims = [];
  for dimcluster in DIMCLUSTERs:
    for dim in dimcluster:
      if dim < 0:
        negdims.append( abs(dim)-1 );
      else:
        posdims.append( abs(dim)-1 );

  rowcount = 0;
  poscnt = 0;
  negcnt = 0;

  for ( y, x ) in data__:
    
    rowcount += 1;
    if rowcount > 2500:
      break;

    row = [];
    for (dim,xval) in enumerate(x):
      if dim in posdims:
        row.append( xval );
      elif dim in negdims:
        # row.append( -xval );
        row.append( xval );
      else:
        #print( "skipping dim", dim );
        row.append( xval );

    data_.append( row );

    if y == '0':
      classlabels.append( 0 );
      negcnt += 1;
    elif y == '1':
      classlabels.append( 1 );
      poscnt += 1;

  data_ = np.array( data_ );
  classlabels = np.array( classlabels );

  ratio = float( poscnt ) / float( poscnt+negcnt );

  data = KernelPCA( n_components=3, kernel='cosine' ).fit_transform( data_ );  

  print( np.cov(data.T) );

  print( data[0] );

  plottable_pos = [];
  plottable_neg = [];
  for i in range(0,3):
    plottable_pos.append( [] );
    plottable_neg.append( [] );

  for (y,x) in zip( classlabels, data ):
    for (i,xval) in enumerate(x):
      if y == 0:
        plottable_neg[ i ].append( xval );
      elif y == 1:
        plottable_pos[ i ].append( xval );

  ( fig, ax ) = plt.subplots( nrows = 3, ncols = 3, figsize = ( 3*3, 3*3 ) );

  for i in range( 0, 3 ):
    for j in range( 0, 3 ):
      if j >= i:
        continue;

      ax[i,j].plot( plottable_neg[i], plottable_neg[j], marker='o', color='b', linestyle='', alpha=0.66 );
      ax[i,j].plot( plottable_pos[i], plottable_pos[j], marker='o', color='r', linestyle='', alpha=0.66 );
      ax[i,j].set_title( str((i,j)) );

  fig.savefig( datadir+'/step20_'+subsample+'.png' );
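A hedged sketch of the upstream dump that step15 expects: step07_<subsample>.pickle appears to hold a pickled sequence of (label, feature_vector) pairs with string labels '0'/'1', as implied by the loop above. The real step07 is not shown; write_step07 is a hypothetical stand-in.

# Hypothetical stand-in for the unseen step07 writer: dump (label, features) pairs
# so that the pickle_load at the top of step15 receives the shape it iterates over.
from pickle import dump as pickle_dump

def write_step07(datadir, subsample, labelled_rows):
    with open(datadir + '/step07_' + subsample + '.pickle', 'wb') as f:
        pickle_dump(labelled_rows, f)

# usage: write_step07('data', 'train', [('1', [0.1, 0.2, 0.3]), ('0', [0.4, 0.5, 0.6])])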
Пример #59
0
def main(args=None):
    if args is None:
        args = sys.argv[1:]

    np.seterr(all='raise')

    parser, ns, args = init_args(description='Predict label for unlabeled sequences', args=args)

    parser = hmmer_args(parser)

    parser.add_argument('MODEL', type=PathType)
    parser.add_argument('SEQUENCES', type=PathType)

    ARGS = parse_args(parser, args, namespace=ns)

    with gzip_open(ARGS.MODEL, 'rb') as fh:
        try:
            model = pickle_load(fh)
            if model[0] != MODEL_VERSION:
                raise ImportError('incompatible model version')
            ARGS.ENCODER, ARGS.LABEL, hmm, extractor, clf = model[1:]
        except ImportError:
            msg = 'your model is not of the appropriate version, please re-learn your model'
            raise RuntimeError(msg)

    # open the sequence file and build a generator over its records,
    # translating DNA records when the model expects a protein encoding
    with open(ARGS.SEQUENCES) as seq_fh:

        def seqrecords():
            is_dna = ARGS.ENCODER == DNAEncoder
            seq_fmt = seqfile_format(ARGS.SEQUENCES)
            source = Verifier(SeqIO.parse(seq_fh, seq_fmt), DNAAlphabet)
            try:
                for record in source:
                    yield record if is_dna else translate(record)
            except VerifyError:
                if is_dna:
                    msg = (
                        "your model specifies a DNA encoding "
                        "which is incompatible with protein sequences"
                        )
                    raise RuntimeError(msg)
                source.set_alphabet(AminoAlphabet)
                for record in source:
                    yield record

        tmphmm = tmpaln = None
        try:
            fd, tmphmm = mkstemp(); close(fd)
            with open(tmphmm, 'wb') as hmm_fh:
                hmm_fh.write(hmm)
                # explicitly drop the in-memory hmm blob
                hmm = None
            tmpaln = generate_alignment_(seqrecords(), tmphmm, ARGS)
            alignment = load_stockholm(tmpaln, trim=True)
        finally:
            # tmpaln is only set if alignment generation succeeded
            if tmphmm is not None and exists(tmphmm):
                remove(tmphmm)
            if tmpaln is not None and exists(tmpaln):
                remove(tmpaln)

    X = extractor.transform(alignment)
    y = clf.predict(X)

    feature_names = extractor.get_feature_names()
    support = clf.named_steps['mrmr'].support_
    labels = ['"{0:s}"'.format(feature_names[i]) for i, s in enumerate(support) if s]
    emptys = [' ' * (len(label) + 2) for label in labels]
    idlen = max(len(r.id) for r in alignment) + 3

    print('{{\n  "label": "{0:s}",\n  "predictions": ['.format(ARGS.LABEL), file=ARGS.OUTPUT)
    for i, r in enumerate(alignment):
        if i > 0:
            print(',', file=ARGS.OUTPUT)
        features = ['[ ']
        for j, x in enumerate(X[i, support]):
            if x:
                features.append(labels[j])
                features.append(', ')
            else:
                features.append(emptys[j])
        features.append(' ]')
        # drop the separator comma after the last present feature
        idx = None
        for k, f in enumerate(features):
            if f == ', ':
                idx = k
        if idx is None:
            features[0] = features[0].rstrip()
            features[-1] = features[-1].lstrip()
        else:
            features[idx] = ''
        features_ = ''.join(features)
        print(
            '    {{{{ "id": {{0:<{0:d}s}} "value": {{1: d}}, "features": {{2:s}} }}}}'.format(
                idlen).format('"{0:s}",'.format(r.id), y[i], features_),
            file=ARGS.OUTPUT, end='')
    print('\n  ]\n}', file=ARGS.OUTPUT)

    finalize_args(ARGS)

    return 0
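A hedged sketch of the writer side implied by the loader at the top of main(): the MODEL file is read as a gzip-compressed pickle of the tuple (MODEL_VERSION, encoder, label, hmm, extractor, clf). save_model is a hypothetical helper, not part of the original tool.

# Hypothetical helper (not part of the original tool): persist a model tuple in the
# layout main() unpickles, i.e. (MODEL_VERSION, encoder, label, hmm, extractor, clf).
from gzip import open as gzip_open
from pickle import dump as pickle_dump

def save_model(path, model_version, encoder, label, hmm, extractor, clf):
    with gzip_open(path, 'wb') as fh:
        pickle_dump((model_version, encoder, label, hmm, extractor, clf), fh)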