Example #1
def close_session():
    # Do we have a session to save in the first place?
    if CURRENT_SESSION is None:
        return

    try:
        makedirs(SESSIONS_DIR)
    except OSError as e:
        if e.errno == 17:
            # Already exists
            pass
        else:
            raise

    # Write to a temporary file and move it in place, for safety
    tmp_file_path = None
    try:
        tmp_file_fh, tmp_file_path = mkstemp()
        os_close(tmp_file_fh)

        with open(tmp_file_path, 'wb') as tmp_file:
            pickle_dump(CURRENT_SESSION, tmp_file)
        copy(tmp_file_path, get_session_pickle_path(CURRENT_SESSION.get_sid()))
    except IOError:
        # failed store: no permissions?
        raise SessionStoreError
    finally:
        if tmp_file_path is not None:
            remove(tmp_file_path)
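The temp-file-then-copy pattern above avoids corrupting an existing session file on a failed write. A minimal sketch of the same idea using os.replace, which is atomic when source and destination live on the same filesystem; atomic_pickle_dump and target_path are illustrative names, not part of the example above:

import os
import pickle
import tempfile

def atomic_pickle_dump(obj, target_path):
    # Write into the destination directory, then atomically swap the file
    # into place so readers never observe a half-written pickle.
    dir_name = os.path.dirname(target_path) or "."
    fd, tmp_path = tempfile.mkstemp(dir=dir_name)
    try:
        with os.fdopen(fd, "wb") as tmp_file:
            pickle.dump(obj, tmp_file)
        os.replace(tmp_path, target_path)
    except BaseException:
        os.remove(tmp_path)
        raise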
Example #2
def cache_calc(filename, func, *args, **kwargs):
    """ Cache calculations, so that the first call to this function performs the
        calculations, and caches them to a file. Future calls to this function
        simply load up the data from the cached file.

    Args:
        filename:(str)
            the file path you want to save the cached file as
        func: (callable)
            The function to call to perform the calculation
        *args:
            ordered arguments to be passed on to func()
        **kwargs:
            keyword arguments to be passed on to func()

    Returns:
        Returns whatever func() returns.
    
    Examples:
        cache_calc("myCachedFile", myFunc)
    """
    # ==========================================================================
    if file_exists(filename):
        print("Loading the cached version of " + filename)
        with open(filename, mode="rb") as fileObj:
            x = pickle_load(fileObj)
    else:
        print("Caching the calculation to the file " + filename)
        x = func(*args, **kwargs)
        # Cache the calculation so future calls to this function load the cached
        # object instead.
        with open(filename, mode="wb") as fileObj:
            pickle_dump(x, fileObj)
    return x
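A hypothetical usage sketch (expensive_sum and the cache filename are made up): the first call runs the computation and writes the pickle, the second call loads it from disk.

def expensive_sum(n):
    return sum(i * i for i in range(n))

result = cache_calc("sum_cache.pkl", expensive_sum, 10_000_000)  # computes and caches
result = cache_calc("sum_cache.pkl", expensive_sum, 10_000_000)  # loads the cached pickle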
Example #4
def cache_calc(filename, func, *args, **kwargs):
    """
    Cache calculations, so that the first call to this function performs the
    calculations and caches them to a file. Future calls to this function
    simply load up the data from the cached file.

    :param filename:(str) the file path you want to save the cached file as
    :param func: The function to call to perform the calculation
    :param *args: ordered arguments to be passed on to func()
    :param **kwargs: keyword arguments to be passed on to func()
    :return: whatever func() returns.

    :examples:
        cache_calc("myCachedFile", myFunc)
    """
    # ==========================================================================
    if file_exists(filename):
        print("Loading the cached version of " + filename)
        with open(filename, mode="rb") as fileObj:
            x = pickle_load(fileObj)
    else:
        print("Caching the calculation to the file " + filename)
        x = func(*args, **kwargs)
        # Cache the calculation so future calls to this function load the cached
        # object instead.
        with open(filename, mode="wb") as fileObj:
            pickle_dump(x, fileObj)
    return x
Example #5
def show_and_save(showplot=True, savefig=None, formats=FORMATS, pickleit=False, fig=None):
    """ Maximize the window if need to show it, save it if needed, and then show it or close it.

    - Inspired by https://tomspur.blogspot.fr/2015/08/publication-ready-figures-with.html#Save-the-figure
    """
    if showplot:
        maximizeWindow()
    if savefig is not None:
        if pickleit and fig is not None:
            form = "pickle"
            path = "{}.{}".format(savefig, form)
            print("Saving raw figure with format {}, to file '{}'...".format(form, path))  # DEBUG
            with open(path, "bw") as f:
                pickle_dump(fig, f)
            print("       Saved! '{}' created of size '{}b', at '{:%c}' ...".format(path, os.path.getsize(path), datetime.fromtimestamp(os.path.getatime(path))))
        for form in formats:
            path = "{}.{}".format(savefig, form)
            print("Saving figure with format {}, to file '{}'...".format(form, path))  # DEBUG
            try:
                plt.savefig(path, bbox_inches=BBOX_INCHES)
                print("       Saved! '{}' created of size '{}b', at '{:%c}' ...".format(path, os.path.getsize(path), datetime.fromtimestamp(os.path.getatime(path))))
            except Exception as exc:
                print("Error: could not save current figure to {} because of error {}... Skipping!".format(path, exc))  # DEBUG
    try:
        plt.show(block=True) if showplot else plt.close()
    except (TypeError, AttributeError):
        print("Failed to show the figure for some unknown reason...")  # DEBUG
Example #6
    def start(self):
        """Start/Continue the Genetic Algorithm"""

        print("generation {}".format(self.generation), self.population[0].gene,
              self.population[0].fitness)

        generation_start_time: float = time()

        for _ in range(1000):
            best_individual = self.__iterate()

            generation_end_time = time()
            pickle_dump(
                best_individual,
                open(
                    path_join(self.output_dir,
                              "best-generation-{}.p".format(self.generation)),
                    "wb"))
            self.generation += 1
            print(
                "generation {}, {}ms".format(
                    self.generation,
                    round(generation_end_time - generation_start_time, 3) *
                    1000), best_individual.gene, best_individual.fitness)
            generation_start_time = generation_end_time

            if 0.99 < best_individual.fitness:
                break
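The pickle_dump above passes an open() handle that is only closed when the file object is garbage-collected; a hedged equivalent of that step as it would read inside the loop (checkpoint_path is an illustrative name):

            checkpoint_path = path_join(self.output_dir,
                                        "best-generation-{}.p".format(self.generation))
            with open(checkpoint_path, "wb") as checkpoint_file:
                pickle_dump(best_individual, checkpoint_file)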
Example #7
 def save_resource(self, resource: Any) -> None:
     if not self._cache_dir:
         raise ResourceException(
             "Trying to save a resource but the cache dir was not set.")
     self._cache_dir.mkdir(parents=True, exist_ok=True)
     with bz2_open(self._resource_cache_path(resource), "wb") as fh:
         pickle_dump(resource, fh)
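A hedged sketch of the matching read side in the same aliased-import style (load_resource is a hypothetical name, not from the example above):

from bz2 import open as bz2_open
from pickle import load as pickle_load

def load_resource(cache_path):
    # Decompress and unpickle a resource written by save_resource()
    with bz2_open(cache_path, "rb") as fh:
        return pickle_load(fh)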
Example #8
    def _save_button_command(self):
        if self._save_filename_entry.get() != '':
            with open(self._save_filename_entry.get(), 'wb') as file_handle:
                pickle_dump(self._game_map, file_handle)

                self._game_map.remove_all_game_objects()

                self._game_gui.render()
Example #9
def pickle_save(pickle_file_path, data):
    '''Save data to pickle file'''
    try:
        with open(pickle_file_path, "wb") as f:
            pickle_dump(data, f)
    except Exception:
        return False
    return True
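A hypothetical counterpart mirroring pickle_save's return-value convention (assumes pickle.load is imported as pickle_load, matching the other examples here):

def pickle_load_file(pickle_file_path):
    '''Load data from pickle file; returns the data, or None on any failure'''
    try:
        with open(pickle_file_path, "rb") as f:
            return pickle_load(f)
    except Exception:
        return None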
Example #10
 def save(self) -> None:
     try:
         with open(self.conn_string, 'wb') as f:
             pickle_dump(self.values, f, protocol=HIGHEST_PROTOCOL)
     except OSError as e:
         self.logger.error(
             'Can\'t open file "%s": "%s" for writing. Cache not saved.',
             self.conn_string, e)
Example #11
 def save(cls, output_file_path):
     """It saves the metrics data to the output file.
     
     Arguments:
         output_file_path {A Path object} -- The output file path to store the metrics data.
     """
     with output_file_path.open(mode = 'wb') as handle:
         pickle_dump(Metric.metric_data, handle)
Example #12
 def __exit__(self, *exceptions):
     try:
         cache = self._lcache
     except AttributeError:
         cache = self._cache
     # If the context was exited without an exception
     if not all(exceptions):
         with open(self._file, 'wb') as file:
             pickle_dump(cache, file, pickle_HIGHEST_PROTOCOL)
Example #13
    def speicher_sidebar_dict(self):
        if self.mb.debug: log(inspect.stack)

        try:
            pfad = os.path.join(self.mb.pfade['files'], 'sidebar_content.pkl')
            with open(pfad, 'wb') as f:
                pickle_dump(self.mb.dict_sb_content, f, 2)
        except:
            log(inspect.stack, tb())
Example #14
 def speicher_tags(self):
     if self.mb.debug: log(inspect.stack)
     
     try:
         pfad = os.path.join(self.mb.pfade['settings'],'tags.pkl')
         with open(pfad, 'wb') as f:
             pickle_dump(self.mb.tags, f,2)
     except:
         log(inspect.stack,tb())
Example #17
def _set(cache_key, file_name, data, pickle=False):

	cache_path = xbmc_helper().get_file_path(CONST['CACHE_DIR'], file_name)
	if pickle is False:
		with io_open(file=cache_path, mode='w', encoding='utf-8') as cache_outfile:
			cache_outfile.write(compat._unicode(data))
	else:
		with io_open(file=cache_path, mode='wb') as cache_outfile:
			pickle_dump(data, cache_outfile, protocol=0)
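A hedged sketch of the matching read path (this _get is hypothetical; xbmc_helper, CONST, io_open and pickle_load are assumed to be imported as in the example above):

def _get(file_name, pickle=False):

	cache_path = xbmc_helper().get_file_path(CONST['CACHE_DIR'], file_name)
	if pickle is False:
		with io_open(file=cache_path, mode='r', encoding='utf-8') as cache_infile:
			return cache_infile.read()
	else:
		with io_open(file=cache_path, mode='rb') as cache_infile:
			return pickle_load(cache_infile)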
Example #18
    def _get(self):
        """
        Retrieves the data set model
        :return: map, [instrument][composer][piece][tempo] = abs_download_path
        """
        mapping = {}
        urls = []

        website_dump = OneClassicalDataSet.__name__ + ".dump.pkl"
        if exists(website_dump):
            pp("Restoring Website Information...")
            urls, mapping = pickle_load(open(website_dump, "rb"))
        else:
            pp("Retrieving Website Information for " + self.url)
            for ref_instrument in self._get_ref_links(self.url +
                                                      self.by_instrument):
                instrument = ref_instrument.split("=")[1]
                # print ("Instrument: ", ref_instrument, instrument)
                mapping[instrument] = {}
                for ref_composer in self._get_ref_links(self.url +
                                                        ref_instrument):
                    composer = ref_composer.split("=")[1].split("-")[1].strip()
                    # print ("Composer: ", ref_composer, composer)
                    mapping[instrument][composer] = {}
                    for ref_piece in self._get_ref_links(self.url +
                                                         ref_composer):

                        piece = ref_piece.split("=")[1].split("-")[1].strip()
                        downloads = self._get_downloads(self.url + ref_piece)
                        print(instrument, composer, piece, downloads)
                        mapping[instrument][composer][piece] = downloads
                        urls += list(
                            map(lambda tempo: downloads[tempo], downloads))

            pickle_dump((urls, mapping), open(website_dump, "wb"))
        # Download Data Set
        content = self._download(urls=list(set(urls)))
        # Mapping File Paths to Data Set
        to_remove = []
        for i in mapping:
            for c in mapping[i]:
                for p in mapping[i][c]:
                    for t in mapping[i][c][p]:
                        if mapping[i][c][p][t] in content:
                            mapping[i][c][p][t] = content[mapping[i][c][p][t]]
                        else:
                            to_remove.append([i, c, p, t])

        # Remove all mapping where audio could not be downloaded
        for item in to_remove:
            del (mapping[item[0]][item[1]][item[2]][item[3]])

        # Remove website dump
        remove(website_dump)

        return mapping
Example #19
  def __exit__( self, exc_type, exc_value, traceback ):

    ex_w_exc = False;
    ex_w_exc = ex_w_exc or ( exc_type is not None );
    ex_w_exc = ex_w_exc or ( exc_value is not None );
    ex_w_exc = ex_w_exc or ( traceback is not None );

    if ( not ex_w_exc ) and ( self._mode == "w" ):

      if self._needs_finalization:
        self._finalize();

      with open( self._fn_meta, 'wb' ) as f:

        r = { "c": self._len_c,
              "b": self._len_b,
              "x": self._len_x,
              "co": self._co };

        pickle_dump( r, f );

      with open( self._fn_icov, 'wb' ) as f:

        pickle_dump( self._icov, f );

    if self._cdata is not None:
      try:
        assert self._cdata.close();
      except:
        print( str( self._cdata.error() ) );
        raise;
      self._cdata = None;

    if self._ddata is not None:
      try:
        assert self._ddata.close();
      except:
        print( str( self._ddata.error() ) );
        raise;
      self._ddata = None;

    if ex_w_exc and ( self._mode == "w" ):

      if isfile( self._fn_cdata ):
        remove( self._fn_cdata );

      if isfile( self._fn_ddata ):
        remove( self._fn_ddata );

      if isfile( self._fn_meta ):
        remove( self._fn_meta );

      if isfile( self._fn_icov ):
        remove( self._fn_icov );

    return False;
Example #20
def save(variable, path):
    """Save variable on given path using Pickle
    
    Args:
        variable: what to save
        path (str): path of the output
    """
    with open(path, 'wb') as f:
        pickle_dump(variable, f)
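A hypothetical inverse of save() in the same style (load is a made-up name; assumes pickle.load is imported as pickle_load):

def load(path):
    """Load a variable pickled by save()

    Args:
        path (str): path of the pickled file

    Returns:
        The unpickled object.
    """
    with open(path, 'rb') as f:
        return pickle_load(f)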
Example #21
    def dump(self, filename):
        """Dump the template object to ``filename`` so you can re-use it later.

        This method uses cPickle to serialize internal template model, so you
        don't need to pass through the learn process everytime you need to
        parse data. It's worth using this method since learning process
        generally cost a lot of time compared to parsing.
        """
        with open(filename, 'wb') as dumped_file:
            pickle_dump(self, dumped_file)
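A hedged counterpart for re-loading a dumped template (load_template is hypothetical; assumes pickle.load is imported as pickle_load):

def load_template(filename):
    """Restore a template object previously written by ``dump``."""
    with open(filename, 'rb') as dumped_file:
        return pickle_load(dumped_file)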
Example #22
    def saveModel(self):
        path = filedialog.asksaveasfilename()
        if not path:
            return
        try:
            params = {
                "predictor_names": self.predictor_names,
                "label_name": self.label_name,
                "is_round": self.is_round,
                "is_negative": self.is_negative,
                "train_size": self.train_size_var.get(),
                "size_choice": self.size_choice_var.get(),
                "scale_type": self.scale_var.get(),
                "difference_choice": self.difference_choice_var.get(),
                "interval": self.interval_var.get() if self.difference_choice_var.get() else None,
                "second_difference_choice": self.s_difference_choice_var.get(),
                "second_interval": self.s_interval_var.get() if self.s_difference_choice_var.get() else None,
                "acf_lags": self.acf_lags.get(),
                "lag_choice": self.lag_option_var.get(),
                "lag_number": self.lag_entries[self.lag_option_var.get()].get(),
                "num_layers": self.no_optimization_choice_var.get(),
                "num_neurons": [self.neuron_numbers_var[i].get() for i in range(self.no_optimization_choice_var.get())],
                "activations": [self.activation_var[i].get() for i in range(self.no_optimization_choice_var.get())],
                "output_activation": self.output_activation.get(),
                "hyperparameters": {i:j.get() for (i, j) in self.hyperparameters.items()},
                "model": self.model_var.get(),
                "train_loss": self.train_loss.get()
                }
        except:
            popupmsg("Model is not created")
            return

        os.mkdir(path)
        self.model.save(path+"/model.h5") # type: ignore

        if self.scale_var.get() != "None":
            with open(path+"/feature_scaler.pkl", "wb") as f:
                pickle_dump(self.feature_scaler, f)
            with open(path+"/label_scaler.pkl", "wb") as f:
                pickle_dump(self.label_scaler, f)

        if self.difference_choice_var.get():
            with open(path+"/fill.npy", "wb") as outfile:
                np.save(outfile, self.fill_values)
        if self.s_difference_choice_var.get():
            with open(path+"/s_fill.npy", "wb") as outfile:
                np.save(outfile, self.s_fill_values)

        with open(path+"/lags.npy", 'wb') as outfile:
            np.save(outfile, self.lags)
        with open(path+"/last_values.npy", 'wb') as outfile:
            np.save(outfile, self.last)

        with open(path+"/model.json", 'w') as outfile:
            json.dump(params, outfile)
Example #23
def main():
    parser = argparse_ArgumentParser("Input parameters")
    parser.add_argument("--input_file_name",
                        default="input_toy.yaml",
                        help="Input parameters file name")
    parser.add_argument("--graph_files_dir",
                        default="",
                        help="Graph files' folder path")
    parser.add_argument("--out_dir_name",
                        default="/results",
                        help="Output directory name")
    args = parser.parse_args()
    with open(args.input_file_name, 'r') as f:
        inputs = yaml_load(f, yaml_Loader)

    # Override output directory name if same as gen
    if args.out_dir_name or inputs['out_comp_nm'] == "/results/res":
        if not os_path.exists(inputs['dir_nm'] + args.out_dir_name):
            os_mkdir(inputs['dir_nm'] + args.out_dir_name)
        inputs['out_comp_nm'] = args.out_dir_name + "/res"

    inputs['graph_files_dir'] = ''
    if args.graph_files_dir:
        if not os_path.exists(inputs['dir_nm'] + args.graph_files_dir):
            os_mkdir(inputs['dir_nm'] + args.graph_files_dir)
        inputs['graph_files_dir'] = args.graph_files_dir

    with open(inputs['dir_nm'] + inputs['out_comp_nm'] + "_input.yaml",
              'w') as outfile:
        yaml_dump(inputs, outfile, default_flow_style=False)

    logging_basicConfig(filename=inputs['dir_nm'] + inputs['out_comp_nm'] +
                        "_logs.yaml",
                        level=logging_INFO)
    start_time_read = time_time()
    myGraph = read_graphs(inputs)
    read_time = time_time() - start_time_read

    myGraphName = inputs['dir_nm'] + inputs['graph_files_dir'] + "/res_myGraph"
    with open(myGraphName, 'wb') as f:
        pickle_dump(myGraph, f)

    tot_time = time_time() - start_time

    out_comp_nm = inputs['dir_nm'] + inputs['out_comp_nm']
    # Write to yaml file instead
    with open(out_comp_nm + '_runtime_performance.out', "a") as fid:
        print("Read network time (s) = ",
              read_time,
              "[",
              round(100 * float(read_time) / tot_time, 2),
              "%]",
              file=fid)
        print("Total time (s) = ", tot_time, file=fid)
Example #24
def savepickle(file_path, contents):
    #
    # Populate serialized content to the pickle with provided file system path
    excepted = "failed to save the contents provided to pickle on file system"
    try:
        pickle_dump(contents, open(file_path, "wb"))
    except Exception as reason:
        raise Exception(excepted) from reason
    #
    # Returns True to indicate success; failures raise above
    return True
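The open() above is never explicitly closed; a behaviorally equivalent sketch using a with block:

def savepickle(file_path, contents):
    excepted = "failed to save the contents provided to pickle on file system"
    try:
        with open(file_path, "wb") as handle:
            pickle_dump(contents, handle)
    except Exception as reason:
        raise Exception(excepted) from reason
    return True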
Example #25
    def get(self):
        """
        Retrieves or loads the data set
        :return: map, data set
        """
        if not self.data:
            self.data = self._get()
            with open(self.pkl, "wb") as handler:
                pickle_dump(self.data, handler)

        return self.data
Example #26
 def save(self, filename=PICKLE_CACHE_PATH):
     from pickle import UnpicklingError
     from pickle import dump as pickle_dump
     try:
         with open(filename, 'wb') as cache_file:
             pickle_dump(ordall(self._map), cache_file)
     except IOError:
         sys.stderr.write('warning: failed to write cache.\n')
     except:
         sys.stderr.write('warning: unexpected error writing cache.\n')
Example #27
def get_prot_list(test_complex_path):
    with open(test_complex_path, 'rb') as f:
        test_complex_list = pickle_load(f)

    test_complex_nodes = [
        item for sublist in test_complex_list for item in sublist
    ]
    test_prot_list = set(test_complex_nodes)

    with open(test_complex_path + "_prot_list", 'wb') as f:
        pickle_dump(test_prot_list, f)
    return test_prot_list
Example #28
  def __exit__( self, exc_type, exc_value, traceback ):

    if exc_type is None and exc_value is None and traceback is None:

      if self._needs_finalization:
        self._finalize();

      if self._mode == "w":
        with open( self._fn, "wb" ) as f:
          pickle_dump( self._state, f );

    return False;
Example #29
def dump_pickle(name, var):
    folder = join(conf.DIRECTORY, 'pickles')
    try:
        mkdir(folder)
    except FileExistsError:
        pass
    except Exception as e:
        raise OSError("Failed to create 'pickles' folder, please create it manually") from e

    location = join(folder, '{}.pickle'.format(name))
    with open(location, 'wb') as f:
        pickle_dump(var, f, HIGHEST_PROTOCOL)
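A hypothetical counterpart for reading back (load_pickle is a made-up name; assumes join and pickle.load are imported as in the example above):

def load_pickle(name):
    location = join(conf.DIRECTORY, 'pickles', '{}.pickle'.format(name))
    with open(location, 'rb') as f:
        return pickle_load(f)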
Example #31
 def _get_words_page(self):
     response = self._session.get(URL_COPYBOOKS)
     passport_urls = self._get_urls_containing(
         response.content, URL_PART_PASSPORT)
     if len(passport_urls) == 1:
         response = self._auth(passport_urls[0])
     elif len(passport_urls) > 1:
         raise Exception('too many passport urls on the page, '
                         'dont know what to do')
     save(response.content.decode('utf8'))
     with open(self._cookies, 'wb') as f:
         pickle_dump(dict_from_cookiejar(self._session.cookies), f)
     return response
Example #32
def savedump(data, filename, silent=False):
    """Serializes data to a file using the built-in cPickle module.

    .. Seealso::
        :func:`loaddump`
    """
    assert filename is not None and filename != "" and filename != "-", filename

    if not silent:
        print("savedump(): Saving data to '" + filename + "'..."),
    with open(filename, "wb") as f:
        pickle_dump(data, f, protocol=2)
    if not silent:
        print("Done")
Example #33
	def save(self, data = None):
		fileName = config.plugins.birthdayreminder.file.value
		print "[Birthday Reminder] writing to file", fileName
		
		try:
			f = open(fileName, "wb")
			if data:
				pickle_dump(data, f)
			else:
				pickle_dump(self.getBirthdayList(), f)
			f.close()
			print "[Birthday Reminder] wrote %s birthdays to %s" % (self.getSize(), fileName)
		except IOError, (error_no, error_str):
			print "[Birthday Reminder] ERROR writing to file %s. Error: %s, %s" % (fileName, error_no, error_str)
			text = _("Error writing file %s.\n\nError: %s, %s") % (fileName, error_no, error_str)
			Notifications.AddNotification(MessageBox, text, type = MessageBox.TYPE_ERROR)
Example #35
def search_metropolis(seed_node, scaler, par_inputs_fn):
    # Picks out of a subset of its neighbors and adds the best node
    with open(par_inputs_fn, 'rb') as f:
        inputs = pickle_load(f)
    with open(inputs['modelfname'], 'rb') as f:
        model = pickle_load(f)
    folNm = inputs['folNm']
    folNm_out = inputs['folNm_out']

    cd, g1 = starting_edge(folNm, seed_node)
    if cd == 0:
        return

    a, b = met(g1, model, scaler, inputs, 0)

    with open(folNm_out + "/" + seed_node, 'wb') as f:
        pickle_dump((a, b), f)
Example #36
    def saveModel(self):
        path = filedialog.asksaveasfilename()
        if not path:
            return
        try:
            params = self.model.get_params()
        except:
            popupmsg("Model is not created")
            return
        params["predictor_names"] = self.predictor_names
        params["label_name"] = self.label_name
        params["is_round"] = self.is_round
        params["is_negative"] = self.is_negative
        params["do_forecast"] = self.do_forecast_option.get()
        params["validation_option"] = self.validation_option.get()
        params["random_percent"] = self.random_percent_var.get(
        ) if self.validation_option.get() == 1 else None
        params["k_fold_cv"] = self.cross_val_var.get(
        ) if self.validation_option.get() == 2 else None
        params["lookback_option"] = self.lookback_option.get()
        params["lookback_value"] = self.lookback_val_var.get(
        ) if self.lookback_option.get() else None
        params["seasonal_lookback_option"] = self.seasonal_lookback_option.get(
        )
        params["seasonal_period"] = self.seasonal_period_var.get(
        ) if self.seasonal_lookback_option.get() else None
        params["seasonal_value"] = self.seasonal_val_var.get(
        ) if self.seasonal_lookback_option.get() else None
        params["sliding"] = self.sliding
        params["scale_type"] = self.scale_var.get()

        os.mkdir(path)
        dump(self.model, path + "/model.joblib")
        if self.scale_var.get() != "None":
            with open(path + "/feature_scaler.pkl", "wb") as f:
                pickle_dump(self.feature_scaler, f)
            with open(path + "/label_scaler.pkl", "wb") as f:
                pickle_dump(self.label_scaler, f)
        if self.lookback_option.get() == 1:
            with open(path + "/last_values.npy", 'wb') as outfile:
                np.save(outfile, self.last)
        if self.seasonal_lookback_option.get() == 1:
            with open(path + "/seasonal_last_values.npy", 'wb') as outfile:
                np.save(outfile, self.seasonal_last)
        with open(path + "/model.json", 'w') as outfile:
            json.dump(params, outfile)
Example #37
def query_impala(queryobj, config=ImpalaConfigFromEnv, request_pool=os.getenv("REQUEST_POOL"), mem_limit="40g", time_query=True, floatify=True):
    if time_query:
        start_time = time.time()
        
    if isinstance(queryobj, QueryObject):
        q = queryobj.query
    elif isinstance(queryobj, str):
        q = queryobj
    else:
        print("invalid type for queryobj. Must be QueryObject or Str")
        raise InvalidQueryType
   
    has_picked_data = False

    if os.getenv("TURN_PICKLE_ON") == "TRUE" and isinstance(queryobj, QueryObject):
        try:
            df = pickle_load(open(f"pickled_data/{queryobj.name}.sav", "rb"))
            print(f"loading from picked state - {queryobj.name}.sav")
            has_picked_data = True
        except:
            pass

    if not has_picked_data:
        df = query_impala_basic(q, config=config, request_pool=request_pool, mem_limit=mem_limit, floatify=floatify)

    if os.getenv("TURN_PICKLE_ON") == "TRUE" and isinstance(queryobj, QueryObject) and has_picked_data == False:
        if not os.path.exists('pickled_data'):
            os.makedirs('pickled_data')
        pickle_dump(df, open(f"pickled_data/{queryobj.name}.sav", "wb"))
    
    if time_query:
        end_time = time.time()
        time_taken = end_time - start_time
        if time_taken > 60:
            time_taken = round(time_taken/60, 1)
            units = "minutes"
        else:
            time_taken = round(time_taken, 0)
            units = "seconds"
            
        print(f"query took {time_taken} {units}")
        
    return df
Example #38
def search_metropolis_clique_start(scaler, par_inputs_fn, G_clique):
    # Picks out of a subset of its neighbors and adds the best node
    # print(seed_clique)
    with open(par_inputs_fn, 'rb') as f:
        inputs = pickle_load(f)
    with open(inputs['modelfname'], 'rb') as f:
        model = pickle_load(f)
    g1 = nx_Graph(G_clique)

    # Finding score
    score_prev, comp_bool = get_score(g1, model, scaler, inputs['model_type'])

    # Removing starting points which are not complexes
    if score_prev < inputs["classi_thresh"]:
        return
    a, b = met(g1, model, scaler, inputs, score_prev)
    name = " ".join([str(n) for n in g1.nodes()])
    with open(folNm_out + "/" + name, 'wb') as f:
        pickle_dump((a, b), f)
Example #39
def start_stats(uri):
    import subprocess
    from pickle import dump as pickle_dump
    from os.path import join as path_join

    args = ['/usr/libexec/xapi-storage-script/volume/org.xen.xapi.storage.gfs2/stats.py', uri]
    proc = subprocess.Popen(args)

    cb = get_gfs2_callbacks()
    opq = cb.volumeStartOperations(uri, 'w')

    stats_obj = path_join(
        "/var/run/sr-private",
        cb.getUniqueIdentifier(opq),
        'stats.obj'
    )

    touch(stats_obj)
    with open(stats_obj, 'w') as f:
        pickle_dump(proc, f)
Example #40
def safe_pickle(obj, filepath, **kwargs):
    """
    @param obj: Any pickleable object
    @type obj:  T
    @param filepath: filepath to save the pickle file
    @type filepath: str
    @param kwargs: dict of pickle kwargs
    @type kwargs: dict
    @return: None
    @rtype: None
    """
    temp = BytesIO()
    pickle_dump(obj, temp, **kwargs)

    tail, _ = path_split(filepath)
    if not path_exists(tail):
        makedirs(tail)

    with open(filepath, 'wb') as f:
        f.write(temp.getvalue())
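Serializing into the BytesIO buffer first means an unpicklable object raises before the destination file is touched, so a half-written file is never left behind. Hypothetical usage (the path and protocol are illustrative):

safe_pickle({"epoch": 3, "loss": 0.17}, "/tmp/checkpoints/state.pkl", protocol=4)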
Example #41
def main(args=None):
    if args is None:
        args = sys.argv[1:]

    np.seterr(all='raise')

    parser, ns, args = init_args(description='learn model for labeled sequences', args=args)

    parser = hmmer_args(parser)
    parser = featsel_args(parser)
    parser = feature_args(parser)
    parser = mrmr_args(parser)
    parser = rfe_args(parser)
    parser = optstat_args(parser)
    parser = filter_args(parser)
    parser = svm_args(parser)
    parser = cv_args(parser)

    def GzipType(string):
        try:
            return gzip_open(string, 'wb')
        except:
            raise ArgumentTypeError("cannot open '{0:s}' for writing".format(string))

    parser.add_argument('--tree', dest='TREE')
    parser.add_argument('ANTIBODY', type=AntibodyTypeFactory(ns.DATA), nargs='+')
    parser.add_argument('MODEL', type=GzipType)

    ARGS = parse_args(parser, args, namespace=ns)

    antibodies = tuple(ARGS.ANTIBODY)

    # do some argument parsing
    if ARGS.TEST:
        test_discrete(ARGS)
        finalize_args(ARGS)
        return {}

    if ARGS.MRMR_METHOD == 'MAXREL':
        ARGS.SIMILAR = 0.0

    # set the util params
    set_util_params(ARGS.REFSEQ.id)

    # grab the relevant antibody from the SQLITE3 data
    # format as SeqRecord so we can output as FASTA
    # and generate an alignment using HMMER if it doesn't already exist
    seqrecords, clonal, antibodies = ARGS.DATA.seqrecords(antibodies, ARGS.CLONAL)

    ab_basename = ''.join((
        '+'.join(antibodies),
        '_dna' if ARGS.ENCODER == DNAEncoder else '_amino',
        '_clonal' if clonal else ''
        ))
    alignment_basename = '_'.join((
        ab_basename,
        ARGS.DATA.basename_root,
        __version__
        ))
    sto_filename = alignment_basename + '.sto'

    alignment, hmm = generate_alignment(seqrecords, sto_filename, is_refseq, ARGS)

    re_pngs = re_compile(r'N[^P][TS][^P]', re_I)

    # compute features
    ylabeler = Labeler(
        partial(expression, ARGS.LABEL),
        partial(skipper, is_refseq, ARGS.SUBTYPES)
    )
    alignment, y, threshold = ylabeler(alignment)

    filter = naive_filter(
        max_conservation=ARGS.MAX_CONSERVATION,
        min_conservation=ARGS.MIN_CONSERVATION,
        max_gap_ratio=ARGS.MAX_GAP_RATIO
        )

    extractors = [('site', SiteVectorizer(ARGS.ENCODER, filter))]

    if ARGS.RADIUS:
        extractors.append(('site_pairs', PairwiseSiteVectorizer(ARGS.ENCODER, filter, ARGS.RADIUS)))

    if ARGS.PNGS:
        extractors.append(('pngs', MotifVectorizer(re_pngs, 4, name='PNGS')))

    if ARGS.PNGS_PAIRS:
        extractors.append(
            ('pngs_pairs', PairwiseMotifVectorizer(re_pngs, 4, name='PNGS'))
            )

    extractor = FeatureUnion(extractors, n_jobs=1)  # n_jobs must be one for now
    X = extractor.fit_transform(alignment)

    Cs = list(C_range(*ARGS.LOG2C))
    scorer = Scorer(ARGS.OPTSTAT)

    # we don't let GridSearchCV do its parallelization over all combinations
    # of grid points, because when the length of FEATURE_GRID is short,
    # it takes way longer than it should

    # usually the # of Cs is larger than the # of ks
    C_jobs = int(getenv('NCPU', -1))
    k_jobs = 1

    # if not, swap the parallelization strategy
    if len(ARGS.FEATURE_GRID) > len(Cs):
        C_jobs, k_jobs = k_jobs, C_jobs

    mrmr = MRMR(
        method=ARGS.MRMR_METHOD,
        normalize=ARGS.MRMR_NORMALIZE,
        similar=ARGS.SIMILAR
        )
    svm = GridSearchCV(
        estimator=SVC(kernel='linear', class_weight='auto'),
        param_grid=dict(C=Cs),
        scoring=scorer,
        n_jobs=C_jobs,
        pre_dispatch='3 * n_jobs'
        )
    pipe = Pipeline([('mrmr', mrmr), ('svm', svm)])

    if len(ARGS.FEATURE_GRID) == 1:
        pipe.set_params(mrmr__k=ARGS.FEATURE_GRID[0], svm__cv=ARGS.CV_FOLDS)
        clf = pipe.fit(X, y)
    else:
        pipe.set_params(svm__cv=ARGS.CV_FOLDS - 1)
        clf = GridSearchCV(
            estimator=pipe,
            param_grid=dict(mrmr__k=ARGS.FEATURE_GRID),
            scoring=scorer,
            n_jobs=k_jobs,
            pre_dispatch='3 * n_jobs',
            cv=ARGS.CV_FOLDS
            ).fit(X, y).best_estimator_

    pickle_dump((MODEL_VERSION, ARGS.ENCODER, ARGS.LABEL, hmm, extractor, clf), ARGS.MODEL)
    ARGS.MODEL.close()

    mrmr_ = clf.named_steps['mrmr']
    svm_ = clf.named_steps['svm'].best_estimator_

    coefs, ranks = coefs_ranks(mrmr_.ranking_, mrmr_.support_, svm_.coef_)
    results = Results(extractor.get_feature_names(), scorer, ARGS.SIMILAR)

    results.add(y, clf.predict(X), coefs, ranks)
    results.metadata(antibodies, ARGS.LABEL)

    print(results.dumps(), file=ARGS.OUTPUT)

    finalize_args(ARGS)

    return ARGS.MODEL
Example #42
    def an_09984b_anpassen(self):
        if self.mb.debug: log(inspect.stack) 
        
        try:
            # new date format
            
            if self.mb.language == 'de':
                datum_format = ['dd','mm','yyyy']
            else:
                datum_format = ['mm','dd','yyyy']
                
            self.mb.settings_proj.update({'datum_trenner' : '.',
                                          'datum_format' : datum_format })
            self.mb.speicher_settings("project_settings.txt", self.mb.settings_proj)  
            
            message = u'''This project was created by an older version of Organon.
The settings of the tagging category date / time has changed.

If your project uses date tags, check if the formatting of dates is in the right order.
Standard Organon date formatting is day/month/year for the german version of Organon and
month/day/year for any other language version of Organon.

The formatting can be set under: Organon menu / File / Settings / Tags / Date Format

A backup of your project with the old settings will be created in the backup folder of your project. 

            '''
            
            Popup(self.mb, 'info').text = message
            self.mb.erzeuge_Backup()
            
            # migrate the sidebar_content dict into the tags dict
            pfad = os.path.join(self.mb.pfade['files'],'sidebar_content.pkl')
            pfad3 = os.path.join(self.mb.pfade['files'],'sidebar_content.pkl.Backup')
     
            from pickle import load as pickle_load     
            from pickle import dump as pickle_dump
            with open(pfad, 'rb') as f:
                dict_sb_content =  pickle_load(f)
                
            sb_panels = {
                'Synopsis':LANG.SYNOPSIS,
                'Notes':LANG.NOTIZEN,
                'Images':LANG.BILDER,
                'Tags_general':LANG.ALLGEMEIN,
                'Tags_characters':LANG.CHARAKTERE,
                'Tags_locations':LANG.ORTE,
                'Tags_objects':LANG.OBJEKTE,
                'Tags_time':LANG.ZEIT,
                'Tags_user1':LANG.BENUTZER1,
                'Tags_user2':LANG.BENUTZER2,
                'Tags_user3':LANG.BENUTZER3
                }
            
            sb_panels_tup = (
                'Synopsis',
                'Notes',
                'Images',
                'Tags_general',
                'Tags_characters',
                'Tags_locations',
                'Tags_objects',
                'Tags_time',
                'Tags_user1',
                'Tags_user2',
                'Tags_user3'
                )
            
            tags = {
                    'nr_name' : {
                        0 : [u'SYNOPSIS','txt'],
                        1 : [u'NOTIZEN','txt'],
                        2 : [u'BILDER','img'],
                        3 : [u'ALLGEMEIN','tag'],
                        4 : [u'CHARAKTERE','tag'],
                        5 : [u'ORTE','tag'],
                        6 : [u'OBJEKTE','tag'],
                        7 : [u'DATUM','date'],
                        8 : [u'ZEIT','time'],
                        9 : [u'BENUTZER1','tag'],
                        10 : [u'BENUTZER2','tag'],
                        11 : [u'BENUTZER3','tag']
                       },}
            
            
            alte_kats = list(dict_sb_content['ordinal'][ list(dict_sb_content['ordinal'])[0] ])
            
            name_index = { k : sb_panels_tup.index(k) for k in alte_kats if k in sb_panels_tup }
            index_name = { sb_panels_tup.index(k) : k for k in alte_kats if k in sb_panels_tup }
            
            tags['ordinale'] = { ordin: {name_index[k] : i2
                    for k,i2 in i.items()}
                   for ordin,i in dict_sb_content['ordinal'].items() if isinstance(i, dict)}
                  
            tags['sichtbare'] = [name_index[k] for k in dict_sb_content['sichtbare'] ]
            tags['sammlung'] = {name_index[k]:i for k,i in dict_sb_content['tags'].items() }
            tags['nr_name'] = { i : [ getattr(LANG,k[0]) , k[1] ] for i,k in tags['nr_name'].items()}
            tags['name_nr'] = {  k[0] : i for i,k in tags['nr_name'].items()}
            tags['abfolge'] = list(range(len(tags['nr_name'])))
            
            tags['nr_breite'] = {i:2 for i in range(12)}
            tags['nr_breite'].update({
                                        0 : 5,
                                        1 : 5,
                                        2 : 3
                                      })
            
            # delete tags from Tags_general that also appear in other tag panels
            from itertools import chain
            alle_tags_in_anderen_panels = list(chain.from_iterable(
                                            [v for i,v in tags['sammlung'].items() if i != 3 ]
                                            ))
            
            for ordi in tags['ordinale']:
                for t in alle_tags_in_anderen_panels:
                    if t in tags['ordinale'][ordi][3]:
                        tags['ordinale'][ordi][3].remove(t)
                    if t in tags['sammlung'][3]:
                        tags['sammlung'][3].remove(t)
            
            
            # split time and date
            for ordi in tags['ordinale']:
                
                for i in range(11,8,-1):
                    tags['ordinale'][ordi].update({i:tags['ordinale'][ordi][i-1]})
                
                if 'zeit' in tags['ordinale'][ordi][7]:  
                    tags['ordinale'][ordi][8] = tags['ordinale'][ordi][7]['zeit']
                else:
                    tags['ordinale'][ordi][8] = None
                
                if 'datum' not in tags['ordinale'][ordi][7]: 
                    tags['ordinale'][ordi][7] = None
                elif tags['ordinale'][ordi][7]['datum'] == None:
                    tags['ordinale'][ordi][7] = None
                else:
                    dat_split = tags['ordinale'][ordi][7]['datum'].split('.')
                    tags['ordinale'][ordi][7] = {
                                                 datum_format[0] : dat_split[0],
                                                 datum_format[1] : dat_split[1],
                                                 datum_format[2] : dat_split[2],
                                                 }
             
            for i in range(11,8,-1):
                tags['sammlung'].update({i:tags['sammlung'][i-1]})
                
            del tags['sammlung'][8]
            
            
            # new time format
            panel_nr = [i for i,v in tags['nr_name'].items() if v[1] == 'time'][0]
            
            for ordi in tags['ordinale']:
                
                zeit = tags['ordinale'][ordi][panel_nr]
                if zeit == None:
                    continue
                    
                zeit_str = str(zeit)
        
                if len(zeit_str) == 7:
                    zeit_str = '0' + zeit_str
        
                std = int(zeit_str[0:2])
                minu = int(zeit_str[2:4])
                
                tags['ordinale'][ordi][panel_nr] = '{0}:{1}'.format(std,minu)    
                
                
            # save the tags
            pfad2 = os.path.join(self.mb.pfade['settings'],'tags.pkl')
            with open(pfad2, 'wb') as f:
                pickle_dump(tags, f,2)
            
            try:
                os.remove(pfad)
            except:
                pass
            try:
                os.remove(pfad3)
            except:
                pass
        except:
            log(inspect.stack,tb())
Example #43
    def perform_experiment_a(experiment_config):
        from md5 import new as new_md5
        from pickle import dumps as pickle_dump
        from platform import uname
        from datetime import datetime
        from threading import Thread
        from pprint import PrettyPrinter
        from simple_run import run_test

        pp = PrettyPrinter(indent = 4)
        print "Running experiment with config:"
        pp.pprint(experiment_config)

        current_experiment_id = new_md5(pickle_dump(experiment_config) + str(datetime.now())).hexdigest()
        info_dict = {'experiment_id': current_experiment_id,
                     'start_time': str(datetime.now()),
                     'scheduler': experiment_config['scheduler'],
                     'uname_a': ' '.join(uname()),
                     'cpufreq_governor': get_governor_string(),
                     'mt_mc_state': get_smt_mc_power_savings(),
                     'target_load_level': experiment_config['target_load_level'],
                     'num_of_process': experiment_config['num_of_process'],
                     'num_of_ops': experiment_config['num_of_ops']}
        info_dict.update(get_kernel_olord_settings())
        info_dict.update(get_ondemand_settings())

        eii.execute(info_dict)
        test_args = {'n': experiment_config['num_of_process'],
                     'a': 'dont_set',
                     'l': experiment_config['target_load_level'],
                     'o': experiment_config['num_of_ops']}
        
        class TestThread(Thread):
            def __init__(self, args):
                self.args = args
                Thread.__init__(self)
            def run(self):
                self.total_time = run_test(self.args)

        tt = TestThread(test_args)
        tt.start()

        start_time = time()
        
        data_counter = 0
        while tt.is_alive():
            t = time() - start_time
            data_dict = {'data_id': current_experiment_id + str(data_counter).zfill(16),
                         'experiment_id': current_experiment_id,
                         'time': t,
                         'voltage': psl.voltage,
                         'current': psl.current,
                         'power': psl.power,
                         'temperature': tl.temperature,
                         'cpus_online': ocl.online_cpus}
            eid.execute(data_dict)
            sleep_time = 1.0 - time() + (t + start_time)
            sleep_time = 0.0 if (sleep_time < 0) else sleep_time
            #print sleep_time
            sleep(sleep_time)
            data_counter += 1

        r = session.query(ExperimentInfo).filter_by(experiment_id = current_experiment_id).first()
        r.end_time = str(datetime.now())
        r.total_time = float(tt.total_time)
        session.commit()
Example #44
def saveRelationDict(dict):
	pkl_file = open(CONFIG_FILE, 'wb')
	if pkl_file:
		pickle_dump(dict, pkl_file)
		pkl_file.close()
Example #45
def step07( datadir ):

  data = [];
  binplane_data = [];
  catplane_data = [];
  all_data = [];

  with gzip_open( datadir+"/train_trn.tsv.gz", "rt" ) as f:

    pass;

    firstline = f.readline();
    if firstline and firstline[-1] == '\n':
      firstline = firstline[:-1];
    firstline = firstline.split( '\t' );

    assert \
         firstline \
      == (   [ '"id"', '"y"', '"cId"' ]
           + [ '"x{}"'.format(i) for i in range(1,101) ] );

    for line in f:

      if line and line[-1] == '\n':
        line = line[:-1];
      line = line.split( '\t' );

      id_ = line[0];
      y = line[1];
      cid = line[2];

      assert cid[0] == '"';
      assert cid[-1] == '"';
      cid = int( cid[1:-1] );

      x = [];
      b = [];

      for i in range( 3, len(line) ):
        if (i-2) in BINARY_FEATs:
          b.append( line[i] );
        else:
          x.append( float(line[i]) );

      b_ = 0;
      for i in range( 0, len(b) ):
        if b[i] == '0':
          b_i = 0;
        elif b[i] == '1':
          b_i = 1;
        else:
          assert False;
        b_ |= b_i << i;

      cid_ = hex(cid);
      b_ = hex(b_);

      if len( all_data ) < 10000:
        all_data.append( (y,x) );
      if ( cid_ == '0xe' ) and ( b_ == '0x3fffffff' ):
        if len( data ) < 10000:
          data.append( (y,x) );
      if ( cid_ == '0xe' ):
        if len( catplane_data ) < 10000:
          catplane_data.append( (y,x) );
      if ( b_ == '0x3fffffff' ):
        if len( binplane_data ) < 10000:
          binplane_data.append( (y,x) );

  with open( datadir+'/step07_all_data.pickle', 'wb' ) as f:
    pickle_dump( all_data, f );
    print( "all data:", len(all_data) );

  with open( datadir+'/step07_binplane_data.pickle', 'wb' ) as f:
    pickle_dump( binplane_data, f );
    print( "binplane data:", len(binplane_data) );

  with open( datadir+'/step07_catplane_data.pickle', 'wb' ) as f:
    pickle_dump( catplane_data, f );
    print( "catplane data:", len(catplane_data) );

  with open( datadir+'/step07_data.pickle', 'wb' ) as f:
    pickle_dump( data, f );
    print( "data:", len(data) );
Example #46
def collect(infolder,
            line  = comment_LINE,
            block = comment_BLOCK,
            tags  = WORDS,
            marks = MARKS,
            include=INCLUDE,
            exclude=EXCLUDE,
            overwrite=False):
    # Process block comment marks
    blocks_open, blocks_close = comment_block_comments(block)

    # TODO: Make hidden files OS independent, probably using
    #       https://docs.python.org/3.4/library/tempfile.html ?

    # FIXME: for some reason, if a comment-type ever existed in the TODO
    #        file, but after a while its posts are all gone, the keyword
    #        still remains there, according to the current TODO file,
    #        which still have the "QUESTIONS" keyword, and comment

    # TODO: Add explicit-remove/browsing capabilities of the .*_cache files
    #       (for example: if git reverted changes --> remove hash from cache file)
    #       The best solution would be a complete CLI tool, to read and manage
    #       and use the cutils command line tools

    # Compile regular expression patterns
    pattern1 = re_compile(_COMMENT.format(r'|'.join(map(comment_escape, line)),
                                          blocks_open,
                                          r'|'.join(map(comment_escape, tags)),
                                          r'|'.join(map(comment_escape, marks)),
                                          blocks_close),
                         flags=re_IGNORECASE | re_DOTALL | re_MULTILINE | re_VERBOSE)
    pattern2 = re_compile(r'\n')

    # Get previously generated collection of all posts
    COLLECTED = os_path_join(infolder, '.ccom_todo')
    try:
        with open(COLLECTED, 'rb') as file:
            collected = pickle_load(file)
    except (FileNotFoundError, EOFError):
        collected = table_Table(row=OrderedDict)

    # Clear cache -- remove all non-existing files
    for filepath in collected.rows():
        if not os_path_isfile(filepath):
            del collected[filepath]

    # Exception containers
    except_dirs  = []  # relative path to dir from root
    except_files = []  # relative path to file from root
    except_names = []  # filename (with extension) anywhere
    except_exts  = []  # extension anywhere

    # If 'exclude' is dictionary like object
    try:
        _empty = ()
        # Exceptions relative to root
        for key, container in zip(('folders', 'files'),
                                  (except_dirs, except_files)):
            container.extend(os_path_join(infolder, p) for p in exclude.get(key, _empty))
        # Exceptions anywhere
        for key, container in zip(('names', 'extensions'),
                                  (except_names, except_exts)):
            container.extend(exclude.get(key, _empty))
    # If 'exclude' is an iterable object
    except AttributeError:
        except_names = exclude

    # Include containers
    permit_names = []  # filename (with extension) anywhere
    permit_exts  = []  # extension anywhere

    # If 'include' is dictionary like object
    try:
        _empty = ()
        # Includes anywhere
        for key, container in zip(('names', 'extensions'),
                                  (permit_names, permit_exts)):
            container.extend(include.get(key, _empty))
    # If 'include' is an iterable object
    except AttributeError:
        permit_names = include

    # Scan through all files and folders
    with check_Checker(infolder, file='.ccom_cache') as checker:
        for root, dirs, filenames in os_walk(infolder):
            # If skip this folder and all subfolders
            if root in except_dirs:
                dirs.clear()
                continue
            # Check all files in folder
            for filename in filenames:
                filepath = os_path_join(root, filename)[2:]
                # If skip this exact file
                if filepath in except_files:
                    continue
                name, extension = os_path_splitext(filename)
                # If file or extension is not banned and it is on the
                # white-list and it changed since last time checked and
                # this is not and overwrite-call
                if (filename not in except_names and
                    extension not in except_exts and
                    (extension in permit_exts or filename in permit_names) and
                    checker.ischanged(filepath) and
                    not overwrite):
                    with open(filepath, encoding='utf-8') as file:
                        _search(collected, pattern1, pattern2,
                                file.read(), filepath, marks)

    # Save collection of all posts
    with open(COLLECTED, 'wb') as file:
        pickle_dump(collected, file, pickle_HIGHEST_PROTOCOL)

    # Open the todo file and write out the results
    with open('TODO', 'w', encoding='utf-8') as todo:
        # Make it compatible with cver.py
        todo.write('## INFO ##\n'*2)
        # Format TODO file as yaml
        for key in itertools_chain(tags, marks.values()):
            KEY = key.upper()
            try:
                types = collected[KEY].items()
                len_pos = todo.tell()
                # Offset for separator comment and
                # leading and trailing new lines
                todo.write(' '*82)
                todo.write('{}:\n'.format(KEY))
                index = 1
                for filename, posts in types:
                    for i, (linenumber, content) in enumerate(posts, start=index):
                        todo.write(_ITEM.format(msg='\n'.join(content),
                                                index=i,
                                                short=_SHORT,
                                                long=_SHORT*2,
                                                sep='- '*38,
                                                file=filename,
                                                line=linenumber))
                    index = i + 1
                todo.write('\n')
                # Move back to tag separator comment
                todo.seek(len_pos)
                todo.write('\n#{:-^78}#\n'.format(
                    ' {} POSTS IN {} FILES '.format(index - 1, len(types))))
                # Move back to the end
                todo.seek(0, 2)
            except KeyError:
                continue
        print('CCOM: placed {!r}'.format(os_path_join(infolder, 'TODO')))
Example #47
def saveVolumeDict(dict):
	pkl_file = open(CONFIG_FILE_VOLUME, 'wb')
	if pkl_file:
		pickle_dump(dict, pkl_file)
		pkl_file.close()
Example #48
def document(infolder, outfolder, extension, loader, external_css=None,
             generate_toc=None, overwrite=False):
    # Get previously generated TOC object
    TOC = os_path_join(infolder, '.cdoc_toc')
    try:
        with open(TOC, 'rb') as file:
            old_toc = pickle_load(file)
    except (FileNotFoundError, EOFError):
        old_toc = table_Dict2D(OrderedDict)

    # Create new TOC object
    new_toc = table_Dict2D(OrderedDict)

    # TODO: do we really need a separate OrderedDict for pages ???
    pages = OrderedDict()
    anonym = iter_count()

    # TODO: Create real dependency graphs
    #       Document object:
    #           parents  = set()  # other documents depending on this document
    #           children = set()  # other documents this document depending on
    #
    #       If document changed:
    #           set all parents of document => changed
    #
    #       If any of its children changed:
    #           set all parents of child => changed
    #
    #       -- The loop should check if a document's change flag has already
    #          been set. If not, hash file, and set flag, and notify all
    #          dependencies (parents)

    # Load all pages
    with check_Checker(infolder, file='.cdoc_cache', lazy_update=True) as checker:
        # Go through all files
        for file in os_listdir(infolder):
            # If file has the proper extension
            if file.endswith(extension):
                # Create full file path
                filepath = os_path_join(infolder, file)
                # If file has been changed since last check
                if checker.ischanged(filepath) and not overwrite:
                    # Regenerate file
                    filename, pagename, depends = \
                        _process(infolder, file, filepath, pages, loader, anonym)
                # If file hasn't been changed
                else:
                    # If file has been cached before
                    try:
                        # Get previous infos
                        filename, depends = old_toc[filepath]
                        pagename = old_toc.otherkey(filepath)
                        pages[pagename] = None
                        # If any of the dependencies has changed
                        for dependency in depends:
                            if checker.ischanged(dependency) and not overwrite:
                                # Regenerate file
                                filename, pagename, depends = \
                                    _process(infolder, file, filepath, pages, loader, anonym)
                                break
                    # If file is new and hasn't been cached before
                    except KeyError:
                        # Generate it for the first time
                        filename, pagename, depends = \
                            _process(infolder, file, filepath, pages, loader, anonym)
                # Store new values
                new_toc[pagename:filepath] = filename, depends

    # If pages were reordered, renamed, inserted, deleted, etc.
    if set(old_toc) - set(new_toc):
        for pagename, filepath in new_toc.keys():
            if pages[pagename] is None:
                _process(infolder, os_path_basename(filepath), filepath,
                         pages, loader, anonym)

    # Write back TOC object
    with open(TOC, 'wb') as file:
        pickle_dump(new_toc, file, pickle_HIGHEST_PROTOCOL)
    # Generate Table of Content?
    if generate_toc is None:
        generate_toc = len(new_toc) > 1
    # Create documents
    _build(pages, outfolder, generate_toc, new_toc, external_css)
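The dependency-graph TODO inside document() could take roughly this shape; the Document class and mark_changed() helper below are assumptions sketched from the comment, not part of the original code:

class Document:
    def __init__(self, path):
        self.path = path
        self.parents = set()   # other documents depending on this document
        self.children = set()  # other documents this document depends on
        self.changed = False

def mark_changed(document):
    # Set the change flag and notify all (transitive) parents, skipping
    # any document whose flag is already set; the early return also
    # guards against cycles.
    if document.changed:
        return
    document.changed = True
    for parent in document.parents:
        mark_changed(parent)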
Example #49
0
def get_statistics(directory, base_names, use_cache=True):
    # Check if we have a cache of the costly statistics generation.
    # Only use it if no file is newer than the cache itself.
    cache_file_path = get_stat_cache_by_dir(directory)

    try:
        cache_mtime = getmtime(cache_file_path)
    except OSError as e:
        if e.errno == 2:
            cache_mtime = -1
        else:
            raise

    try:
        if (not isfile(cache_file_path)
                # Has config.py changed?
                or getmtime(get_config_py_path()) > cache_mtime
                # Has any non-hidden file in the directory changed since
                # the cache was generated?
                or any(True for f in listdir(directory)
                       if (not f.startswith('.')
                           and getmtime(path_join(directory, f)) > cache_mtime))
                # Is the per-directory configuration newer than the cache?
                or getmtime(get_config_path(directory)) > cache_mtime):
            generate = True
            docstats = []
        else:
            generate = False
            try:
                with open(cache_file_path, 'rb') as cache_file:
                    docstats = pickle_load(cache_file)
                if len(docstats) != len(base_names):
                    Messager.warning(
                        'Stats cache %s was incomplete; regenerating' %
                        cache_file_path)
                    generate = True
                    docstats = []
            except UnpicklingError:
                # Corrupt data, re-generate
                Messager.warning(
                    'Stats cache %s was corrupted; regenerating' %
                    cache_file_path, -1)
                generate = True
            except EOFError:
                # Corrupt data, re-generate
                generate = True
    except OSError:
        Messager.warning(
            'Failed to check file modification times for the stats cache; '
            'regenerating')
        generate = True

    if not use_cache:
        generate = True

    # "header" and types
    stat_types = [("Entities", "int"), ("Relations", "int"), ("Events", "int")]

    if options_get_validation(directory) != 'none':
        stat_types.append(("Issues", "int"))

    if generate:
        # Generate the document statistics from scratch
        from annotation import JOINED_ANN_FILE_SUFF
        log_info('generating statistics for "%s"' % directory)
        docstats = []
        for docname in base_names:
            try:
                with Annotations(path_join(directory, docname),
                                 read_only=True) as ann_obj:
                    tb_count = len(list(ann_obj.get_entities()))
                    rel_count = (len(list(ann_obj.get_relations())) +
                                 len(list(ann_obj.get_equivs())))
                    event_count = len(list(ann_obj.get_events()))

                    if options_get_validation(directory) == 'none':
                        docstats.append([tb_count, rel_count, event_count])
                    else:
                        # verify and include verification issue count
                        try:
                            from projectconfig import ProjectConfiguration
                            projectconf = ProjectConfiguration(directory)
                            from verify_annotations import verify_annotation
                            issues = verify_annotation(ann_obj, projectconf)
                            issue_count = len(issues)
                        except BaseException:
                            # TODO: error reporting
                            issue_count = -1
                        docstats.append(
                            [tb_count, rel_count, event_count, issue_count])
            except Exception as e:
                log_info('Received "%s" when trying to generate stats' % e)
                # Pass exceptions silently, just marking stats missing
                docstats.append([-1] * len(stat_types))

        # Cache the statistics
        try:
            with open(cache_file_path, 'wb') as cache_file:
                pickle_dump(docstats, cache_file)
        except IOError as e:
            Messager.warning(
                "Could not write statistics cache file to directory %s: %s" %
                (directory, e))

    return stat_types, docstats
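The freshness rule get_statistics() applies boils down to: the cache is stale if it is missing, or if config.py, the per-directory configuration, or any non-hidden file in the directory is newer than it. A condensed sketch of just that rule (the function name and extra_paths parameter are illustrative):

from os import listdir
from os.path import getmtime, isfile, join as path_join

def stats_cache_is_stale(cache_file_path, directory, extra_paths=()):
    # extra_paths stands in for config.py and the per-directory
    # configuration files checked by the original code.
    if not isfile(cache_file_path):
        return True
    cache_mtime = getmtime(cache_file_path)
    if any(getmtime(p) > cache_mtime for p in extra_paths):
        return True
    return any(getmtime(path_join(directory, f)) > cache_mtime
               for f in listdir(directory) if not f.startswith('.'))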
Example #50
0
    # Assumed prelude (reconstructed): before init_session() there is no
    # active session, so get_session() should raise NoSessionError
    try:
        get_session()
    except NoSessionError:
        pass

    # New "fresh" cookie session check
    init_session('127.0.0.1')

    try:
        session = get_session()
        session['foo'] = 'bar'
    except NoSessionError:
        assert False

    # Pickle check
    init_session('127.0.0.1')
    tmp_file_path = None
    try:
        tmp_file_fh, tmp_file_path = mkstemp()
        os_close(tmp_file_fh)
        session = get_session()
        session['foo'] = 'bar'
        with open(tmp_file_path, 'wb') as tmp_file:
            pickle_dump(session, tmp_file)
        del session

        with open(tmp_file_path, 'rb') as tmp_file:
            session = pickle_load(tmp_file)
            assert session['foo'] == 'bar'
    finally:
        if tmp_file_path is not None:
            remove(tmp_file_path)
def flush(self):
    with self._lock:
        if not self._cache_filename:
            return
        # pickle produces bytes, so the cache file must be opened in
        # binary mode
        with open(self._cache_filename, 'wb') as f:
            pickle_dump(self._cache, f)
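For context, flush() reads naturally as a method of a small lock-protected cache object along these lines; the class name and the other members are assumptions for illustration, and flush() from the snippet would slot in unchanged:

from pickle import dump as pickle_dump
from threading import Lock

class DiskBackedCache:
    def __init__(self, cache_filename=None):
        self._lock = Lock()
        self._cache = {}
        self._cache_filename = cache_filename

    def __setitem__(self, key, value):
        # Mutations take the same lock flush() takes, so a flush never
        # sees a half-updated cache.
        with self._lock:
            self._cache[key] = value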