Example 1
def _storeArticle(article):
    """
    _storeArticle(Dict) -> Bool

    Private helper method to save an article.

    param article:Dict -
    """
    #    try:
    #make a path according to the article's topics
    path = re_sub('http://www.spiegel.de/','', article['link']).split('/')
    filename = path.pop(-1)
    storePath = os_path_join(BASE_PATH,os_path_join(*path))
    #create directories
    if not os_path_exists(storePath):
        os_makedirs(storePath)
    #write article as json to the file
    with open(os_path_join(storePath, filename),'w') as o:
        json.dump(article, o)
    #write the article name to the log
    if os_path_isfile(BASE_PATH + 'article_log'):
        log = open(BASE_PATH + 'article_log','a')
    else:
        log = open(BASE_PATH + 'article_log','w')
    log.write(article['link'] + '\n')
    log.close()
    return True
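
The snippet above leans on the aliased imports used throughout this collection (re_sub, os_path_join, os_path_exists, os_makedirs) and a module-level BASE_PATH. A minimal, self-contained sketch of the same derive-path-and-store idea, with hypothetical values for BASE_PATH and the article, might look like this:

import json
from os import makedirs as os_makedirs
from os.path import join as os_path_join, exists as os_path_exists
from re import sub as re_sub

BASE_PATH = '/tmp/articles/'  # hypothetical storage root

article = {'link': 'http://www.spiegel.de/politik/deutschland/beispiel.html',
           'title': 'Example article'}

# Turn the URL into a relative directory path plus a filename
path = re_sub('http://www.spiegel.de/', '', article['link']).split('/')
filename = path.pop(-1)
store_path = os_path_join(BASE_PATH, *path)
if not os_path_exists(store_path):
    os_makedirs(store_path)
with open(os_path_join(store_path, filename), 'w') as out:
    json.dump(article, out)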
Example 2
def _prepare_kafka_ssl_kwargs(cert_path):
    """Prepare SSL kwargs for Kafka producer/consumer."""
    return {
        'security_protocol': 'SSL',
        'ssl_cafile': os_path_join(cert_path, 'ca-cert.pem'),
        'ssl_certfile': os_path_join(cert_path, 'client-cert.pem'),
        'ssl_keyfile': os_path_join(cert_path, 'client-key.pem')
    }
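
A hedged usage sketch of the helper above, assuming the kafka-python package and a reachable broker; the certificate directory and broker address are hypothetical:

from os.path import join as os_path_join
from kafka import KafkaProducer  # assumes kafka-python is installed

cert_path = '/etc/kafka/certs'   # hypothetical certificate directory
producer = KafkaProducer(bootstrap_servers='broker:9093',  # hypothetical broker
                         **_prepare_kafka_ssl_kwargs(cert_path))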
Example 3
    def setUp(self):
        self.tmpdir = os_path_join(gettempdir(), 'passphrase_tests')
        try:
            mkdir(self.tmpdir, 0o755)
        except FileExistsError:
            pass

        self.wordsd_file = os_path_join(self.tmpdir, 'wordsd.list')
        with open(self.wordsd_file, mode='wt', encoding='utf-8') as wordfile:
            wordfile.write('\n'.join(constants.WORDSD))
        self.words_file = os_path_join(self.tmpdir, 'words.list')
        with open(self.words_file, mode='wt', encoding='utf-8') as wordfile:
            wordfile.write('\n'.join(constants.WORDS))
Example 4
    def setup_fs_contents(self, cephfs_mntpt, filenames, filedata):
        filepaths = []
        iter_on = zip((self.mount_a, self.mount_b), filenames, filedata)

        for mount_x, filename, data in iter_on:
            if cephfs_mntpt != '/':
                mount_x.run_shell(args=['mkdir', cephfs_mntpt])
                filepaths.append(
                    os_path_join(mount_x.hostfs_mntpt, cephfs_mntpt, filename))
            else:
                filepaths.append(os_path_join(mount_x.hostfs_mntpt, filename))

            mount_x.write_file(filepaths[-1], data)
Example 5
    def make_offset_dir(self, save_dir):
        offsets_dir = os_path_join(save_dir, 'offsets')
        try:
            os_mkdir(offsets_dir)
        except FileExistsError:
            pass

        new_dir = os_path_join(offsets_dir, 'time_size_' + str(self.time_size))
        try:
            os_mkdir(new_dir)
        except FileExistsError:
            pass
        return new_dir + '/'
Example 6
def process_single_target(target_dirname):
    # Load chandat
    chandat_fpath = os_path_join(target_dirname, CHANDAT_FNAME)
    try:
        chandat_obj = get_mat_obj_from_h5py(chandat_fpath)
    except OSError:
        chandat_obj = loadmat(chandat_fpath)

    # print('chandat_obj[\'chandat\'].shape =', chandat_obj['chandat'].shape)
    # Load old_stft_obj
    old_stft_fpath = os_path_join(target_dirname, OLD_STFT_FNAME)
    if os_path_isfile(old_stft_fpath):
        try:
            old_stft_obj = get_mat_obj_from_h5py(old_stft_fpath)
        except OSError:
            old_stft_obj = loadmat(old_stft_fpath)
    else:
        old_stft_obj = r2_dnn_stft(target_dirname, saving_to_disk=False)

    # print('old_stft_obj[\'old_stft_real\'].shape =', old_stft_obj['old_stft_real'].shape)
    # print('old_stft_obj[\'old_stft_imag\'].shape =', old_stft_obj['old_stft_imag'].shape)
    new_stft_object = r3_dnn_apply(target_dirname,
                                   old_stft_obj=old_stft_obj,
                                   saving_to_disk=False)
    del old_stft_obj
    # print('process_single_scan_battery: new_stft_object[\'new_stft_real\'].shape =', new_stft_object['new_stft_real'].shape)
    chandat_dnn_object = r4_dnn_istft(target_dirname,
                                      chandat_obj=chandat_obj,
                                      new_stft_object=new_stft_object,
                                      is_saving_chandat_dnn=False)
    del new_stft_object
    chandat_image_obj = r5_dnn_image(target_dirname,
                                     chandat_obj=chandat_obj,
                                     chandat_dnn_obj=chandat_dnn_object,
                                     is_saving_chandat_image=False)
    del chandat_obj, chandat_dnn_object
    r6_dnn_image_display(target_dirname,
                         dnn_image_obj=chandat_image_obj,
                         show_fig=False)

    # Remove target-level files and folders
    for file in TARGET_FILES_TO_REMOVE:
        file_path = os_path_join(target_dirname, file)
        if os_path_isfile(file_path):
            # print('{}: Trying to remove {}'.format(SCRIPT_FNAME, file_path))
            try:
                os_remove(file_path)
            except OSError:
                raise OSError(
                    'Error: unable to remove file {}'.format(file_path))
Example 7
def init_output_writer(output_dir, portal_name, output_debug, tei_logger):
    """Initialises the class for writing output:
        1. Normal mode: valid XMLs go into a zip file, invalid ones go to output_dir directory
         while a separate file is created to store the hashsums of the zipped files (all filenames are UUIDs)
        2. Debug mode: all XMLs go into output_dir directory (all filenames are slugs from the URL)
    """
    if output_debug:
        output_writer_class = StoreFilesWithReadableName
    else:
        output_writer_class = ValidatorHasherCompressor
    output_writer = output_writer_class(
        tei_logger, os_path_join(output_dir, f'{portal_name}_not_valid'),
        os_path_join(output_dir, f'{portal_name}.zip'),
        os_path_join(output_dir, f'{portal_name}.hashsums'))
    return output_writer
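
For orientation, a small hypothetical illustration of the three output paths the initialiser composes for a portal (the writer classes themselves live in the surrounding project and are not redefined here):

from os.path import join as os_path_join

output_dir, portal_name = '/tmp/tei_output', 'example_portal'  # hypothetical values
not_valid_dir = os_path_join(output_dir, f'{portal_name}_not_valid')  # invalid XMLs
zip_path = os_path_join(output_dir, f'{portal_name}.zip')             # valid XMLs, zipped
hashsums_path = os_path_join(output_dir, f'{portal_name}.hashsums')   # hashsums of zipped files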
Example 8
def expand_bot_path(filename):
    '''
    '''
    # try "core/"
    first_try = os_path_join(os_path_dirname(__file__), filename)
    if os_path_isfile(first_try):
        return first_try

    # try "core/.."
    second_try = os_path_join(os_path_dirname(__file__), '..', filename)
    if os_path_isfile(second_try):
        return second_try

    raise IOError('File "{0}" not found under "{1}" or "{2}"'.format(
        filename, first_try, second_try))
Example 9
def expand_bot_path(filename):
    '''
    '''
    # try "core/"
    first_try = os_path_join(os_path_dirname(__file__), filename)
    if os_path_isfile(first_try):
        return first_try

    # try "core/.."
    second_try = os_path_join(os_path_dirname(__file__), '..', filename)
    if os_path_isfile(second_try):
        return second_try

    raise IOError('File "{0}" not found under "{1}" or "{2}"'.format(
                  filename, first_try, second_try))
Example 10
def get_and_save_model_dict(kernel_width, num_kernels, num_layers, index):
    model_dict = get_model_dict(kernel_width, num_kernels, num_layers)
    for k in [3, 4, 5]:
        model_dict['k'] = k
        identifier = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        model_dict['save_dir'] = os_path_join(
            'DNNs', 'fcnn_v1.6.7_{}_{}_created'.format(identifier, index),
            'k_{}'.format(k))

        # print(model_params['save_dir'])
        ensure_dir(model_dict['save_dir'])
        save_model_params(
            os_path_join(model_dict['save_dir'], MODEL_PARAMS_FNAME),
            model_dict)
        print('created ', model_dict['save_dir'])
Example 11
def _html_format(parent, source, path):
    # If external file defined
    try:
        soup = BeautifulSoup(open(os_path_join(path, source['HTML'])))
        for tag in soup.body:
            parent.append(tag)
    # If internal structure to parse
    except (TypeError, KeyError):
        for item in source:
            tag = item[0]
            try:
                attrs = {}
                children = []
                for params in item[1:]:
                    # If tag has attributes
                    if isinstance(params, dict):
                        attrs.update(params)
                    # If nested tags
                    elif isinstance(params, list):
                        children.append(params)
                    else:
                        return
                # Try to get string attribute
                string = attrs.pop('string', '')
            except IndexError:
                string = ''
            # Create new tag
            new_tag = new(parent, tag, **attrs)
            # If has string parse it
            if string:
                _str(new_tag, string)
            # If has nested tags
            _html_format(new_tag, children, path)
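
The comments above describe two accepted shapes for source: a mapping that points at an external HTML file (joined onto path), or a nested list of tag descriptions. A hypothetical illustration of both shapes; the new and _str helpers belong to the surrounding module and are not redefined here:

# External file: the 'HTML' value is joined onto `path` and parsed with BeautifulSoup
source_external = {'HTML': 'snippet.html'}

# Internal structure: [tag-name, optional attribute dict, optional nested list]
# The special 'string' attribute becomes the tag's text content.
source_internal = [
    ['div', {'class': 'note', 'string': 'Hello'},
        [['span', {'string': 'nested'}]]],
]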
Example 12
    def __init__(self):
        self.allowed_characters = {
            "alphabets_lower": ",".join(string_ascii_lowercase).split(","),
            "alphabets_upper": ",".join(string_ascii_uppercase).split(","),
            "numbers": [str(number) for number in range(10)],
            "symbols": [
                "!",
                "@",
                "#",
                "$",
                "%",
                "&",
                "*",
                "-",
                "_",
                "+",
                "=",
            ],  # can't use comma since it actually exists in the list
        }

        self.file_name = os_path_join(os_path_expanduser("~"),
                                      ".ron_password_manager")
        if not os_path_exists(self.file_name):
            # generate new random string
            self.secret = self.generate_random_password()
            self.passwords = {}
            self.save_state()
        else:
            with open(self.file_name, "r") as json_file:
                json_output = json_load(json_file)
                self.secret = json_output.get("secret")
                self.passwords = json_output.get("passwords")
        self.vigenere = VigenereCipher(secret=self.secret)
Example 13
    def load_checkpoint(self):
        import pandas as pd

        dataframe_path = os_path_join(self.project_dir, 'train_test_checkpoint.csv')
        dataframe = pd.read_csv(dataframe_path)
        temp_checkpoint = dataframe.to_dict()

        checkpoint = {'last_song_id': temp_checkpoint['last_song_id'][0],
                      'last_epoch': temp_checkpoint['last_epoch'][0],
                      'training': temp_checkpoint['training'][0],
                      'testing': temp_checkpoint['testing'][0],
                      'training_counter': temp_checkpoint['training_counter'][0],
                      'training_samples': temp_checkpoint['training_samples'][0],
                      'validation_counter': temp_checkpoint['validation_counter'][0],
                      'validation_samples': temp_checkpoint['validation_samples'][0],
                      'testing_counter': temp_checkpoint['testing_counter'][0],
                      'testing_samples': temp_checkpoint['testing_samples'][0]
                      }
        self.latest_epoch = temp_checkpoint['last_epoch'][0]
        self.training_counter = temp_checkpoint['training_counter'][0]
        self.training_samples = temp_checkpoint['training_samples'][0]
        self.validation_counter = temp_checkpoint['validation_counter'][0]
        self.validation_samples = temp_checkpoint['validation_samples'][0]
        self.testing_counter = temp_checkpoint['testing_counter'][0]
        self.testing_samples = temp_checkpoint['testing_samples'][0]
        return checkpoint
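
A hypothetical one-row checkpoint CSV with the columns the loader expects, written with pandas so the round trip matches the read_csv call above:

import pandas as pd

row = {'last_song_id': 42, 'last_epoch': 3, 'training': 1, 'testing': 0,
       'training_counter': 120, 'training_samples': 10000,
       'validation_counter': 30, 'validation_samples': 2500,
       'testing_counter': 0, 'testing_samples': 2500}
pd.DataFrame([row]).to_csv('train_test_checkpoint.csv', index=False)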
Example 14
def _html_format(parent, source, path):
    # If external file defined
    try:
        soup = BeautifulSoup(open(os_path_join(path, source['HTML'])))
        for tag in soup.body:
            parent.append(tag)
    # If internal structure to parse
    except (TypeError, KeyError):
        for item in source:
            tag = item[0]
            try:
                attrs = {}
                children = []
                for params in item[1:]:
                    # If tag has attributes
                    if isinstance(params, dict):
                        attrs.update(params)
                    # If nested tags
                    elif isinstance(params, list):
                        children.append(params)
                    else:
                        return
                # Try to get string attribute
                string = attrs.pop('string', '')
            except IndexError:
                string = ''
            # Create new tag
            new_tag = new(parent, tag, **attrs)
            # If has string parse it
            if string:
                _str(new_tag, string)
            # If has nested tags
            _html_format(new_tag, children, path)
Example 15
    def process_one_file(self, url, desired_filename, filename_suff,
                         raw_xml_str):

        xml_etree = etree.fromstring(raw_xml_str)
        xml_filename = check_for_filename_collision(url, desired_filename,
                                                    filename_suff,
                                                    self._assigned_filenames,
                                                    self._tei_logger)
        out_filename = os_path_basename(xml_filename)
        try:
            self._validator.assert_(xml_etree)
            valid = True
        except AssertionError as err:
            self._tei_logger.log('ERROR', 'TEI validation error:', url,
                                 out_filename, err)
            valid = False
        if valid:
            digests = self._hasher.hash_file(BytesIO(raw_xml_str))
            self._zipfile.writestr(xml_filename, raw_xml_str)
            print(out_filename,
                  url,
                  *digests,
                  sep='\t',
                  file=self._hashsums_fh)
        else:
            with open(os_path_join(self._bad_urls_dir, out_filename),
                      'wb') as fh:
                fh.write(raw_xml_str)

        return xml_filename
Example 16
def _image(file, sub_folder, rotate=None):
    pixmap = QPixmap(os_path_join(_BASE_PATH, sub_folder, file))
    if rotate is None:
        return pixmap
    transform = QTransform()
    transform.rotate(rotate)
    return pixmap.transformed(transform)
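
A minimal sketch of the same load-and-rotate idea, assuming a PyQt5 environment and hypothetical artwork paths; a QPixmap needs a running Q(Gui)Application and simply yields a null pixmap when the file is missing:

from os.path import join as os_path_join
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QPixmap, QTransform

app = QApplication([])  # must exist before any QPixmap is created

_BASE_PATH = os_path_join('resources', 'artwork')  # hypothetical artwork root
pixmap = QPixmap(os_path_join(_BASE_PATH, 'buttons', 'play.png'))  # hypothetical file

transform = QTransform()
transform.rotate(90)
rotated = pixmap.transformed(transform)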
Example 17
def init_portal(log_dir, output_dir, run_params, portal_name, tei_logger, warc_level_params, rest_config_params):
    """Init variables for processing a portal: HTML Content Tree (This is the only public function of this file)"""
    _ = log_dir, run_params, warc_level_params  # Silence IDE

    article_root_params, decompose_spec, excluded_tags_spec = rest_config_params[1:4]

    # The internal structure of the accumulator is defined in nested_dict function
    accumulator = nested_dict()
    # No log files and no per-article post-processing are needed here
    after_article_fun, after_article_params, log_file_names_and_modes = dummy_fun, (), ()
    # Filenames for the final function
    final_filenames_and_modes = ((os_path_join(output_dir, f'{portal_name}_tree.txt'), 'w'),)
    # Run this function after all articles are processed
    final_fun = final_tree
    # Process articles one by one with this function
    process_article_fun = process_article
    # From the loaded portal-specific configuration
    #  - TEI logger for logging
    #  - article root params for find_all
    #  - portal-specific decompose functions
    #  - portal-specific simplification rules for the different parts of the attributes,
    #     merging the irrelevant variations of values
    # Task specific params:
    #  - (sub)function to run after cleaning the article up (decompose unnecessary parts)
    #  - the parameters for the subfunction (accumulator)
    process_article_params = (tei_logger, article_root_params, decompose_spec, excluded_tags_spec,
                              collect_tags_recursively, (accumulator,))
    # Runner function (some tasks can only be run in single-process mode)
    run_fun = run_single_process

    return accumulator, after_article_fun, after_article_params, log_file_names_and_modes, final_filenames_and_modes, \
        final_fun, process_article_fun, process_article_params, run_fun
Example 18
def increase_index_and_move(src_folder, dst_folder, file, extension, src_index, dst_index, max_index):
    # Helper function to format the full source and destination path
    path = lambda f, i: os_path_join(f, extension.format(file, i))
    # If the destination file's index is less than
    # the maximum number of backups allowed
    if src_index <= max_index:
        src = path(src_folder, src_index)
        dst = path(dst_folder, dst_index)
        # If the destination file exists
        if os_path_isfile(dst):
            # Call this function recursively
            increase_index_and_move(
                src_folder=dst_folder,
                dst_folder=dst_folder,
                file=file,
                extension=extension,
                src_index=dst_index,
                dst_index=dst_index + 1,
                max_index=max_index,
            )
        cleanup = ""
    # If the destination file's index is equal to or
    # greater than the maximum number of backups allowed
    else:
        src = path(src_folder, max_index - 1)
        dst = path(dst_folder, max_index)
        cleanup = path(src_folder, src_index)
    # Move source file to destination
    try:
        shutil_move(src, dst)
        return cleanup
    # If the source file was not found
    except FileNotFoundError:
        return ""
Example 19
def calculate_package_search_path(jvm_package_name, source_roots):
  """Return Paths for directories where the given JVMPackageName might exist."""
  rel_package_dir = jvm_package_name.name.replace('.', os_sep)
  if not rel_package_dir.endswith(os_sep):
    rel_package_dir += os_sep
  specs = [os_path_join(srcroot, rel_package_dir) for srcroot in source_roots.srcroots]
  return PathGlobs.create_from_specs('', specs)
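
The heart of this rule is the package-to-directory translation; a small standalone illustration with a hypothetical package name and source roots:

from os import sep as os_sep
from os.path import join as os_path_join

jvm_package = 'org.pantsbuild.example'
srcroots = ['src/java', 'src/scala']  # hypothetical source roots

rel_package_dir = jvm_package.replace('.', os_sep)
specs = [os_path_join(root, rel_package_dir) for root in srcroots]
# e.g. ['src/java/org/pantsbuild/example', 'src/scala/org/pantsbuild/example']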
Example 20
def create_sub_dir(playlist_dir: str, sub_folder_name: str):
    new_path = os_path_join(playlist_dir, sub_folder_name)
    try:
        os_mkdir(new_path)
    except FileExistsError:
        pass
    return new_path
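
An equivalent, slightly more compact variant using os.makedirs with exist_ok=True, shown only as a sketch of the same idea:

from os import makedirs as os_makedirs
from os.path import join as os_path_join

def create_sub_dir_makedirs(playlist_dir: str, sub_folder_name: str) -> str:
    new_path = os_path_join(playlist_dir, sub_folder_name)
    os_makedirs(new_path, exist_ok=True)  # also creates missing parent folders
    return new_path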
Example 21
    def __init__(self, package_path, module_name):
        '''
        Constructor.
        
        Args:
            package_path: str
                the path to the embedding package.
            module_name: str
                the name of this embedded module.
        '''
        super(ModuleParser, self).__init__()

        # gets the module Python content
        with open(os_path_join(package_path, module_name), 'r') as fp:
            self.module_content = fp.read()

        # evaluates corresponding lines
        self.module_lines = self.module_content.split('\n')

        # parses it
        self.module_node = ast.parse(self.module_content)
        self.docstring = ast.get_docstring(self.module_node)

        # prepares internal descriptive structure
        self.module_descr = ModuleDescr()
        self.module_name = module_name

        # and visits the ast tree
        self.visit(self.module_node)
Example 22
def process_each_frequency_keras(model_dirname, stft, frequency):
    '''
    Setter method on stft.
    '''
    # 1. Instantiate Neural Network Model
    model_save_fpath = os_path_join(model_dirname, 'k_' + str(frequency),
                                    MODEL_SAVE_FNAME)
    # print('model_save_fpath =', model_save_fpath)
    loaded_model_pipeline = joblib_load(model_save_fpath)

    # 2. Get X_test
    LOGGER.debug('r3.process_each_frequency_keras: stft.shape = {}'.format(
        stft.shape))
    aperture_data = stft[:, :, frequency]  # or stft_frequency

    # 2.1. normalize each row by its infinity (max-abs) norm
    aperture_data_norm = np_linalg_norm(aperture_data, ord=np_inf, axis=1)
    aperture_data /= aperture_data_norm[:, np_newaxis]

    X_test = aperture_data

    # 3. Predict
    y_hat = loaded_model_pipeline.predict(X_test)

    # 4. Postprocess on y_hat
    aperture_data_new = y_hat

    # rescale the data and store new data in stft
    stft[:, :,
         frequency] = aperture_data_new * aperture_data_norm[:, np_newaxis]
Example 23
def rename_files(startpath: str,
                 pattern_string: str,
                 to: str,
                 ignore_case=True,
                 recursive=False) -> bool:

    pattern: Pattern[AnyStr]
    if ignore_case:
        pattern = re.compile(pattern_string, re.IGNORECASE)
    else:
        pattern = re.compile(pattern_string)

    changed_happend = False

    if recursive:
        for root, dirs, files in os_walk(startpath):
            for f in files:
                path = os_path_join(root, f)
                result = rename(pattern, to, path)

                if result:
                    changed_happend = True
    else:
        # os_listdir returns bare names, so join them onto startpath first
        for name in os_listdir(startpath):
            path = os_path_join(startpath, name)
            if Path(path).is_file():
                result = rename(pattern, to, path)

                if result:
                    changed_happend = True

    return changed_happend
Example 24
    def add_options(self, config, qa_keyword_path):
        """
        Shows which options are possible to the end user
        """
        # loading all keyword dictionaries, for all the available project ids
        # and version ids, in addition to the default ones:
        temp = {}
        for filename in listdir(qa_keyword_path):
            if filename.endswith('.json'):
                project_id, version_id = filename.split('_')[:2]
                temp[project_id] = temp.setdefault(project_id, {})
                with open(os_path_join(qa_keyword_path, filename), 'r') as f:
                    temp[project_id][version_id] = json.load(f)

        # combining all keyword dictionaries to the configurations with the 
        # respective project ids and version ids, including the default ones:
        for project_id in config.keys():
            for version_id in config[project_id].keys():

                for key in temp[project_id][version_id].keys():
                    if key in config[project_id][version_id].keys():
                        new_option = config[project_id][version_id][key] 
                    else:
                        new_option = "what is the " + key + "?"  
                    extra_option = "none, "
                    for token in temp[project_id][version_id][key]:
                        extra_option += token + ", "
                    extra_option = extra_option.strip().strip(',')
                    config[project_id][version_id][key] = [new_option, extra_option]

        return config
Example 25
def _image(file, sub_folder, rotate=None):
    pixmap = QPixmap(os_path_join(_BASE_PATH, sub_folder, file))
    if rotate is None:
        return pixmap
    transform = QTransform()
    transform.rotate(rotate)
    return pixmap.transformed(transform)
Example 26
def create_one_model_keras(models_dirname, model_type, model_version,
                           model_index):
    '''
    Args:
        models_dirname: str, the name of the directory containing
                        all neural network models. It's a short dirname
                        relative to the project root.
        model_type: str, denoting the type of the model. E.g. alexnet, mlp.
        model_version: int,
    Returns:
        dirpath: str. The dirpath of the created model directory.
    '''
    # 1. Come up with a new model name
    timestamp = datetime_datetime.now().strftime('%Y%m%d%H%M%S')
    model_dirname = '{}_v{}_{}_{}_created'.format(model_type, model_version,
                                                  timestamp, model_index)
    model_dirpath = os_path_join(models_dirname, model_dirname)
    if os_path_isdir(model_dirpath):
        raise OSError(
            '{}: model folder {} should not exist, but it does'.format(
                __name__, model_dirpath))

    # 2. Make that directory
    os_mkdir(model_dirpath)

    return model_dirpath
Example 27
def load_single_value(process_scripts_dirpath, fname):
    path = os_path_join(process_scripts_dirpath, fname)

    with open(path, 'r') as f:
        value = float(f.read())

    return value
Example 28
def predict_poi_and_save_in_csv(playlist_name):
    base_dir = 'D:\\Documents\\Thesis\\Project Skaterbot\\Playlists\\Mixxx\\'
    playlist_name = playlist_name
    playlist_path = os_path_join(base_dir, playlist_name)

    # Step 1
    print('----------------Decoding MP3 files to WAV----------------')
    playlist = decode_playlist(playlist_path)
    # Step 2
    if len(playlist) > 0:
        print('--------------Creating Transforms from WAVs--------------')
        transform_playlist(playlist_path)
    else:
        print('----------Skipped Creating Transforms from WAVs----------')

    # Step 3
    print('--------------Predicting Relative Locations--------------')
    playlist_locator = PlaylistLocator2Predictor(6, playlist_path)
    playlist_sections = playlist_locator.predict_4_poi_locations()
    df = DataFrame(playlist_sections)
    print('BEFORE ALL:\n', df)
    loc_csv_path = playlist_locator.save_playlist_predictions(playlist_sections)
    # Step 4
    print('----------Predicting Precise Points of Interest----------')
    song_preciser = PreciserPredictor(8, playlist_path, 'cqt')
    new_playlist_sections = song_preciser.predict_playlist_precise_poi(playlist_sections)
    prec_csv_path = song_preciser.save_playlist_predictions(new_playlist_sections)

    return (loc_csv_path, playlist_sections), (prec_csv_path, new_playlist_sections)
Example 29
    def txts_to_tracking_csv(self,
                             txt_dir='',
                             csv_name='all-tracking.csv',
                             dataset_info_df=None):
        # Check the directory from which the txt-files will be loaded
        txt_dir = self._standard_check('', txt_dir)

        # List all txt-files
        txts_list = [x for x in os_listdir(txt_dir) if '.txt' in x]

        dfs_list = []
        # For every txt-file receive a DataFrame and put it in a list
        for txt_file in txts_list:
            np_data = self.read_txt(txt_file, txt_dir)

            np_data = self.__add_objectless_frames(np_data, dataset_info_df)
            df = DataFrame(np_data, columns=self.original_format_column_names)

            dfs_list.append(df)
        # Unify all DataFrames of the list
        df = pd_concat(dfs_list)

        # Prepare the CSV's path
        csv_path = os_path_join(txt_dir, csv_name)
        # Save Unified DataFrame to csv-file
        df.to_csv(csv_path)
Example 30
def setup_json_scheduler(build_root, native):
  """Return a build graph and scheduler configured for BLD.json files under the given build root.

  :rtype :class:`pants.engine.scheduler.LocalScheduler`
  """

  symbol_table = ExampleTable()

  # Register "literal" subjects required for these tasks.
  # TODO: Replace with `Subsystems`.
  address_mapper = AddressMapper(build_patterns=('BLD.json',),
                                 parser=JsonParser(symbol_table))

  work_dir = os_path_join(build_root, '.pants.d')
  project_tree = FileSystemProjectTree(build_root)

  goals = {
      'compile': Classpath,
      # TODO: to allow for running resolve alone, should split out a distinct 'IvyReport' product.
      'resolve': Classpath,
      'list': BuildFileAddresses,
      GenGoal.name(): GenGoal,
      'ls': Snapshot,
      'cat': FilesContent,
    }
  tasks = [
      # Codegen
      GenGoal.rule(),
      gen_apache_java_thrift,
      gen_apache_python_thrift,
      gen_scrooge_scala_thrift,
      gen_scrooge_java_thrift,
      SingletonRule(Scrooge, Scrooge(Address.parse('src/scala/scrooge')))
    ] + [
      # scala dependency inference
      reify_scala_sources,
      select_package_address,
      calculate_package_search_path,
      SingletonRule(SourceRoots, SourceRoots(('src/java','src/scala'))),
    ] + [
      # Remote dependency resolution
      ivy_resolve,
      select_rev,
    ] + [
      # Compilers
      isolate_resources,
      write_name_file,
      javac,
      scalac,
    ] + (
      create_graph_rules(address_mapper, symbol_table)
    ) + (
      create_fs_rules()
    )

  return LocalScheduler(work_dir,
                        goals,
                        tasks,
                        project_tree,
                        native)
Example 31
    def stop(self):
        """Terminate LXC container.

        """
        lxc_id = str(int(self.port) - 8080)
        process = Popen(["lxc-stop", "-n", lxc_id], stdout=PIPE, close_fds=True)
        process.wait()
        process = Popen(['lxc-wait', '-n', lxc_id, '-s', 'STOPPED'], stdout=PIPE, close_fds=True)
        process.wait()
        # try to remove LXC container files in case lxc-wait did not do this for some reason
        try:
            lxc_env = os_path_join(self.build_path, "lxc", str(self.id))
            shutil.rmtree(lxc_env)
        except Exception:
            pass
        # try to restore tty settings
        try:
            Popen(["stty", "sane"])
        except Exception:
            pass

        self.waitoff(timeout=60)

        self.status = False

        return True
Example 32
    def __init__(self, dataset_dir: str):
        super().__init__(dataset_dir)

        decoded_csv_dir = os_path_join(self.original_format_dir, 'decoded')
        os_makedirs(decoded_csv_dir, exist_ok=True)

        self.mask_handler = AirBus_Mask_Handler()
Example 33
    def start(self, wait_on=True):
        """Create and launch LXC container with switchpp.

        Args:
            wait_on(bool):  Indicates if wait for device status

        """
        self.class_logger.info(
            "Starting LXC for switch with ip:%s port:%s..." %
            (self.ipaddr, self.port))

        # Check if it is an altamodel.
        if os_path_isfile(os_path_join(self.build_path, "bin", "ons-fulcrum")):
            self.class_logger.info("AltaModel is found.")
            self.__class__.SWITCH_APP = {"FulcrumApp"}

        log_wrap_out, log_wrap_err = loggers.pipe_loggers(
            "switchpp%s" % (self.id, ), self.popen_logfile)

        # sudo env LD_LIBRARY_PATH=$PWD/lib ./bin/ons-lxc -n 1 -i br0 -a 10.0.5.101/24 -p 52
        lxc_id = str(int(self.port) - 8080)
        command = [
            "./ons-ctl", "start", "-n", lxc_id, "-i", self.vlab_iface, "-a",
            "%s/24" % self.ipaddr, "-p",
            str(self.ports_count)
        ]
        self.class_logger.debug("LXC start command: %s" % (" ".join(command)))
        process = Popen(command,
                        stdout=log_wrap_out,
                        stderr=log_wrap_err,
                        close_fds=True,
                        cwd=os_path_join(self.build_path, "bin"))
        process = Popen(['lxc-wait', '-n', lxc_id, '-s', 'RUNNING'],
                        stdout=log_wrap_out,
                        stderr=log_wrap_err,
                        close_fds=True)
        process.wait()

        # let's wait until device is up and running:
        if wait_on:
            time.sleep(5)
            self.waiton(timeout=self.startup_time)

        # Set On(True) status
        self.status = True

        return self.xmlproxy
Example 34
    def __init__(self):
        self.file_name = os_path_join(os_path_expanduser("~"), ".list_manager")
        if not os_path_exists(self.file_name):
            self.lists = {}
        else:
            with open(self.file_name, "r") as json_input_file:
                json_output = json_load(json_input_file)
                self.lists = json_output.get("lists")
Example 35
def getResourcePath(filename):
    try:
        basePath = sys._MEIPASS
    except Exception:
        basePath = ''
    path = os_path_join(basePath, filename)
    print(path)
    return path
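
sys._MEIPASS only exists when the script runs from a PyInstaller bundle, so outside a bundle the helper above falls back to a relative path. A tiny, hypothetical usage sketch:

resource_path = getResourcePath('config.ini')  # hypothetical bundled resource
# In a plain interpreter this is just 'config.ini'; in a PyInstaller one-file
# build it resolves inside the temporary sys._MEIPASS directory.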
Example 36
    def _create(self, enable_ssl, cert_path):
        kwargs = {}
        if enable_ssl:
            kwargs.update({
                'security_protocol': 'SSL',
                'ssl_cafile': os_path_join(cert_path, 'ca-cert.pem'),
                'ssl_certfile': os_path_join(cert_path, 'client-cert.pem'),
                'ssl_keyfile': os_path_join(cert_path, 'client-key.pem')
            })
        self._producer = KafkaProducer(bootstrap_servers=self._location,
                                       retries=5,
                                       compression_type=self._compression,
                                       **kwargs)
Example 37
def calculate_package_search_path(jvm_package_name, source_roots):
    """Return PathGlobs to match directories where the given JVMPackageName might exist."""
    rel_package_dir = jvm_package_name.name.replace('.', os_sep)
    specs = [
        os_path_join(srcroot, rel_package_dir)
        for srcroot in source_roots.srcroots
    ]
    return PathGlobs(include=specs)
Example 38
def loadArticle(path):
    """
    loadArticle(String) -> Dict
    
    Loads one article.

    param path:String - the article path.
    """
    with open(os_path_join(path)) as art: 
        article = json.load(art)
    return article
Example 39
def setup_json_scheduler(build_root, native):
  """Return a build graph and scheduler configured for BLD.json files under the given build root.

  :rtype :class:`pants.engine.scheduler.SchedulerSession`
  """

  symbol_table = ExampleTable()

  # Register "literal" subjects required for these rules.
  address_mapper = AddressMapper(build_patterns=('BLD.json',),
                                 parser=JsonParser(symbol_table))

  work_dir = os_path_join(build_root, '.pants.d')
  project_tree = FileSystemProjectTree(build_root)

  rules = [
      # Codegen
      GenGoal.rule(),
      gen_apache_java_thrift,
      gen_apache_python_thrift,
      gen_scrooge_scala_thrift,
      gen_scrooge_java_thrift,
      SingletonRule(Scrooge, Scrooge(Address.parse('src/scala/scrooge')))
    ] + [
      # scala dependency inference
      reify_scala_sources,
      select_package_address,
      calculate_package_search_path,
      SingletonRule(SourceRoots, SourceRoots(('src/java','src/scala'))),
    ] + [
      # Remote dependency resolution
      ivy_resolve,
      select_rev,
    ] + [
      # Compilers
      isolate_resources,
      write_name_file,
      javac,
      scalac,
    ] + (
      create_graph_rules(address_mapper, symbol_table)
    ) + (
      create_fs_rules()
    )

  scheduler = Scheduler(native,
                        project_tree,
                        work_dir,
                        rules,
                        DEFAULT_EXECUTION_OPTIONS,
                        None,
                        None)
  return scheduler.new_session()
Example 40
def move_files_to_folder(*args, **kwargs):
    # Maximum backup allowed by user
    BACKUP_COUNT = bpy.context.user_preferences.filepaths.save_version
    # If saving backups option is 'ON'
    if BACKUP_COUNT:
        # Function level constants
        PATH = bpy.data.filepath  # Full path
        FILE = bpy.path.display_name_from_filepath(PATH)  # File name
        CWD = os_path_dirname(PATH)  # Current Working Directory
        CBD = os_path_join(CWD, BACKUP_FOLDER_NAME)  # Current Backup Directory
        REXT = r"{}\.blend(\d+)$".format(FILE)  # Regex to catch backups
        EXT = "{}.blend{}"  # Extension placeholder
        OLD = EXT.format(FILE, BACKUP_COUNT)  # Oldest backup name

        # Create backup directory if not exists
        try:
            os_makedirs(CBD)
        except OSError as e:
            if e.errno != EEXIST:
                # If an error other than "dir already exists" appears, re-raise
                # the caught error and print out the traceback
                raise OSError("\n".join(traceback_extract_stack())) from None

        # Get all files in current directory, move them to the
        # backup folder, if they are backup files and maintain
        # the backup folder's instances
        for filename in reversed(sorted(os_listdir(CWD))):
            # If file is a backup file
            try:
                index = int(re_findall(REXT, filename)[-1])
                # If the file's index is greater than the current number of
                # backups allowed, the full path of the file is returned and
                # then deleted; otherwise os.remove raises FileNotFoundError
                os_remove(
                    increase_index_and_move(
                        src_folder=CWD,
                        dst_folder=CBD,
                        file=FILE,
                        extension=EXT,
                        src_index=index,
                        dst_index=index,
                        max_index=BACKUP_COUNT,
                    )
                )
            # If file is not a backup file
            except (IndexError, FileNotFoundError):
                pass

        # If everything went fine, print out information
        if PRINT_INFO:
            print(INFO_TEXT.format(CWD, CBD))
Example 41
    def setUp(self):
        '''Create a temporary directory with sample configuration files'''

        self.tmp_dir    = mkdtemp()
        self.fileNames  = [ ]
        for k in testConfig.keys():
            # create the file name
            fileName = os_path_join(self.tmp_dir, k + ".yaml")

            # append to the list
            self.fileNames.append(fileName)

            # write to the file
            f = open(fileName, 'w')
            f.write(testConfig[k].replace("<<DIR>>", self.tmp_dir))
            f.close()

        # write the keychain
        keychainFile = os_path_join(self.tmp_dir, "keychain.yaml")
        f = open(keychainFile, 'w')
        f.write(keychain_conf)
        f.close()
Example 42
    def _keychainFind(self):
        """Finds the keychain file"""

        # iterate each dir
        for d in self._dirNames:

            # build the expected name
            fullPath = os_path_join(d, self._fileName)

            # if exists, return that
            if exists(fullPath):
                return fullPath

        # fail
        #####raise IOError('Keychain file [%s] not found' % self._fileName)
        return None
Example 43
def _replace(parent, keyindex, child, path, loader, depends):
    try:
        # FIXME: if external file changed -> change the whole file
        #        check for all special features and find out how
        #        to store them properly in the cache files
        filepath = os_path_join(path, child['FILE'])
        depends.add(filepath)
        with open(filepath, encoding='utf-8') as file:
            content = file.read()
            # Replace variables with values if there is any
            for key, value in child.get('VARS', {}).items():
                content = content.replace(key, value)
            # Reassign new child into parent
            parent[keyindex] = loader(content)
    except (KeyError, TypeError):
        pass
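
The child mapping consumed above is expected to carry an external FILE name plus an optional VARS table of textual replacements; a hypothetical value illustrating that shape:

child = {
    'FILE': 'fragment.yaml',                            # hypothetical external file
    'VARS': {'<<TITLE>>': 'Demo', '<<YEAR>>': '2016'},  # placeholder -> value
}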
Example 44
    def __init__(self, folder, file, reset=False, lazy_update=False):
        self._file = os_path_join(folder, file)
        self._cache = cache = {}
        self._last = None
        if reset:
            return
        # If cache file already exists
        try:
            with open(self._file, 'rb') as file:
                for filepath, checksum in pickle_load(file).items():
                    # If file still exists
                    if os_path_isfile(filepath):
                        cache[filepath] = checksum
                if lazy_update:
                    self._lcache = cache.copy()
        except (FileNotFoundError, EOFError):
            pass
Example 45
def loadArticles(categories=[]):
    """
    loadArticles([String]=[]) -> [Dict]
    
    Loads all articles from the database.

    param categories:[String] - the categories for the articles.
    """
    articles = []
    articlePaths = []
    with open(os_path_join(BASE_PATH, 'article_log'), 'r') as log:
        for line in log:
            articlePath = re_sub('http://www.spiegel.de/','',line.strip())
            for category in categories:
                if category in articlePath.split('/'):
                    articlePaths.append(articlePath)
                    break
    for articlePath in articlePaths:
        articles.append(loadArticle(articlePath))
    return articles
Example 46
def calculate_package_search_path(jvm_package_name, source_roots):
  """Return Paths for directories where the given JVMPackageName might exist."""
  rel_package_dir = jvm_package_name.name.replace('.', os_sep)
  return Paths([Path(os_path_join(srcroot, rel_package_dir))
                for srcroot in source_roots.srcroots])
Example 47
def collect(infolder,
            line  = comment_LINE,
            block = comment_BLOCK,
            tags  = WORDS,
            marks = MARKS,
            include=INCLUDE,
            exclude=EXCLUDE,
            overwrite=False):
    # Process block comment marks
    blocks_open, blocks_close = comment_block_comments(block)

    # TODO: Make hidden files OS independent, probably using
    #       https://docs.python.org/3.4/library/tempfile.html ?

    # FIXME: for some reason, if a comment-type ever existed in the TODO
    #        file, but after a while its posts are all gone, the keyword
    #        still remains there, according to the current TODO file,
    #        which still has the "QUESTIONS" keyword and comment

    # TODO: Add explicit-remove/browsing capabilities of the .*_cache files
    #       (for example: if git reverted changes --> remove hash from cache file)
    #       The best solution would be a complete CLI tool, to read and manage
    #       and use the cutils command line tools

    # Compile regular expression patterns
    pattern1 = re_compile(_COMMENT.format(r'|'.join(map(comment_escape, line)),
                                          blocks_open,
                                          r'|'.join(map(comment_escape, tags)),
                                          r'|'.join(map(comment_escape, marks)),
                                          blocks_close),
                         flags=re_IGNORECASE | re_DOTALL | re_MULTILINE | re_VERBOSE)
    pattern2 = re_compile(r'\n')

    # Get previously generated collection of all posts
    COLLECTED = os_path_join(infolder, '.ccom_todo')
    try:
        with open(COLLECTED, 'rb') as file:
            collected = pickle_load(file)
    except (FileNotFoundError, EOFError):
        collected = table_Table(row=OrderedDict)

    # Clear cache -- remove all non-existing files
    for filepath in collected.rows():
        if not os_path_isfile(filepath):
            del collected[filepath]

    # Exception containers
    except_dirs  = []  # relative path to dir from root
    except_files = []  # relative path to file from root
    except_names = []  # filename (with extension) anywhere
    except_exts  = []  # extension anywhere

    # If 'exclude' is dictionary like object
    try:
        _empty = ()
        # Exceptions relative to root
        for key, container in zip(('folders', 'files'),
                                  (except_dirs, except_files)):
            container.extend(os_path_join(infolder, p) for p in exclude.get(key, _empty))
        # Exceptions anywhere
        for key, container in zip(('names', 'extensions'),
                                  (except_names, except_exts)):
            container.extend(exclude.get(key, _empty))
    # If 'exclude' is an iterable object
    except AttributeError:
        except_names = exclude

    # Include containers
    permit_names = []  # filename (with extension) anywhere
    permit_exts  = []  # extension anywhere

    # If 'include' is dictionary like object
    try:
        _empty = ()
        # Includes anywhere
        for key, container in zip(('names', 'extensions'),
                                  (permit_names, permit_exts)):
            container.extend(include.get(key, _empty))
    # If 'include' is an iterable object
    except AttributeError:
        permit_names = include

    # Scan through all files and folders
    with check_Checker(infolder, file='.ccom_cache') as checker:
        for root, dirs, filenames in os_walk(infolder):
            # If skip this folder and all subfolders
            if root in except_dirs:
                dirs.clear()
                continue
            # Check all files in folder
            for filename in filenames:
                filepath = os_path_join(root, filename)[2:]
                # If skip this exact file
                if filepath in except_files:
                    continue
                name, extension = os_path_splitext(filename)
                # If the file or extension is not banned, it is on the
                # white-list, it changed since the last check, and
                # this is not an overwrite call
                if (filename not in except_names and
                    extension not in except_exts and
                    (extension in permit_exts or filename in permit_names) and
                    checker.ischanged(filepath) and
                    not overwrite):
                    with open(filepath, encoding='utf-8') as file:
                        _search(collected, pattern1, pattern2,
                                file.read(), filepath, marks)

    # Save collection of all posts
    with open(COLLECTED, 'wb') as file:
        pickle_dump(collected, file, pickle_HIGHEST_PROTOCOL)

    # Open the todo file and write out the results
    with open('TODO', 'w', encoding='utf-8') as todo:
        # Make it compatible with cver.py
        todo.write('## INFO ##\n'*2)
        # Format TODO file as yaml
        for key in itertools_chain(tags, marks.values()):
            KEY = key.upper()
            try:
                types = collected[KEY].items()
                len_pos = todo.tell()
                # Offset for separator comment and
                # leading and trailing new lines
                todo.write(' '*82)
                todo.write('{}:\n'.format(KEY))
                index = 1
                for filename, posts in types:
                    for i, (linenumber, content) in enumerate(posts, start=index):
                        todo.write(_ITEM.format(msg='\n'.join(content),
                                                index=i,
                                                short=_SHORT,
                                                long=_SHORT*2,
                                                sep='- '*38,
                                                file=filename,
                                                line=linenumber))
                    index = i + 1
                todo.write('\n')
                # Move back to tag separator comment
                todo.seek(len_pos)
                todo.write('\n#{:-^78}#\n'.format(
                    ' {} POSTS IN {} FILES '.format(index - 1, len(types))))
                # Move back to the end
                todo.seek(0, 2)
            except KeyError:
                continue
        print('CCOM: placed {!r}'.format(os_path_join(infolder, 'TODO')))
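
The include/exclude handling above accepts either a mapping keyed by 'folders', 'files', 'names' and 'extensions', or a plain iterable that is treated as a list of banned filenames. Hypothetical arguments illustrating both shapes:

exclude = {'folders': ['build'],
           'files': ['setup.py'],
           'names': ['conftest.py'],
           'extensions': ['.md']}
include = {'extensions': ['.py', '.c']}

# Alternatively, a plain iterable of filenames:
exclude_simple = ('README', 'LICENSE')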
Example 48
def setup_json_scheduler(build_root, native):
  """Return a build graph and scheduler configured for BLD.json files under the given build root.

  :rtype :class:`pants.engine.scheduler.LocalScheduler`
  """

  symbol_table = ExampleTable()

  # Register "literal" subjects required for these tasks.
  # TODO: Replace with `Subsystems`.
  address_mapper = AddressMapper(build_patterns=('BLD.json',),
                                 parser=JsonParser(symbol_table))

  work_dir = os_path_join(build_root, '.pants.d')
  project_tree = FileSystemProjectTree(build_root)

  goals = {
      'compile': Classpath,
      # TODO: to allow for running resolve alone, should split out a distinct 'IvyReport' product.
      'resolve': Classpath,
      'list': BuildFileAddresses,
      GenGoal.name(): GenGoal,
      'ls': Snapshot,
      'cat': FilesContent,
    }
  tasks = [
      # Codegen
      GenGoal.rule(),
      gen_apache_java_thrift,
      gen_apache_python_thrift,
      gen_scrooge_scala_thrift,
      gen_scrooge_java_thrift,
      SingletonRule(Scrooge, Scrooge(Address.parse('src/scala/scrooge')))
    ] + [
      # scala dependency inference
      reify_scala_sources,
      extract_scala_imports,
      select_package_address,
      calculate_package_search_path,
      SingletonRule(SourceRoots, SourceRoots(('src/java','src/scala'))),
    ] + [
      # Remote dependency resolution
      ivy_resolve,
      select_rev,
    ] + [
      # Compilers
      isolate_resources,
      write_name_file,
      javac,
      scalac,
    ] + (
      create_graph_rules(address_mapper, symbol_table)
    ) + (
      create_fs_rules()
    )

  return LocalScheduler(work_dir,
                        goals,
                        tasks,
                        project_tree,
                        native)
Example 49
def header(infolder,
           line=comment_LINE,
           block=comment_BLOCK,
           include=INCLUDE,
           exclude=EXCLUDE,
           overwrite=False):
    # Compile regular expression pattern to match in scanned files
    pattern = re_compile(_COMMENT.format(r'|'.join(map(comment_escape, line)),
                                         *comment_block_comments(block)),
                         flags=re_DOTALL | re_VERBOSE | re_MULTILINE)
    # Define default values
    align = _FORMAT['CENTER']
    width = 80
    # Update values based on INFO file
    values = {}
    with open(os_path_join(infolder, 'INFO'), 'r', encoding='utf-8') as file:
        header = file.read()
        match = re_match(r'\s*#\s*format\s+'
                         r'((?P<align>CENTER|LEFT|RIGHT)\s+)?'
                         r'(?P<width>\d+)?', header)
        if match:
            align, width = match.group('align', 'width')
            align = _FORMAT.get(align, _FORMAT['CENTER'])
            try:
                width = int(width)
            except TypeError:
                pass
        # Add leading and trailing empty line
        header = '\n{}\n'.format(header[match.end():].strip())

    # Get file contents of special files
    for filename in _FILES:
        try:
            with open(os_path_join(infolder, filename), 'r', encoding='utf-8') as file:
                values[filename] = file.read().strip()
        except FileNotFoundError:
            values[filename] = ''

    # Get special values
    values['DATE'] = datetime.now().strftime('%Y.%m.%d')

    # Exclude containers
    except_dirs  = []  # relative path to dir from root
    except_files = []  # relative path to file from root
    except_names = []  # filename (with extension) anywhere
    except_exts  = []  # extension anywhere

    # If 'exclude' is dictionary like object
    try:
        _empty = ()
        # Excludes relative to root
        for key, container in zip(('folders', 'files'),
                                  (except_dirs, except_files)):
            container.extend(os_path_join(infolder, p) for p in exclude.get(key, _empty))
        # Excludes anywhere
        for key, container in zip(('names', 'extensions'),
                                  (except_names, except_exts)):
            container.extend(exclude.get(key, _empty))
    # If 'exclude' is an iterable object
    except AttributeError:
        except_names = exclude

    # Include containers
    permit_names = []  # filename (with extension) anywhere
    permit_exts  = []  # extension anywhere

    # If 'include' is dictionary like object
    try:
        _empty = ()
        # Includes anywhere
        for key, container in zip(('names', 'extensions'),
                                  (permit_names, permit_exts)):
            container.extend(include.get(key, _empty))
    # If 'include' is an iterable object
    except AttributeError:
        permit_names = include

    # Walk through all files and folders in the passed folder
    # FIXME: what if none of the files changed only INFO has been updated?
    # Scan through all files and folders
    with check_Checker(infolder, file='.clic_cache') as checker:
        for root, dirs, filenames in os_walk(infolder):
            # If skip this folder and all subfolders
            if root in except_dirs:
                dirs.clear()
                continue
            # Check all files in folder
            for filename in filenames:
                filepath = os_path_join(root, filename)[2:]
                # If skip this exact file
                if filepath in except_files:
                    continue
                name, extension = os_path_splitext(filename)
                # If the file or extension is not banned, it is on the
                # white-list, it changed since the last check, and
                # this is not an overwrite call
                if (filename not in except_names and
                    extension not in except_exts and
                    (extension in permit_exts or
                     filename  in permit_names) and
                    checker.ischanged(filepath) and
                    not overwrite):
                    values['SIZE'] = _size(os_path_getsize(filepath))
                    # FIXME: make it more generic than ./ -- what if ../../?
                    values['FILE'] = filepath[2:] if filepath.startswith('./') else filepath
                    values['FILE_NAME'] = filename
                    values['FILE_BASE'] = name
                    if _comment(header.format(**values), filepath, pattern, align, width):
                        # Update checker after the file has been modified
                        checker.update()
                        # Report
                        print('CLIC: processed {!r}'.format(filepath))
Example 50
def document(infolder, outfolder, extension, loader, external_css=None,
             generate_toc=None, overwrite=False):
    # Get previously generated TOC object
    TOC = os_path_join(infolder, '.cdoc_toc')
    try:
        with open(TOC, 'rb') as file:
            old_toc = pickle_load(file)
    except (FileNotFoundError, EOFError):
        old_toc = table_Dict2D(OrderedDict)

    # Create new TOC object
    new_toc = table_Dict2D(OrderedDict)

    # TODO: do we really need a separate OrderedDict for pages ???
    pages = OrderedDict()
    anonym = iter_count()

    # TODO: Create real dependency graphs
    #       Document object:
    #           parents  = set()  # other documents depending on this document
    #           children = set()  # other documents this document depending on
    #
    #       If document changed:
    #           set all parents of document => changed
    #
    #       If any of its children changed:
    #           set all parents of child => changed
    #
    #       -- The loop should check if a document's change flag has already
    #          been set. If not, hash file, and set flag, and notify all
    #          dependencies (parents)

    # Load all pages
    with check_Checker(infolder, file='.cdoc_cache', lazy_update=True) as checker:
        # Go through all files
        for file in os_listdir(infolder):
            # If file has the proper extension
            if file.endswith(extension):
                # Create full file path
                filepath = os_path_join(infolder, file)
                # If file has been changed since last check
                if checker.ischanged(filepath) and not overwrite:
                    # Regenerate file
                    filename, pagename, depends = \
                        _process(infolder, file, filepath, pages, loader, anonym)
                # If file hasn't been changed
                else:
                    # If file has been cached before
                    try:
                        # Get previous infos
                        filename, depends = old_toc[filepath]
                        pagename = old_toc.otherkey(filepath)
                        pages[pagename] = None
                        # If any of the dependencies has changed
                        for dependency in depends:
                            if checker.ischanged(dependency) and not overwrite:
                                # Regenerate file
                                filename, pagename, depends = \
                                    _process(infolder, file, filepath, pages, loader, anonym)
                                break
                    # If file is new and hasn't been cached before
                    except KeyError:
                        # Generate it for the first time
                        filename, pagename, depends = \
                            _process(infolder, file, filepath, pages, loader, anonym)
                # Store new values
                new_toc[pagename:filepath] = filename, depends

    # If reordering, renaming, insertion, deletion, etc. happened
    if set(old_toc) - set(new_toc):
        for pagename, filepath in new_toc.keys():
            if pages[pagename] is None:
                _process(infolder, os_path_basename(filepath), filepath, pages, loader, anonym)

    # Write back TOC object
    with open(TOC, 'wb') as file:
        pickle_dump(new_toc, file, pickle_HIGHEST_PROTOCOL)
    # Generate Table of Content?
    if generate_toc is None:
        generate_toc = len(new_toc) > 1
    # Create documents
    _build(pages, outfolder, generate_toc, new_toc, external_css)
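
# Hypothetical invocation of document() (illustrative only; the folder names,
# extension and loader are assumptions, not taken from the original project):
#
#     document(infolder='doc/src', outfolder='doc/html', extension='.yaml',
#              loader=yaml_safe_load, generate_toc=True)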
Example n. 51
POST_SPACING_FULL = POST_SPACING_HEAD + POST_SPACING_TAIL

FOREGROUND = 0
BACKGROUND = 1

# TODO: if platform does not support visible scrollbar
VISIBLE_SCROLLBAR = 15

MEDIA_WIDTH = 320
DEFAULT_WINDOW_POS_DIM = (NotImplemented, 0,
                          MEDIA_WIDTH + 2*POST_SPACING_FULL + VISIBLE_SCROLLBAR, 768)

#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
_SANS = 'Source Sans Pro'
_FONT_PATH = os_path_join('resources', 'font', 'TTF')
_BASE_PATH = os_path_join('resources', 'artwork')

#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Global variable that holds all the static GUI information
# It is initialised as empty because a QApplication has to be
# running before any Q* object can be created or set
CONSTANTS = {}
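
# A minimal sketch of the intended start-up order (hypothetical, names are
# illustrative, not part of the original code):
#
#     app = QApplication(sys.argv)       # must exist first
#     set_gui_constants(main_window)     # now QFont/QColor/QPixmap objects can be built
#     label.setFont(CONSTANTS['text_font_title'])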


#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Split the concatenated hex colour string (RGB plus optional alpha) into
# 2-character pairs and convert each pair into an integer channel value for QColor
_rgb = lambda s, a: QColor(*(int(_1+_2, 16) for _1, _2 in zip(*(iter(s+a),)*2)))

#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def _image(file, sub_folder, rotate=None):
    pixmap = QPixmap(os_path_join(_BASE_PATH, sub_folder, file))
Example n. 52
def set_gui_constants(parent):
    # TODO: after the design is settled, remove unnecessary font files
    # Build every font-file weight/style suffix: the base weights, their italic
    # ('It') variants, plus 'Regular' and 'It' (disjoint sets, so ^= acts as a union)
    weights = {'ExtraLight', 'Light', 'Semibold', 'Bold', 'Black'}
    weights ^= set(s + 'It' for s in copy(weights))
    weights ^= {'Regular', 'It'}
    # Load fonts
    fonts = QFontDatabase()
    for weight in weights:
        fonts.addApplicationFont(os_path_join(_FONT_PATH,
                                              'SourceSansPro-{}.ttf'.format(weight)))

    # TODO: distinguish in names:
    #       type_palette_property and type_color_property

    # Palettes
    CONSTANTS['text_color_dark']    = _palette(parent, FOREGROUND, '000000', 45)
    CONSTANTS['text_color_light']   = _palette(parent, FOREGROUND, 'ffffff', 30)
    CONSTANTS['text_color_light_selected']   = _palette(parent, FOREGROUND, 'ffffff', 85)
    # CONSTANTS['panel_color_light']  = _palette(parent, BACKGROUND, '808080')
    CONSTANTS['panel_color_dark']   = _palette(parent, BACKGROUND, '303030')
    CONSTANTS['panel_color_darker'] = _palette(parent, BACKGROUND, '101010')
    CONSTANTS['panel_color_error']  = _palette(parent, BACKGROUND, '000000', 35)

    # Colors
    CONSTANTS['shadow_color'] = _color('000000', 70)
    CONSTANTS['panel_color_light'] = _color('808080')

    DEBUG_ALPHA = 40
    CONSTANTS['debug1'] = _color('ffff00', DEBUG_ALPHA)
    CONSTANTS['debug2'] = _color('00ffff', DEBUG_ALPHA)
    CONSTANTS['debug3'] = _color('ff00ff', DEBUG_ALPHA)
    CONSTANTS['debug4'] = _color('ff0000', DEBUG_ALPHA)
    CONSTANTS['debug5'] = _color('00ff00', DEBUG_ALPHA)
    CONSTANTS['debug6'] = _color('0000ff', DEBUG_ALPHA)

    # Fonts
    CONSTANTS['text_font_title']   = QFont(_SANS, 16, QFont.Light)
    CONSTANTS['text_font_author']  = QFont(_SANS, 10, QFont.Normal, italic=True)
    CONSTANTS['text_font_numbers'] = QFont(_SANS, 12, QFont.Normal)
    CONSTANTS['text_font_generic'] = QFont(_SANS, 10, QFont.Normal)

    # Icons
    CONSTANTS['icon_scroll_up']   = _icon('icons_scroll.png')
    CONSTANTS['icon_scroll_down'] = _icon('icons_scroll.png', rotate=180)
    CONSTANTS['icon_recoub']      = _icon('icons_share.png')
    CONSTANTS['icon_like']        = _icon('icons_like.png')
    CONSTANTS['icon_featured']    = _icon('icons_featured.png')
    CONSTANTS['icon_newest']      = _icon('icons_newest.png')
    CONSTANTS['icon_random']      = _icon('icons_random.png')
    CONSTANTS['icon_hot']         = _icon('icons_hot.png')
    CONSTANTS['icon_featured_selected'] = _icon('icons_featured_selected.png')
    CONSTANTS['icon_newest_selected']   = _icon('icons_newest_selected.png')
    CONSTANTS['icon_random_selected']   = _icon('icons_random_selected.png')
    CONSTANTS['icon_hot_selected']      = _icon('icons_hot_selected.png')

    # Other images
    CONSTANTS['icon_no_avatar']  = _static('no_avatar.png')
    CONSTANTS['other_separator'] = _static('separator.png')

    # Animation
    CONSTANTS['anim_busy_dark']  = os_path_join(_BASE_PATH, 'motion', 'dark_loader.gif')
    CONSTANTS['anim_busy_light'] = os_path_join(_BASE_PATH, 'motion', 'light_loader.gif')
Example n. 53
def calculate_package_search_path(jvm_package_name, source_roots):
  """Return PathGlobs to match directories where the given JVMPackageName might exist."""
  rel_package_dir = jvm_package_name.name.replace('.', os_sep)
  specs = [os_path_join(srcroot, rel_package_dir) for srcroot in source_roots.srcroots]
  return PathGlobs.create_from_specs(Dirs, '', specs)
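
# Worked example (hypothetical values, for illustration only): on a POSIX system,
# with jvm_package_name.name == 'com.example.foo' and
# source_roots.srcroots == ('src/java', 'src/scala'), the generated specs are
# ['src/java/com/example/foo', 'src/scala/com/example/foo'].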
Example n. 54
def _build(sources, outfolder, gentoc, toc, external_css):
    for pagename, source in sources.items():
        # If there is no valid source
        if source is None:
            continue
        # Clear soup and add an empty, new body
        SOUP.body.decompose()
        new(SOUP.html, 'body')

        # Set title
        SOUP.html.head.title.string = pagename
        # Get essential values
        filename, depends = toc[pagename]
        # Number of optional sections; decremented whenever a section is missing
        SECTIONS = 7
        # Build basic structure
        column1 = new(SOUP.body, 'div', id='column1')
        column2 = new(SOUP.body, 'div', id='column2')

        generic = new(column1, 'div', id='generic')
        sidebar = new(column1, 'div', id='sidebar')
        content = new(column2, 'div', id='content')

        # OPTIONAL: custom header section
        try:
            _html_format(generic, source['HEAD'], outfolder)
            new(generic, 'br')
        except KeyError:
            SECTIONS -= 1
        # OPTIONAL: custom menu in sidebar
        try:
            _html_format(sidebar, source['MENU'], outfolder)
            new(sidebar, 'br')
        except KeyError:
            SECTIONS -= 1
        # OPTIONAL: custom abstract and introduction
        try:
            _html_format(content, source['INFO'], outfolder)
            new(content, 'br')
        except KeyError:
            SECTIONS -= 1
        # OPTIONAL: index
        if gentoc:
            sidebar_type = new(sidebar, 'div')
            new(sidebar_type, 'p', class_='label', string='Modules:')
            new(sidebar, 'br')
            _indx_format(sidebar, pagename, toc)
            new(sidebar, 'br')

        # TODO: Implement a Schema validator for better user-feedback

        # TODO: add FOOT key

        # TODO: add EXEC to cdoc to add "interactive" python snippets to code
        # EXEC: |
        #   with open('VERSION') as file:
        #       # Insert to USER:About
        #       DOC[USER][0].insert(0, {'name': 'Version', 'info': file.read()})

        # OPTIONAL: text and code
        try:
            blocks = source['TEXT']
            for block in blocks:
                # Get the first element of the list as the section name
                try:
                    section = string_capwords(block[0])
                except IndexError:
                    continue
                sidebar_text = new(sidebar, 'div')
                new(sidebar_text, 'p', class_='label',
                                       string='{}:'.format(section))
                new(sidebar_text, 'br')
                content_text = new(content, 'div')
                new(content_text, 'h2', class_='title', string=section)
                for user in block[1:]:
                    _text_format(sidebar_text, content_text, user)
                new(sidebar, 'br')
        except KeyError:
            SECTIONS -= 1
        # OPTIONAL: user defined
        try:
            userdefs = source['USER']
            for userdef in userdefs:
                # Get the first element of the list as the section name
                try:
                    section = string_capwords(userdef[0])
                except IndexError:
                    continue
                sidebar_user = new(sidebar, 'div')
                new(sidebar_user, 'p', class_='label',
                                       string='{}:'.format(section))
                new(sidebar_user, 'br')
                content_user = new(content, 'div')
                new(content_user, 'h2', class_='title', string=section)
                for user in userdef[1:]:
                    _user_format(sidebar_user, content_user, user)
                new(sidebar, 'br')
        except KeyError:
            SECTIONS -= 1
        # OPTIONAL: type definitions
        try:
            types = source['TYPE']
            sidebar_type = new(sidebar, 'div')
            new(sidebar_type, 'p', class_='label', string='Types:')
            new(sidebar_type, 'br')
            content_type = new(content, 'div')
            new(content_type, 'h2', class_='title', string='Types')
            for type in types:
                _type_format(sidebar_type, content_type, type)
            new(sidebar, 'br')
        except KeyError:
            SECTIONS -= 1
        # OPTIONAL: function definitions
        try:
            funcs = source['FUNC']
            sidebar_func = new(sidebar, 'div')
            new(sidebar_func, 'p', class_='label', string='Functions:')
            new(sidebar_func, 'br')
            content_func = new(content, 'div')
            new(content_func, 'h2', class_='title', string='Functions')
            for func in funcs:
                _func_format(sidebar_func, content_func, func)
        except KeyError:
            SECTIONS -= 1

        # Create HTML file
        if SECTIONS:
            output = os_path_join(outfolder, filename)
            with open(output, 'w', encoding='utf-8') as file:
                file.write(SOUP.decode(formatter='html'))
                print('CDOC: {!r} processed'.format(output))
            continue
        print('CDOC: !!! WARNING !!! in {!r} no data provided'.format(pagename))
    # Create folder if not exists to css
    stylepath = os_path_join(outfolder, 'css')
    try:
        os_makedirs(stylepath)
    except OSError as e:
        if not (e.errno == errno_EEXIST and os_path_isdir(stylepath)):
            raise
    # Create CSS path
    stylesheet = os_path_join(stylepath, 'cdoc.css')
    # If using the user created custom CSS
    if external_css:
        copyfile(external_css, stylesheet)
    # If using the default CSS
    else:
        with open(stylesheet, 'w', encoding='utf-8') as file:
            file.write(STYLE)
            print('CDOC: {!r} processed'.format(stylesheet))