Example #1
def main(maya_version='2020'):
    """This is the main entry point of the program."""
    logger = logging.getLogger(__name__)
    database_file_path = os.path.join(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
        'maya-{0}-cpp-reference.docset'.format(maya_version), 'Contents',
        'Resources', 'docSet.dsidx')
    if not os.path.isfile(database_file_path):
        raise IOError('The database file: {0} does not exist!'.format(
            database_file_path))
    # Clean the database of existing entries
    clean_database(database_file_path)
    # Write entries
    docs_root = os.path.join(os.path.dirname(database_file_path), 'Documents')

    if not os.path.isdir(docs_root):
        raise IOError(
            'The documentation directory: {0} does not exist!'.format(
                docs_root))

    logger.debug('Inserting entries into database...')
    all_files = os.listdir(docs_root)
    logger.debug('Total number of files to process: {0}'.format(
        len(all_files)))

    jobs = []
    for s in chunk(all_files, 500):
        job = multiprocessing.Process(target=write_entries,
                                      args=(database_file_path, s, docs_root,
                                            maya_version))
        jobs.append(job)

    logger.debug('Num. of jobs scheduled: {0}'.format(len(jobs)))
    for j in jobs:
        j.start()
    logger.info('Jobs submitted, please wait for them to complete!')
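The docset-generation examples on this page all rely on a `chunk` helper to split the list of files into fixed-size batches for the worker processes. The helper itself is not shown here; a minimal sketch of what such a batching helper might look like (an assumption, not the original implementation):

def chunk(sequence, size):
    """Yield successive batches of at most `size` items from `sequence`.

    NOTE: Assumed stand-in for the `chunk` helper these examples import;
    the original implementation is not shown on this page.
    """
    for start in range(0, len(sequence), size):
        yield sequence[start:start + size]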
Example #2
def main(docs_path, output_path):
    """This is the main entry point of the program."""

    logger = logging.getLogger(__name__)

    if not os.path.isdir(docs_path):
        raise IOError('The directory: {0} does not exist!'.format(docs_path))

    logger.info('Formatting documentation...')

    if os.path.isdir(output_path):
        logger.info('Removing existing directory...')
        shutil.rmtree(output_path)
    os.makedirs(output_path)

    all_files = os.listdir(docs_path)
    logger.debug('Total number of files to process: {0}'.format(
        len(all_files)))

    for f in all_files:
        if f.endswith('.js'):
            shutil.copy(os.path.join(docs_path, f), output_path)

            # NOTE: Copy to a second directory since some of the script links are
            # dynamically generated.
            scripts_dir = os.path.join(output_path, 'scripts')
            if not os.path.isdir(scripts_dir):
                os.makedirs(scripts_dir)
            try:
                shutil.copy(os.path.join(docs_path, f), scripts_dir)
            except OSError:
                logger.warning('Failed to copy: {0}'.format(f))

    for f in all_files:
        if f.endswith('.png'):
            shutil.copy(os.path.join(docs_path, f), output_path)

            # NOTE: Copy to a second directory since some of the image links are
            # dynamically generated.
            images_dir = os.path.join(output_path, 'cpp_ref')
            if not os.path.isdir(images_dir):
                os.makedirs(images_dir)
            shutil.copy(os.path.join(docs_path, f), images_dir)

    for f in all_files:
        if f.endswith('.css'):
            # Ensure the shared style directory exists before copying
            styles_dir = os.path.join(os.path.dirname(output_path), 'style')
            if not os.path.isdir(styles_dir):
                os.makedirs(styles_dir)
            shutil.copy(os.path.join(docs_path, f), styles_dir)

    num_threads = 4
    # Integer division; guard against a zero chunk size when there are fewer
    # files than threads.
    chunk_size = max(1, len(all_files) // num_threads)
    jobs = []
    for s in chunk(all_files, chunk_size):
        job = multiprocessing.Process(target=format_file,
                                      args=(s, docs_path, output_path))
        jobs.append(job)

    logger.debug('Num. of jobs scheduled: {0}'.format(len(jobs)))
    for j in jobs:
        j.start()
    logger.info('Jobs submitted, please wait for them to complete!')
Example #3
    def load(self, renderer):
        """Load level from disk"""
        fileManagerOutputLog.debug(directory +
                                   normpath("data/" + self.name + ".world"))
        self.file = open(directory + normpath("data/" + self.name + ".world"),
                         'rb')
        self.mapmap = pickle.load(self.file)
        self.file.close()

        self.name = self.mapmap['name']
        self.displayname = self.mapmap['displayname']
        self.sizex = self.mapmap['sizex']
        self.sizey = self.mapmap['sizey']
        self.sizez = self.mapmap['sizez']
        self.playerx = self.mapmap['playerx']
        self.playery = self.mapmap['playery']
        self.playerz = self.mapmap['playerz']
        for chunkx in range(int(self.playerx / 16 - 5),
                            int(self.playerx / 16 + 6)):  # Generate worldmap
            for chunky in range(int(self.playery / 16 - 5),
                                int(self.playery / 16 + 6)):
                for chunkz in range(int(self.playerz / 16 - 5),
                                    int(self.playerz / 16 + 6)):
                    if chunkx >= 0 and chunky >= 0 and chunkz >= 0:
                        # Key chunks consistently by string on every axis
                        self.worldmap[str(chunkx)][str(chunky)][str(
                            chunkz)] = chunk(self, chunkx, chunky, chunkz,
                                             renderer)
                    renderer.mouseWatchTask(renderer.worldin)
                    taskMgr.step()
Example #4
def main(mari_version='401', input_location=''):
    """This is the main entry point of the program."""

    logger = logging.getLogger(__name__)

    output_path = os.path.join(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
        'mari-{0}-python-api.docset'.format(mari_version), 'Contents',
        'Resources', 'Documents')

    logger.debug('Using source documentation from: {0}'.format(input_location))
    logger.info(
        'Formatting documentation to directory: {0}'.format(output_path))

    if os.path.isdir(output_path):
        logger.info('Cleaning out existing directory...')
        shutil.rmtree(output_path)
    os.makedirs(output_path)

    all_files = os.listdir(input_location)
    logger.debug('Total number of files to process: {0}'.format(
        len(all_files)))
    jobs = []
    for s in chunk(all_files, CHUNK_SIZE):
        job = multiprocessing.Process(target=format_file,
                                      args=(s, input_location, output_path))
        jobs.append(job)

    logger.debug('Num. of jobs scheduled: {0}'.format(len(jobs)))
    for j in jobs:
        j.start()
    logger.info('Jobs submitted, please wait for them to complete!')
Example #5
def main(maya_version='2020'):
    """This is the main entry point of the program."""

    logger = logging.getLogger(__name__)
    docs_path = os.path.join(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
        'maya-{0}-cpp-reference.docset'.format(maya_version), 'Contents',
        'Resources', 'Documents')

    if not os.path.isdir(docs_path):
        raise IOError('The directory: {0} does not exist!'.format(docs_path))

    logger.info('Formatting documentation...')

    output_path = os.path.join(docs_path, 'output')
    if os.path.isdir(output_path):
        logger.info('Removing existing directory...')
        shutil.rmtree(output_path)
    os.makedirs(output_path)

    # Skip the output directory just created inside the docs folder
    all_files = [f for f in os.listdir(docs_path) if f != 'output']
    logger.debug('Total number of files to process: {0}'.format(
        len(all_files)))
    jobs = []
    for s in chunk(all_files, 500):
        job = multiprocessing.Process(target=format_file,
                                      args=(s, docs_path, output_path))
        jobs.append(job)

    logger.debug('Num. of jobs scheduled: {0}'.format(len(jobs)))
    for j in jobs:
        j.start()
    logger.info('Jobs submitted, please wait for them to complete!')
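Note that these entry points start their worker processes but never join them, so the parent simply logs a message and returns while the workers are still running. If a caller needs to block until every job completes, a small addition along these lines would work (a sketch, not part of the original code):

for j in jobs:
    j.start()
for j in jobs:
    j.join()  # Block until every worker process has finished
logger.info('All jobs complete!')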
Example #6
def main(docs_sources, output_path, fbx_version='2016', multi_thread=False):
    """
    This is the main entry point of the program.

    :param docs_sources: ``str`` path to the formatted documentation sources. This
        should be the root of the folder that contains the ``index.html`` formatted
        documentation.

    :param output_path: ``str`` path to write the docset database to.

    :param fbx_version: ``str`` indicating what version of FBX the docset
        database is being generated for.

    :param multi_thread: ``bool`` to indicate if multithreading support should
        be enabled.
    """
    logger = logging.getLogger(__name__)
    database_file_path = os.path.join(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
        'fbx-{0}-cpp.docset'.format(fbx_version), 'Contents', 'Resources',
        'docSet.dsidx')
    if not os.path.isfile(database_file_path):
        logger.debug(
            'The database file: {0} does not exist, creating it...'.format(
                database_file_path))
        open(database_file_path, 'w').close()
    # Clean the database of existing entries
    clean_database(database_file_path)
    # Write entries
    if not docs_sources:
        docs_sources = os.path.join(os.path.dirname(database_file_path),
                                    'Documents')

    if not os.path.isdir(docs_sources):
        raise IOError(
            'The documentation directory: {0} does not exist!'.format(
                docs_sources))

    logger.debug('Inserting entries into database...')
    all_files = os.listdir(docs_sources)
    logger.debug('Total number of files to process: {0}'.format(
        len(all_files)))

    if multi_thread:
        jobs = []
        for idx, s in enumerate(chunk(all_files, 500)):
            job = multiprocessing.Process(target=write_entries,
                                          args=(database_file_path, s,
                                                docs_sources, fbx_version,
                                                idx))
            jobs.append(job)

        logger.debug('Num. of jobs scheduled: {0}'.format(len(jobs)))
        for j in jobs:
            j.start()
        logger.info('Jobs submitted, please wait for them to complete!')
    else:
        write_entries(database_file_path, all_files, docs_sources, fbx_version)
Example #7
def main(mari_version=DEFAULT_MARI_VERSION, multiThread=False):
    """This is the main entry point of the program."""

    logger = logging.getLogger(__name__)

    database_file_path = os.path.join(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
        'mari-{0}-python-api.docset'.format(mari_version), 'Contents',
        'Resources', 'docSet.dsidx')
    if not os.path.isfile(database_file_path):
        raise IOError('The database file: {0} does not exist!'.format(
            database_file_path))

    # Clean the database of existing entries
    clean_database(database_file_path)

    # Write entries
    docs_root = os.path.join(os.path.dirname(database_file_path), 'Documents')

    if not os.path.isdir(docs_root):
        os.makedirs(docs_root)

    logger.debug('Inserting entries into database...')
    all_files = os.listdir(docs_root)
    logger.debug('Total number of files to process: {0}'.format(
        len(all_files)))

    if multiThread:
        jobs = []
        for s in chunk(all_files, 250):
            job = multiprocessing.Process(target=write_entries,
                                          args=(database_file_path, s,
                                                docs_root, mari_version))
            jobs.append(job)

        logger.debug('Num. of jobs scheduled: {0}'.format(len(jobs)))
        for j in jobs:
            j.start()
        logger.info('Jobs submitted, please wait for them to complete!')
    else:
        write_entries(database_file_path, all_files, docs_root, mari_version)
        logger.info('Complete!')
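The `clean_database` and `write_entries` helpers these entry points call are not shown on this page. Dash-style docsets keep their search index in `docSet.dsidx`, a SQLite database with a single `searchIndex` table, so a minimal sketch of what `clean_database` could look like (an assumed implementation, not the original):

import sqlite3

def clean_database(database_file_path):
    """Reset the docset search index (assumed implementation)."""
    conn = sqlite3.connect(database_file_path)
    conn.execute('CREATE TABLE IF NOT EXISTS searchIndex '
                 '(id INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT)')
    conn.execute('DELETE FROM searchIndex')  # Drop all existing entries
    conn.commit()
    conn.close()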
Example #8
    def load(self, renderer, lb=None):
        """Load the level from disk and rebuild the chunk map around the player."""
        self.file = open(directory + normpath("data/" + self.name + ".world"),
                         'rb')
        self.mapmap = pickle.load(self.file)
        self.file.close()

        self.name = self.mapmap['name']
        self.displayname = self.mapmap['displayname']
        self.sizex = self.mapmap['sizex']
        self.sizey = self.mapmap['sizey']
        self.sizez = self.mapmap['sizez']
        self.playerx = self.mapmap['playerx']
        self.playery = self.mapmap['playery']
        self.playerz = self.mapmap['playerz']
        self.worldmap = {}
        bar = 50
        for chunkx in range(0, 11):  # Load chunks
            self.worldmap[str(chunkx)] = {}
            for chunky in range(0, 11):
                self.worldmap[str(chunkx)][str(chunky)] = {}
                for chunkz in range(0, 11):
                    x = int(self.playerx / 16 - 5) + chunkx
                    y = int(self.playery / 16 - 5) + chunky
                    z = int(self.playerz / 16 - 5) + chunkz
                    print("Loading chunk %s, %s, %s" % (x, y, z))
                    if x >= 0 and y >= 0 and z >= 0:
                        bar += 1
                        self.worldmap[str(chunkx)][str(chunky)][str(
                            chunkz)] = chunk(self, x, y, z, renderer)
                        try:
                            lb['value'] = (100 / (11 * 11 * 11)) * bar
                        except TypeError:
                            # No progress bar widget was passed in (lb is None)
                            pass
                    taskMgr.step()
Example #9
    def loadchunks(
        self, renderer
    ):  # Unloads all chunks, and then loads the ones close to the player
        for chunkx in range(0, self.sizex):
            for chunky in range(0, self.sizey):
                for chunkz in range(0, self.sizez):
                    # Hide chunks outside the 11x11x11 window around the
                    # player's chunk (world position // 16)
                    if (chunkx <= self.playerx // 16 - 5
                            or chunkx >= self.playerx // 16 + 6):
                        self.worldmap[chunkx][chunky][chunkz].hidechunk()
                    elif (chunky <= self.playery // 16 - 5
                          or chunky >= self.playery // 16 + 6):
                        self.worldmap[chunkx][chunky][chunkz].hidechunk()
                    elif (chunkz <= self.playerz // 16 - 5
                          or chunkz >= self.playerz // 16 + 6):
                        self.worldmap[chunkx][chunky][chunkz].hidechunk()
        for chunkx in range(self.playerx // 16 - 5,
                            self.playerx // 16 + 6):  # Load chunks
            for chunky in range(self.playery // 16 - 5,
                                self.playery // 16 + 6):
                for chunkz in range(self.playerz // 16 - 5,
                                    self.playerz // 16 + 6):
                    self.worldmap[chunkx][chunky][chunkz] = chunk(
                        self, chunkx, chunky, chunkz, renderer)
                    taskMgr.step()
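Examples #8 and #9 both convert the player's world position to a chunk index by dividing by 16 (the assumed chunk edge length), then work on an 11x11x11 window: 5 chunks in each direction plus the chunk the player is standing in. A small illustration of that index math, under those assumptions:

CHUNK_SIZE = 16  # World units per chunk edge (assumed from the / 16 math)
RADIUS = 5       # Chunks loaded in each direction around the player

def chunk_window(player_pos):
    """Return the inclusive range of chunk indices to load along one axis."""
    center = int(player_pos // CHUNK_SIZE)
    return range(center - RADIUS, center + RADIUS + 1)  # 11 chunks total

# A player at x=100 is in chunk 6, so chunks 1 through 11 are loaded:
print(list(chunk_window(100)))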
Example #10
#!/usr/bin/env python
import json
import os

from lib import chunk

# Parse each volume's text file into chunks, writing per-volume JSON as well
# as a combined chunks.json covering all volumes. Context managers ensure
# every file handle is flushed and closed.
all_chunks = {}
with open(os.path.join('tmp', 'chunks.json'), 'w') as fp_out_all:
    for volume in ['volume1', 'volume2', 'planning']:
        with open(os.path.join('tmp', volume + '.txt'), 'r') as fp_in, \
                open(os.path.join('tmp', volume + '.json'), 'w') as fp_out:
            chunks = chunk(fp_in)
            all_chunks.update(chunks)
            json.dump(chunks, fp_out)
    json.dump(all_chunks, fp_out_all)
Example #11
def main(docs_sources, output_path, multi_thread=False, max_version='2017'):
    """
    This is the main entry point of the program. It formats the HTML sources 
    specified in ``docs_sources`` and writes them to the ``output_path`` directory 
    specified.

    :param docs_sources: ``str`` path to the original documentation sources. This 
        should contain the ``cpp_ref`` folder, among other resources.
    
    :param output_path: ``str`` path to write the formatted HTML files to.

    :param multi_thread: ``bool`` to indicate if multithreading support should be 
        enabled.

    :param max_version: ``str`` indicating what version of 3ds max the docset 
        being generated for is.
    """
    logger = logging.getLogger(__name__)
    if not docs_sources:
        docs_sources = os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
            'resources', max_version, 'cpp_ref')
    if not output_path:
        output_path = os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
            'max-{0}-cpp.docset'.format(max_version), 'Contents', 'Resources',
            'Documents')
    if docs_sources == output_path:
        raise IOError('The source and output directories are the same!')
    if not os.path.isdir(docs_sources):
        raise IOError('The directory: {0} does not exist!'.format(docs_sources))

    logger.info('Formatting documentation...')

    if os.path.isdir(output_path):
        logger.debug('Removing existing directory: {0}!'.format(output_path))
        shutil.rmtree(output_path)
    os.makedirs(output_path)

    # NOTE (sonictk): Copy over the necessary resource files first
    styles_path = os.path.join(os.path.dirname(docs_sources), 'style')
    if not os.path.isdir(styles_path):
        raise IOError('The CSS styles directory: {0} does not exist!'.format(styles_path))
    for css in os.listdir(styles_path):
        shutil.copy(os.path.join(styles_path, css), output_path)

    scripts_path = os.path.join(os.path.dirname(docs_sources), 'scripts')
    if not os.path.isdir(scripts_path):
        raise IOError('The JScript directory: {0} does not exist!'.format(scripts_path))
    shutil.copytree(scripts_path, os.path.join(output_path, 'scripts'))

    all_files = os.listdir(docs_sources)
    logger.debug('Total number of files to process: {0}'.format(len(all_files)))
    for f in all_files:
        if os.path.splitext(f)[-1] != '.html':
            # NOTE (sonictk): Just copy it over anyway, since those files are needed (CSS, scripts etc.)
            shutil.copy(os.path.join(docs_sources, f), output_path)
    if multi_thread:
        jobs = []
        for idx, s in enumerate(chunk(all_files, 500)):
            job = multiprocessing.Process(target=format_files,
                                          args=(s, docs_sources,
                                                output_path, idx))
            jobs.append(job)
        logger.debug('Num. of jobs scheduled: {0}'.format(len(jobs)))
        for j in jobs:
            j.start()
        logger.info('Jobs submitted, please wait for them to complete!')
    else:
        format_files(all_files, docs_sources, output_path)