# Example 1
def read_conf_file(config_file_path):
    """Read a configuration file and return its contents parsed as JSON.

    Returns False if the file does not exist (the course will be created
    for the first time).  On a JSON parse error, prints a diagnostic that
    points at the offending line/column and exits the process.
    """

    # If the config file doesn't exist, this is a first-time course creation
    if not os.path.exists(config_file_path):
        print_err("INFO: course will be created for the first time")
        return False

    # Try to read the configuration file data as JSON
    try:
        with open(config_file_path) as config:
            # JSON object order is preserved by dict on Python 3.7+, so
            # chapters and modules keep their configured order.
            # conf_data = json.load(config, object_pairs_hook=collections.OrderedDict)
            conf_data = json.load(config)
    except ValueError as err:
        # BUG FIX: the exception was never bound (`except ValueError:` with
        # the `, err` commented out), so `msg` below was an undefined name
        # (NameError) and print_err printed the literal string "msg".
        # Error message handling based on validate_json.py (https://gist.github.com/byrongibson/1921038)
        msg = str(err)
        print_err(msg)

        if msg == 'No JSON object could be decoded':
            print_err(
                'ERROR: %s is not a valid JSON file or does not use a supported encoding\n'
                % config_file_path)
        else:
            # Parse "line L column C" details out of the decoder message
            err = ODSA_Config.parse_error(msg).groupdict()
            # cast int captures to int
            for k, v in err.items():
                if v and v.isdigit():
                    err[k] = int(v)

            with open(config_file_path) as config:
                lines = config.readlines()

            # Locate the offending line so it can be echoed with a caret marker
            for ii, line in enumerate(lines):
                if ii == err['lineno'] - 1:
                    break

            print_err("""
    %s
    %s^-- %s
    """ % (line.replace("\n", ""), " " * (err['colno'] - 1), err['msg']))

        # TODO: Figure out how to get (simple)json to accept different encodings
        sys.exit(1)

    return conf_data
# Example 2
def read_conf_file(config_file_path):
    """Read a configuration file and return its contents parsed as JSON.

    Returns False if the file does not exist (the course will be created
    for the first time).  On a JSON parse error, prints a diagnostic that
    points at the offending line/column and exits the process.
    """

    # If the config file doesn't exist, this is a first-time course creation
    if not os.path.exists(config_file_path):
        print_err("INFO: course will be created for the first time")
        return False

    # Try to read the configuration file data as JSON
    try:
        with open(config_file_path) as config:
            # JSON object order is preserved by dict on Python 3.7+, so
            # chapters and modules keep their configured order.
            # conf_data = json.load(config, object_pairs_hook=collections.OrderedDict)
            conf_data = json.load(config)
    except ValueError as err:
        # FIX: `except ValueError, err` is Python 2-only syntax (SyntaxError
        # on Python 3) and `err.message` was removed in Python 3; use
        # `as err` binding and str(err) instead.
        # Error message handling based on validate_json.py (https://gist.github.com/byrongibson/1921038)
        msg = str(err)
        print_err(msg)

        if msg == 'No JSON object could be decoded':
            print_err('ERROR: %s is not a valid JSON file or does not use a supported encoding\n' % config_file_path)
        else:
            # Parse "line L column C" details out of the decoder message
            err = ODSA_Config.parse_error(msg).groupdict()
            # cast int captures to int
            for k, v in err.items():
                if v and v.isdigit():
                    err[k] = int(v)

            with open(config_file_path) as config:
                lines = config.readlines()

            # Locate the offending line so it can be echoed with a caret marker
            for ii, line in enumerate(lines):
                if ii == err['lineno'] - 1:
                    break

            print_err("""
    %s
    %s^-- %s
    """ % (line.replace("\n", ""), " " * (err['colno'] - 1), err['msg']))

        # TODO: Figure out how to get (simple)json to accept different encodings
        sys.exit(1)

    # BUG FIX: the original fell off the end of the function and implicitly
    # returned None on success; callers expect the parsed configuration.
    return conf_data
# Example 3
def configure(config_file_path, options):
  """Configure an OpenDSA textbook based on a validated configuration file.

  Reads options.slides and options.dry_run, clears and regenerates the book
  output directory, writes build metadata (Makefile, conf.py, table.json,
  page_chapter.json), runs `make` to build the book, then post-processes
  the generated HTML.  NOTE(review): Python 2 code (print statements).
  """
  # Depends on module-level state populated elsewhere in this file:
  # satisfied_requirements, missing_exercises, todo_list, num_ref_map,
  # module_chap_map, images, processed_modules, cmap_map,
  # makefile_template and conf (the template strings) -- confirm in module.
  global satisfied_requirements

  slides = options.slides

  print "Configuring OpenDSA, using " + config_file_path

  # Load and validate the configuration
  config = ODSA_Config(config_file_path)

  # Delete everything in the book's HTML directory, otherwise the post-processor can sometimes append chapter numbers to the existing HTML files, making the numbering incorrect
  html_dir = config.book_dir + config.rel_book_output_path
  if os.path.isdir(html_dir):
    print "Clearing HTML directory"
    shutil.rmtree(html_dir)

  # Add the list of topics the book assumes students know to the list of fulfilled prereqs
  if config.assumes:
    satisfied_requirements += [a.strip() for a in config.assumes.split(';')]

  # Optionally rebuild JSAV
  if config.build_JSAV:
    print "Building JSAV\n"
    status = 0

    # Run `make` in the JSAV directory, discarding its stdout
    with open(os.devnull, "w") as fnull:
      status = subprocess.check_call('make -s -C %s' % (config.odsa_dir + 'JSAV/'), shell=True, stdout=fnull)

    # NOTE(review): check_call raises CalledProcessError on a non-zero exit,
    # so this branch is effectively unreachable.
    if status != 0:
      print_err("JSAV make failed")
      print_err(status)
      sys.exit(1)

  print "Writing files to " + config.book_dir + "\n"

  # Initialize output directory, create index.rst, and process all of the modules
  initialize_output_directory(config)
  generate_index_rst(config, slides)

  # Print out a list of any exercises found in RST files that do not appear in the config file
  if len(missing_exercises) > 0:
    print_err("\nExercises Not Listed in Config File:")

    for exercise in missing_exercises:
      print_err('  ' + exercise)

    # Print an extra line to separate this section from any additional errors
    print_err('')

  # Stop if we are just running a dry-run
  if options.dry_run:
    return

  # Entries are only added to todo_list if config.suppress_todo is False
  if len(todo_list) > 0:
    generate_todo_rst(config, slides)

  # Dump num_ref_map to table.json to be used by the Sphinx directives
  with open(config.book_dir + 'table.json', 'w') as num_ref_map_file:
    json.dump(num_ref_map, num_ref_map_file)

  # Dump module_chap_map to page_chapter.json to be used by the avmetadata directive
  # NOTE: avmetadata is deprecated (it was used to generate the concept map but is no longer used)
  # If avmetadata is eventually removed, we can stop writing this file
  with open(config.book_dir + 'page_chapter.json', 'w') as page_chapter_file:
    json.dump(module_chap_map, page_chapter_file)

  # Initialize options for conf.py
  # NOTE(review): rebinds the `options` parameter; the original command-line
  # options object is no longer accessible past this point.
  options = initialize_conf_py_options(config, slides)

  # Create a Makefile in the output directory
  with open(config.book_dir + 'Makefile', 'w') as makefile:
    makefile.writelines(makefile_template % options)

  # Create conf.py file in output source directory
  with codecs.open(config.book_src_dir + 'conf.py', 'w', "utf-8") as conf_py:
    conf_py.writelines(conf % options)

  # Copy only the images used by the book from RST/Images/ to the book source directory
  for image in images:
    distutils.file_util.copy_file('%sRST/Images/%s' % (config.odsa_dir, image), config.book_src_dir + 'Images/')


  # Run make on the output directory
  print '\nBuilding textbook...'

  # Build either the slide deck or the full book, streaming make's stdout
  if slides:
    proc = subprocess.Popen(['make', '-C', config.book_dir, 'slides'], stdout=subprocess.PIPE)
  else:
    proc = subprocess.Popen(['make', '-C', config.book_dir], stdout=subprocess.PIPE)
  for line in iter(proc.stdout.readline,''):
    print line.rstrip()

  # Calls the postprocessor to update chapter, section, and module numbers, and glossary terms definition
  update_TOC(config.book_src_dir, config.book_dir + config.rel_book_output_path, module_chap_map)
  if 'Glossary' in processed_modules:
    update_TermDef(config.book_dir + config.rel_book_output_path + 'Glossary.html', cmap_map['concepts'])

    # Create the concept map definition file in _static html directory
    with codecs.open(config.book_dir + 'html/_static/GraphDefs.json', 'w', 'utf-8') as graph_defs_file:
      json.dump(cmap_map, graph_defs_file)
# Example 4
def configure(config_file_path, options):
    """Configure an OpenDSA textbook based on a validated configuration file.

    Reads options.slides, options.no_lms, options.standalone_modules,
    options.output_directory, options.local and options.dry_run; clears and
    regenerates the book output directory, writes build metadata (Makefile,
    conf.py, table.json, page_chapter.json), runs `make` to build the book,
    then post-processes the generated HTML and optionally creates LTI data.
    """
    # Depends on module-level state populated elsewhere in this file:
    # satisfied_requirements, missing_exercises, todo_list, num_ref_map,
    # module_chap_map, images, processed_modules, cmap_map, config_templates.
    global satisfied_requirements

    slides = options.slides
    no_lms = options.no_lms
    standalone_modules = options.standalone_modules
    conf_data = None

    # For slide decks or LMS-free books, expand the simplified configuration
    # into a full one before loading/validating it
    if no_lms or slides:
        conf_data = simple2full.generate_full_config(config_file_path, slides)

    print(("Configuring OpenDSA, using " + config_file_path))

    # Load and validate the configuration
    config = ODSA_Config(config_file_path,
                         options.output_directory,
                         options.no_lms,
                         conf_data=conf_data)

    # Delete everything in the book's HTML directory, otherwise the
    # post-processor can sometimes append chapter numbers to the existing HTML
    # files, making the numbering incorrect
    html_dir = config.book_dir + config.rel_book_output_path
    if os.path.isdir(html_dir):
        print("Clearing HTML directory")
        shutil.rmtree(html_dir, ignore_errors=True)
        # ignore_errors needed to delete files marked readonly or busy

    # Add the list of topics the book assumes students know to the list of
    # fulfilled prereqs
    if config.assumes:
        satisfied_requirements += [
            a.strip() for a in config.assumes.split(';')
        ]

    # Optionally rebuild JSAV
    # NOTE(review): build_JSAV is accepted but ignored; JSAV is no longer
    # built here.
    if config.build_JSAV:
        print("We don't build JSAV anymore!\n")

    print(("Writing files to " + config.book_dir + "\n"))

    # local mode option
    # Stored as the string "true"/"false" (not a bool) -- presumably consumed
    # as text by a template; verify before changing.
    config.local_mode = str(options.local).lower()

    # Initialize output directory, create index.rst, and process all of the
    # modules
    initialize_output_directory(config)
    generate_index_rst(config, slides, standalone_modules)

    # Print out a list of any exercises found in RST files that do not appear
    # in the config file
    if len(missing_exercises) > 0:
        print_err("\nExercises Not Listed in Config File:")

        for exercise in missing_exercises:
            print_err('  ' + exercise)

        # Print an extra line to separate this section from any additional
        # errors
        print_err('')

    # Stop if we are just running a dry-run
    if options.dry_run:
        return

    # Entries are only added to todo_list if config.suppress_todo is False
    if len(todo_list) > 0:
        generate_todo_rst(config, slides)

    # Dump num_ref_map to table.json to be used by the Sphinx directives
    with open(config.book_dir + 'table.json', 'w') as num_ref_map_file:
        json.dump(num_ref_map, num_ref_map_file)

    # Dump module_chap_map to page_chapter.json to be used by the avmetadata directive
    # NOTE: avmetadata is deprecated (it was used to generate the concept map but is no longer used)
    # If avmetadata is eventually removed, we can stop writing this file
    with open(config.book_dir + 'page_chapter.json', 'w') as page_chapter_file:
        json.dump(module_chap_map, page_chapter_file)

    # Initialize options for conf.py
    # NOTE(review): rebinds the `options` parameter; the original command-line
    # options object is no longer accessible past this point.
    options = initialize_conf_py_options(config, slides)

    # Create a Makefile in the output directory
    with open(config.book_dir + 'Makefile', 'w') as makefile:
        makefile.writelines(config_templates.makefile_template % options)

    # Create conf.py file in output source directory
    with codecs.open(config.book_src_dir + 'conf.py', 'w', "utf-8") as conf_py:
        conf_py.writelines(config_templates.conf % options)

    # Copy only the images used by the book from RST/Images/ to the book
    # source directory
    for image in images:
        distutils.file_util.copy_file(
            '%sRST/Images/%s' % (config.odsa_dir, image),
            config.book_src_dir + 'Images/')

    # Run make on the output directory
    print('\nBuilding textbook...')

    # Build either the slide deck or the html+min book targets
    job = ['make', '-C', config.book_dir]
    if slides:
        job.append('slides')
    else:
        job.append("html")
        job.append("min")

    # if make is visible to shutil, then no need to use shell
    ''' TODO: Test if shell_needed is always false, which would make some of the below code useless
    With the odsa docker update, we could safely assume 'make' to be usable and visible to shutil  
    '''
    shell_needed = shutil.which('make') is None
    if shell_needed:
        print(
            "WARNING: 'make' command is not visible from python... Doing leap of faith..."
        )

    print("$$$ Subprocess Started: " + " ".join(job), flush=True)
    # NOTE(review): with shell=True, `job` (a list) is joined by the shell in
    # a platform-dependent way; this path only triggers when `make` is not on
    # PATH, per the "leap of faith" warning above.
    proc = subprocess.run(job,
                          shell=shell_needed,
                          stdout=sys.stdout,
                          stderr=sys.stderr)
    if proc.returncode != 0:
        print_err("Creating eBook failed.  See above error")
        exit(1)
    print("$$$ Subprocess Complete: " + " ".join(job), flush=True)
    ''' TODO: Keep looking for encoding errors.
    These are because python 2.7 implicitly converted string encodings.
    python2.7 encodes strings IMplicitly, python3 does this EXplicitly instead.
    '''

    # Calls the postprocessor to update chapter, section, and module numbers,
    # and glossary terms definition

    book_dest_dir = config.book_dir + config.rel_book_output_path
    update_TOC(config.book_src_dir, book_dest_dir, module_chap_map,
               standalone_modules)
    if 'Glossary' in processed_modules:
        update_TermDef(
            config.book_dir + config.rel_book_output_path + 'Glossary.html',
            cmap_map['concepts'])

        # Create the concept map definition file in _static html directory
        with codecs.open(config.book_dir + 'html/_static/GraphDefs.json', 'w',
                         'utf-8') as graph_defs_file:
            json.dump(cmap_map, graph_defs_file)

    # Generate LTI course data unless building slides or an LMS-free book
    if not slides and not no_lms:
        make_lti(config, no_lms, standalone_modules)
# Example 5
def configure(config_file_path, options):
    """Configure an OpenDSA textbook based on a validated configuration file.

    Reads options.slides, options.output_directory, options.local and
    options.dry_run; clears and regenerates the book output directory,
    writes build metadata (Makefile, conf.py, table.json,
    page_chapter.json), runs `make` to build the book, then post-processes
    the generated HTML.  NOTE(review): Python 2 code (print statements).
    """
    # Depends on module-level state populated elsewhere in this file:
    # satisfied_requirements, missing_exercises, todo_list, num_ref_map,
    # module_chap_map, images, processed_modules, cmap_map,
    # makefile_template and conf (the template strings) -- confirm in module.
    global satisfied_requirements

    slides = options.slides

    print "Configuring OpenDSA, using " + config_file_path

    # Load and validate the configuration
    config = ODSA_Config(config_file_path, options.output_directory)

    # Delete everything in the book's HTML directory, otherwise the
    # post-processor can sometimes append chapter numbers to the existing HTML
    # files, making the numbering incorrect
    html_dir = config.book_dir + config.rel_book_output_path
    if os.path.isdir(html_dir):
        print "Clearing HTML directory"
        shutil.rmtree(html_dir)

    # Add the list of topics the book assumes students know to the list of
    # fulfilled prereqs
    if config.assumes:
        satisfied_requirements += [a.strip()
                                   for a in config.assumes.split(';')]

    # Optionally rebuild JSAV
    if config.build_JSAV:
        print "Building JSAV\n"
        status = 0

        # Run `make` in the JSAV directory, discarding its stdout
        with open(os.devnull, "w") as fnull:
            status = subprocess.check_call(
                'make -s -C %s' % (config.odsa_dir + 'JSAV/'), shell=True, stdout=fnull)

        # NOTE(review): check_call raises CalledProcessError on a non-zero
        # exit, so this branch is effectively unreachable.
        if status != 0:
            print_err("JSAV make failed")
            print_err(status)
            sys.exit(1)

    print "Writing files to " + config.book_dir + "\n"

    # local mode option
    # Stored as the string "true"/"false" (not a bool) -- presumably consumed
    # as text by a template; verify before changing.
    config.local_mode = str(options.local).lower()

    # Initialize output directory, create index.rst, and process all of the
    # modules
    initialize_output_directory(config)
    generate_index_rst(config, slides)

    # Print out a list of any exercises found in RST files that do not appear
    # in the config file
    if len(missing_exercises) > 0:
        print_err("\nExercises Not Listed in Config File:")

        for exercise in missing_exercises:
            print_err('  ' + exercise)

        # Print an extra line to separate this section from any additional
        # errors
        print_err('')

    # Stop if we are just running a dry-run
    if options.dry_run:
        return

    # Entries are only added to todo_list if config.suppress_todo is False
    if len(todo_list) > 0:
        generate_todo_rst(config, slides)


    # Dump num_ref_map to table.json to be used by the Sphinx directives
    with open(config.book_dir + 'table.json', 'w') as num_ref_map_file:
        json.dump(num_ref_map, num_ref_map_file)

    # Dump module_chap_map to page_chapter.json to be used by the avmetadata directive
    # NOTE: avmetadata is deprecated (it was used to generate the concept map but is no longer used)
    # If avmetadata is eventually removed, we can stop writing this file
    with open(config.book_dir + 'page_chapter.json', 'w') as page_chapter_file:
        json.dump(module_chap_map, page_chapter_file)

    # Initialize options for conf.py
    # NOTE(review): rebinds the `options` parameter; the original
    # command-line options object is no longer accessible past this point.
    options = initialize_conf_py_options(config, slides)

    # Create a Makefile in the output directory
    with open(config.book_dir + 'Makefile', 'w') as makefile:
        makefile.writelines(makefile_template % options)

    # Create conf.py file in output source directory
    with codecs.open(config.book_src_dir + 'conf.py', 'w', "utf-8") as conf_py:
        conf_py.writelines(conf % options)

    # Copy only the images used by the book from RST/Images/ to the book
    # source directory
    for image in images:
        distutils.file_util.copy_file(
            '%sRST/Images/%s' % (config.odsa_dir, image), config.book_src_dir + 'Images/')

    # Run make on the output directory
    print '\nBuilding textbook...'

    # Build either the slide deck or the full book, streaming make's stdout
    if slides:
        proc = subprocess.Popen(
            ['make', '-C', config.book_dir, 'slides'], stdout=subprocess.PIPE)
    else:
        proc = subprocess.Popen(
            ['make', '-C', config.book_dir], stdout=subprocess.PIPE)
    for line in iter(proc.stdout.readline, ''):
        print line.rstrip()

    # Calls the postprocessor to update chapter, section, and module numbers,
    # and glossary terms definition
    update_TOC(config.book_src_dir, config.book_dir +
               config.rel_book_output_path, module_chap_map)
    if 'Glossary' in processed_modules:
        update_TermDef(
            config.book_dir + config.rel_book_output_path + 'Glossary.html', cmap_map['concepts'])

        # Create the concept map definition file in _static html directory
        with codecs.open(config.book_dir + 'html/_static/GraphDefs.json', 'w', 'utf-8') as graph_defs_file:
            json.dump(cmap_map, graph_defs_file)