Example #1
File: commit.py  Project: gaecom/squadron
def _get_service_file(squadron_dir, service_name, service_ver, filename, on_error=None, config=None):
    """
    Grabs the named service file in a service directory

    Keyword arguments:
        squadron_dir -- base directory
        service_name -- the name of the service
        service_ver -- the version of the service
        filename -- the name of the service file without the extension
        on_error -- if not None, this value is returned instead of raising the error
        config -- if a dict, uses it to template the file before loading it
    """
    ex = None
    for ext in extensions:
        try:
            serv_dir = os.path.join(squadron_dir, 'services', service_name, service_ver)
            service_file = os.path.join(serv_dir, filename + ext)
            if config:
                loader = FileLoader(squadron_dir)
                template = loader.load_template(service_file)
                return yaml.load(template.render(config, loader=loader))
            else:
                with open(service_file, 'r') as sfile:
                    return yaml.load(sfile.read())
        except (OSError, IOError) as e:
            if e.errno == errno.ENOENT:
                ex = e
            else:
                raise e

    if on_error is not None:
        return on_error
    raise ex
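For context, a minimal usage sketch of the helper above; the directory, service name and version are hypothetical, and extensions is assumed to be a module-level list of file suffixes such as ['.yml', '.json']:

# Hypothetical call: looks for services/api/1.0.0/state<ext> under /var/squadron
# and returns an empty dict instead of raising if no matching file exists.
state = _get_service_file('/var/squadron', 'api', '1.0.0', 'state', on_error={})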
Example #2
def write_segment_file(timestamp_start, timestamp_end, segment_filename, segment_title, segment_subtitle):
    template_loader = FileLoader('templates')
    timestamp_start_int = int(timestamp_start.translate(None, ":"))
    timestamp_end_int = int(timestamp_end.translate(None, ":"))

    output_segment_file_name_and_path = "./output/segments/" + segment_filename
    output_segment_file = open(output_segment_file_name_and_path, "w")
    output_segment_file.write("")
    output_segment_file.close()

    output_segment_file = open(output_segment_file_name_and_path, "a")
    #write file for current segment
    #WRITE SEGMENT HEADER
    item_template = template_loader.load_template('template_header.html')
    output_segment_file.write(item_template.render({'title': segment_title, 'subtitle': segment_subtitle},loader=template_loader).encode('utf-8'))

    #WRITE SEGMENT BODY ITEMS
    cur_row = 0
    input_file_path = "E:\Apollo17.org\MISSION_DATA\A17 master TEC and PAO utterances.csv"
    utterance_reader = csv.reader(open(input_file_path, "rU"), delimiter='|')
    for utterance_row in utterance_reader:
        cur_row += 1
        timeid = "timeid" + utterance_row[1].translate(None, ":")
        if utterance_row[1] != "": #if not a TAPE change or title row
            if (int(utterance_row[1].translate(None, ":")) >= timestamp_start_int) & (int(utterance_row[1].translate(None, ":")) < timestamp_end_int):
                item_template = template_loader.load_template('template_timelineitem.html')
                output_segment_file.write(item_template.render({'timeid': timeid, 'timestamp': utterance_row[1], 'who': utterance_row[2], 'words': utterance_row[3]}, loader=template_loader).encode('utf-8'))
            elif int(utterance_row[1].translate(None, ":")) > timestamp_end_int:
                break
        #if cur_row > 100:
        #    break

    #WRITE SEGMENT FOOTER
    item_template = template_loader.load_template('template_footer.html')
    output_segment_file.write(item_template.render({'datarow': 0}, loader=template_loader).encode('utf-8'))
Example #3
def gen_html(estimate):
    print("Rendering HTML")
    loader = FileLoader('.')
    template = loader.load_template('template.html')
    estimate_txt = estimate.strftime("%d %B")
    res = template.render(
        {
            'estimate': estimate_txt,
            'goalMs': round(estimate.timestamp()),
            'today': date.today().strftime("%d %B"),
            'ver': round(datetime.now().timestamp()),
            'goal': int(GOAL),
            'lastEntry': datetime.strptime(last_entry, "%Y-%m-%d").strftime("%d %B"),
            'vaccinated': first_dose
        },
        loader=loader).encode('utf-8')
    f = open("index.html", "w")
    f.write(res.decode("utf-8"))
    f.close()
Example #4
def gen():
    xmlTree = ET.parse('../../UserFiles/hw_conf.xml')
    root = xmlTree.getroot()

    number_of_node = int(root[1][0].text) * int(root[1][1].text)

    xmlTree = ET.parse('../../UserFiles/mapping.xml')
    root = xmlTree.getroot()

    # create a dictionary of list from mapping xml file
    mapping_dict = {}
    for element in root:
        if element.tag == 'process_mapping':
            mapping_dict[element.get('target')] = [
                element.get('target'),
                element.get('target').split('_')[1]
            ]

    list_map = list(mapping_dict)
    i = 0
    sw_scripts = []
    for i in range(len(list_map)):
        sw_scripts.clear()
        sw_scripts.append({
            'nodeName': list_map[i],
            'cpuName': 'nios2_' + list_map[i].split('_')[1],
            'index': list_map[i].split('_')[1]
        })
        loader = FileLoader('')
        template = loader.load_template('Templates/sw_script_template.sh')
        with open('../../sw_scripts/create_' + list_map[i] + '.sh',
                  'w',
                  encoding='utf-8') as f:
            f.write(template.render(locals()))
Example #5
def main():

  parser = argparse.ArgumentParser(description='Beautify dpxdt-results.')
  parser.add_argument('directory',help='directory with dpxdt-results',action='store')

  args = parser.parse_args()

  tests = scanTests(args.directory)

  loader = FileLoader(os.path.dirname(__file__) + '/templates')
  template = loader.load_template('result.html')
  for test in tests:
    content = template.render(test, loader=loader).encode('utf-8')
    result_file = open(args.directory + "/" + test['filename'], "w")

    result_file.write(content)
    result_file.close()

    print "Created " + test['filename'] + "..."

  template = loader.load_template('index.html')
  content = template.render({ 'tests': tests }, loader=loader).encode('utf-8')

  result_file = open(args.directory + "/" + 'index.html', "w")
  result_file.write(content)
  result_file.close()

  print "Created index.html ..."
Example #6
def scrape_data_to_html():
    timestamp = datetime.fromtimestamp(time.time()).strftime("%H:%M:%S on %A, %d %B, %Y")
    all_drinkers = drinkers_table.scan()
    drinkers = []
    for drinker in all_drinkers:
        if (drinker['code'] == None):
            drinker['code'] = "UNKNOWN"
        if (drinker['name'] == None):
            drinker['name'] = "UNKNOWN"
        if (drinker['volume_consumed'] == None):
            drinker['volume_consumed'] = 0
        if (drinker['number_of_drinks'] == None):
            drinker['number_of_drinks'] = 0
        d = {}
        d['code'] = drinker['code']
        d['name'] = drinker['name']
        d['volume_consumed'] = drinker['volume_consumed']
        d['number_of_drinks'] = drinker['number_of_drinks']
        drinkers.append(d)
    loader = FileLoader('templates')
    template = loader.load_template('drinks.html.template')
    webpage = template.render(locals())
    bucket = s3.get_bucket('kegerator')
    key = Key(bucket)
    key.key = 'drinks.html'
    key.content_type = 'text/html'
    key.set_contents_from_string(webpage)
    key.make_public()
Example #7
def generate_html(votingResults):
    # Load html template for rendering
    template = FileLoader("").load_template(html_template_path)

    # Render voting results data
    html = template.render(votingResults)

    return html
Example #8
def index_page(posts, pg_count, more_pages):
    index_name = os.path.join("content", "posts", str(pg_count) + ".html")
    
    with open(index_name, 'wb') as f:
        loader   = FileLoader('html')
        template = loader.load_template('post.html')
        f.write(template.render({'posts': posts, 'pg_count' : pg_count + 1, 'more_pages' : more_pages},
                                loader=loader).encode('utf-8'))
Example #9
def render_template(template_file, **kwargs):
    """ Render a template and display it in stdout

    @param template_file: template filepath
    @param kwargs: key/value pairs of data to be rendered
    """
    loader = FileLoader('')
    template = loader.load_template(template_file)
    print template.render(kwargs, loader=loader)
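A hypothetical call for the helper above, assuming a template file named motd.tmpl relative to the current working directory (FileLoader('') resolves paths from there):

# Renders motd.tmpl with the given key/value pairs and prints the result to stdout.
render_template('motd.tmpl', hostname='web01', uptime='3 days')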
Example #10
    def build_html(self):
        loader = FileLoader('html')
        template = loader.load_template('ladder_template.html')
        players = []

        for name in self.table:
            players.append({'name': name,
                            'rank': self.table.index(name) + 1})

        return template.render(locals(), loader=loader).encode('utf-8')
Example #11
    def __init__(self, group, ladder):
        self.group = group
        self.loader = FileLoader('html')
        self.template = self.loader.load_template('ladder_template.html')

        # put data in format ready for html templating
        self.html_players = []
        i = 0
        for player in ladder:
            i += 1
            self.html_players.append({'name': player.name, 'rank': i})
Example #12
def render_template(template_name, **context):
    """Renders a template from the template folder with the given
    context.

    :param template_name: the name of the template to be rendered
    :param context: the variables that should be available in the
    context of the template.
    """
    loader = FileLoader(stack.top.app.template_folder)
    template = loader.load_template(template_name)
    return template.render(context, loader=loader).encode('utf-8')
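A hypothetical call, assuming it runs inside an application context where stack.top.app resolves to the current Flask-style app and index.html lives in that app's template folder:

# Renders index.html from the app's template folder with the given context.
page_bytes = render_template('index.html', title='Home', items=['a', 'b', 'c'])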
Example #13
def create_video_page(post):

    try:
        loader = FileLoader('html')
        template = loader.load_template('video.html')
        date = parser.parse(post["created_time"])
        video_id = post["id"]
        # TODO actually use the template to generate a page...

        src = ""
        if (post.has_key("object_id")):
            if not (download_other_groups_videos or download_group_videos):
                return
            src = "https://www.facebook.com/photo.php?v=" + post["object_id"]
        elif (post.has_key("source")):
            if not download_other_sites_videos: return
            src = post["source"]
        elif (post.has_key("link")):
            if not download_other_sites_videos: return
            src = post["link"]
        else:
            return

        # Download the video
        result = ydl.extract_info(src, download=False)
        if 'entries' in result:
            # Can be a playlist or a list of videos
            video = result['entries'][0]
        else:
            # Just a video
            video = result

        #print("Downloading Thumbnail: " + video["thumbnail"])
        download_picture(video["thumbnail"], video_id, True)

        video_name = video_id + "." + video["ext"]

        video_url = os.path.join("content", "videos", video_name)
        if not os.path.exists(video_url):
            tempfile = video["id"] + video["ext"]
            print "downloading " + video_name
            result = ydl.extract_info(src, download=True)
            os.rename(tempfile, video_url)

        post["video"] = video_name

    except facebook.GraphAPIError as e:
        print "Download failed for :" + str(video_id) + "\nError: " + e.message
    except youtube_dl.utils.DownloadError as e:
        print "Download failed for :" + str(video_id) + "\nError: " + e.message
    except KeyError as e:
        print "Complex output for data on this video :" + str(
            video_id) + "\nError: " + e.message
Example #14
def main(argv):
    baseFolder = '/etc/nginxTemplates'
    outputFilePath = "/etc/nginx/nginx.conf"

    # Format template with values from environment
    loader = FileLoader(baseFolder)
    template = loader.load_template('nginx.conf.tmpl')
    renderedTemplate = template.render(templateValues,
                          loader=loader).encode('utf-8')

    # Save into logstash folder
    with open(outputFilePath, "w") as text_file:
        text_file.write("{0}".format(renderedTemplate))
Example #15
def main(argv):
    baseFolder = '/etc/nginxTemplates'
    outputFilePath = "/etc/nginx/nginx.conf"

    # Format template with values from environment
    loader = FileLoader(baseFolder)
    template = loader.load_template('nginx.conf.tmpl')
    renderedTemplate = template.render(templateValues,
                                       loader=loader).encode('utf-8')

    # Save into logstash folder
    with open(outputFilePath, "w") as text_file:
        text_file.write("{0}".format(renderedTemplate))
Example #16
def main(argv):
    baseFolder = argv[0] if len(argv) > 0 else '/etc/exchanges'
    outputFilePath = argv[1] if len(argv) > 1 else "/etc/logstash/conf.d/logstash.conf"

    # Format template with values from environment
    loader = FileLoader(baseFolder)
    template = loader.load_template('logstash.conf.tmpl')
    renderedTemplate = template.render(templateValues,
                          loader=loader).encode('utf-8')

    # Save into logstash folder
    with open(outputFilePath, "w") as text_file:
        text_file.write("{0}".format(renderedTemplate))
Example #17
class SliceView(MavaView):
  def __init__(self):
    super(SliceView, self).__init__()
    self.filename = "%s/slice" % self.filename
    self.loader = FileLoader(self.filename)

  def list(self, result):
    template = self.loader.load_template('list.txt')
    print template.render(result, loader=self.loader).encode('utf-8')

  def show(self, result):
    template = self.loader.load_template('show.txt')
    print template.render(result, loader=self.loader).encode('utf-8')
Example #18
def create_video_page(post):

   try:   
       loader   = FileLoader('html')
       template = loader.load_template('video.html')
       date     = parser.parse(post["created_time"])
       video_id = post["id"]
       # TODO actually use the template to generate a page...

       src = ""
       if(post.has_key("object_id")):
           if not (download_other_groups_videos or download_group_videos): return
           src = "https://www.facebook.com/photo.php?v=" + post["object_id"]
       elif(post.has_key("source")):
           if not download_other_sites_videos: return
           src = post["source"] 
       elif(post.has_key("link")):
           if not download_other_sites_videos: return
           src = post["link"] 
       else:
           return
          
       # Download the video
       result = ydl.extract_info(src, download=False)       
       if 'entries' in result:
           # Can be a playlist or a list of videos
           video = result['entries'][0]
       else:
           # Just a video
           video = result

       #print("Downloading Thumbnail: " + video["thumbnail"])
       download_picture(video["thumbnail"], video_id, True)
       
       video_name =  video_id + "." + video["ext"]

       video_url  = os.path.join("content", "videos", video_name)
       if not os.path.exists(video_url):
           tempfile = video["id"] + video["ext"]
           print "downloading " + video_name
           result = ydl.extract_info(src, download=True)
           os.rename(tempfile, video_url)

       post["video"] = video_name

   except facebook.GraphAPIError as e :
       print "Download failed for :" + str(video_id) + "\nError: "+e.message
   except youtube_dl.utils.DownloadError as e :
       print "Download failed for :" + str(video_id) + "\nError: "+e.message
   except KeyError as e :
       print "Complex output for data on this video :" + str(video_id) + "\nError: "+e.message
Example #19
def gen():
    xmlTree = ET.parse('../UserFiles/hw_conf.xml')
    root = xmlTree.getroot()
    number_of_node = int(root[1][0].text) * int(root[1][1].text)

    i = 0
    for i in range(number_of_node):
        loader = FileLoader('')
        template = loader.load_template('Templates/makefile')
        with open('../packet_transaction_lib/inc/header_' + str(i) +
                  '/makefile',
                  'w',
                  encoding='utf-8') as f:
            f.write(template.render(locals()))
Example #20
def create_html_summary():
    loader = FileLoader(".")
    template = loader.load_template('index.html')
    repos = requests.get(
        "https://api.github.com/orgs/nationalarchives/teams/transfer-digital-records/repos?per_page=100",
        headers=headers).json()
    filtered_repos = sorted([repo["name"] for repo in repos if not repo["archived"] and not repo["disabled"]])

    for repo in filtered_repos:
        versions = get_versions(repo)
        if versions is not None:
            releases.append(versions)
    with open("output.html", "w") as output:
        output.write(template.render({'releases': releases}, loader=loader))
Example #21
def index_page(posts, pg_count, more_pages):
    index_name = os.path.join("content", "posts", str(pg_count) + ".html")

    with open(index_name, 'wb') as f:
        loader = FileLoader('html')
        template = loader.load_template('post.html')
        f.write(
            template.render(
                {
                    'posts': posts,
                    'pg_count': pg_count + 1,
                    'more_pages': more_pages
                },
                loader=loader).encode('utf-8'))
Example #22
def write_segment_file(timestamp_start, timestamp_end, segment_filename, segment_title, segment_subtitle):
    template_loader = FileLoader('templates')
    timestamp_start_int = int(timestamp_start.translate(None, ":"))
    timestamp_end_int = int(timestamp_end.translate(None, ":"))

    output_segment_file_name_and_path = "./_webroot/segments/" + segment_filename
    output_segment_file = open(output_segment_file_name_and_path, "w")
    output_segment_file.write("")
    output_segment_file.close()

    output_segment_file = open(output_segment_file_name_and_path, "a")
    #write file for current segment
    #WRITE SEGMENT HEADER
    item_template = template_loader.load_template('template_afj_header.html')
    output_segment_file.write(item_template.render({'title': segment_title, 'subtitle': segment_subtitle},loader=template_loader))

    #WRITE SEGMENT BODY ITEMS
    cur_row = 0
    # input_file_path = "../MISSION_DATA/A17 master TEC and PAO utterances.csv"
    # utterance_reader = csv.reader(open(input_file_path, "rU"), delimiter='|')
    combined_list = get_combined_transcript_list()

    for combined_list_item in combined_list:
        cur_row += 1
        timeid = "timeid" + combined_list_item.timestamp.translate(None, ":")
        if combined_list_item.timestamp != "": #if not a TAPE change or title row
            if (int(combined_list_item.timestamp.translate(None, ":")) >= timestamp_start_int) & (int(combined_list_item.timestamp.translate(None, ":")) < timestamp_end_int):
                if type(combined_list_item) is TranscriptItem:
                    words_modified = combined_list_item.words.replace("O2", "O<sub>2</sub>")
                    words_modified = words_modified.replace("H2", "H<sub>2</sub>")
                    who_modified = combined_list_item.who.replace("CDR", "Cernan")
                    who_modified = who_modified.replace("CMP", "Evans")
                    who_modified = who_modified.replace("LMP", "Schmitt")
                    item_template = template_loader.load_template('template_afj_item_utterance.html')
                    output_segment_file.write(item_template.render({'timeid': timeid, 'timestamp': combined_list_item.timestamp, 'who': who_modified, 'words': words_modified}, loader=template_loader))
                if type(combined_list_item) is CommentaryItem:
                    item_template = template_loader.load_template('template_afj_item_commentary.html')
                    output_segment_file.write(item_template.render({'who': combined_list_item.who, 'words': combined_list_item.words, 'attribution': combined_list_item.attribution}, loader=template_loader).encode('UTF-8'))
                if type(combined_list_item) is PhotographyItem:
                    item_template = template_loader.load_template('template_afj_item_photo.html')
                    output_segment_file.write(item_template.render({'description': combined_list_item.description, 'filename': combined_list_item.filename}, loader=template_loader))

            elif int(combined_list_item.timestamp.translate(None, ":")) > timestamp_end_int:
                break
                #if cur_row > 100:
                #    break

    #WRITE SEGMENT FOOTER
    item_template = template_loader.load_template('template_afj_footer.html')
    output_segment_file.write(item_template.render({'datarow': 0}, loader=template_loader).encode('utf-8'))
Example #23
class Htmlify:
    def __init__(self, group, ladder):
        self.group = group
        self.loader = FileLoader('html')
        self.template = self.loader.load_template('ladder_template.html')

        # put data in format ready for html templating
        self.html_players = []
        i = 0
        for player in ladder:
            i += 1
            self.html_players.append({'name': player.name, 'rank': i})

    def gen_html(self):
        html = self.template.render(locals(),
                                    loader=self.loader).encode('utf-8')
        self.write_html(html)

    def write_html(self, html):
        filepath = 'html/out/'
        try:
            with open(filepath + self.group + '.html', 'w') as f:
                f.writelines(html)
        except IOError:
            os.mkdir(filepath)
            self.write_html(html)
Example #24
def create_photo_page(picture_id):

    try:
        post = graph.get_object(picture_id)

        # for a reason I ignore the message from the post of this image
        # is in the name...
        if ("name" in post):
            post["name"] = cgi.escape(post["name"]).replace('\n', '<br />')
        if ("message" in post):
            post["message"] = cgi.escape(post["message"]).replace(
                '\n', '<br />')

        loader = FileLoader('html')
        template = loader.load_template('photo.html')
        date = parser.parse(post["created_time"])

        # TODO verify that the extension is correct...
        download_picture(post["source"] + "?type=large", picture_id)
        photo_url = os.path.join("..", "pictures", picture_id + ".jpg")

        file_name = os.path.join("content", "photos", post["id"] + ".html")

        # Download all the images for the comments.
        if post.has_key("comments"):
            post["all_comments"] = process_comments(post)

        with open(file_name, 'wb') as f:
            f.write(
                template.render(
                    {
                        'post': post,
                        'date': date,
                        'photo': photo_url
                    },
                    loader=loader).encode('utf-8'))
        return True
    except facebook.GraphAPIError as e:
        print "Oops!  failed to get this object:" + str(
            picture_id) + "\nError: " + e.message
        return False
    except KeyError as e:
        print "Oops! Failed to find information for this image:" + str(
            picture_id) + "\nError: " + e.message
        return False
Example #25
def ProcessSlpTmpl(tmplfile):
    service_filter = 'http.rdf'
    services_list = GetSlpServices(service_filter)

    # loader = FileLoader('html')
    loader = FileLoader('.')
    template = loader.load_template(tmplfile)
    generated_html = template.render(
        {
            'filter': service_filter,
            'services': services_list
        }, loader=loader).encode('utf-8')

    outfile = tmplfile + ".htm"
    fil = open(outfile, 'w')
    fil.write(generated_html)
    fil.close()
    return outfile
Example #26
def write_image_file(photo_object):
    """
    :type photo_object: PhotographyItem
    """
    ##--------------------------------- Write photo page
    template_loader = FileLoader('templates')
    output_photo_index_file_name_and_path = "./_webroot/mission_images/meta/" + photo_object.filename + ".html"
    output_photo_index_file = open(output_photo_index_file_name_and_path, "w")
    output_photo_index_file.write("")
    output_photo_index_file.close()

    output_photo_index_file = open(output_photo_index_file_name_and_path, "a")
    item_template = template_loader.load_template('template_photo_page.html')
    output_photo_index_file.write(item_template.render({'timestamp': photo_object.timestamp,
                                                        'photo_num': photo_object.photo_num,
                                                        'mag_code': photo_object.mag_code,
                                                        'mag_number': photo_object.mag_number,
                                                        'photographer': photo_object.photographer,
                                                        'description': photo_object.description},loader=template_loader))
Example #27
def _get_service_file(squadron_dir,
                      service_name,
                      service_ver,
                      filename,
                      on_error=None,
                      config=None):
    """
    Grabs the named service file in a service directory

    Keyword arguments:
        squadron_dir -- base directory
        service_name -- the name of the service
        service_ver -- the version of the service
        filename -- the name of the service file without the extension
        on_error -- if not None, this value is returned instead of raising the error
        config -- if a dict, uses it to template the file before loading it
    """
    ex = None
    for ext in extensions:
        try:
            serv_dir = os.path.join(squadron_dir, 'services', service_name,
                                    service_ver)
            service_file = os.path.join(serv_dir, filename + ext)
            if config:
                loader = FileLoader(squadron_dir)
                template = loader.load_template(service_file)
                return yaml.load(template.render(config, loader=loader))
            else:
                with open(service_file, 'r') as sfile:
                    return yaml.load(sfile.read())
        except (OSError, IOError) as e:
            if e.errno == errno.ENOENT:
                ex = e
            else:
                raise e

    if on_error is not None:
        return on_error
    raise ex
Example #28
def gen():
    xmlTree = ET.parse('../UserFiles/mapping.xml')
    root = xmlTree.getroot()

    # create a dictionary of list from mapping xml file
    mapping_dict = {}
    i = 0
    for element in root:
        if element.tag == 'process_mapping':
            mapping_dict[element.get('target')] = [element.get('target'), element.get('target').split('_')[1]]

    list_map = list(mapping_dict)
    downloadShell_prjs = []
    for i in range(len(list_map)):
            downloadShell_prjs.append({'nodeName': list_map[i],
                                       'elfName': list_map[i] + '.elf',
                                       'instanceID': list_map[i].split('_')[1]})
    loader = FileLoader('')
    template = loader.load_template('Templates/download_template.sh')

    with open('../download.sh', 'w', encoding='utf-8') as f:
        f.write(template.render(locals()))
Example #29
def create_photo_page(picture_id):

   try:
       post = graph.get_object(picture_id)

       # for a reason I ignore the message from the post of this image
       # is in the name...
       if("name" in post):
           post["name"] = cgi.escape(post["name"]).replace('\n','<br />')
       if("message" in post):
           post["message"] = cgi.escape(post["message"]).replace('\n','<br />')

       loader   = FileLoader('html')
       template = loader.load_template('photo.html')
       date     = parser.parse(post["created_time"])

       # TODO verify that the extension is correct...
       download_picture(post["source"] + "?type=large", picture_id)
       photo_url = os.path.join("..", "pictures", picture_id + ".jpg")

       file_name = os.path.join("content", "photos", post["id"] + ".html")

       # Download all the images for the comments.
       if post.has_key("comments") :
           post["all_comments"] = process_comments(post)

       with open(file_name, 'wb') as f:
           f.write(template.render({'post': post, 'date' : date, 'photo' : photo_url},
                                   loader=loader).encode('utf-8'))
       return True
   except facebook.GraphAPIError as e:
       print "Oops!  failed to get this object:" + str(picture_id) + "\nError: "+e.message
       return False
   except KeyError as e:
       print "Oops! Failed to find information for this image:" + str(picture_id) + "\nError: "+e.message
       return False
Example #30
__author__ = 'Feist'
import csv
from quik import FileLoader

output_file_name_and_path = "./output/output.html"
outputFile = open(output_file_name_and_path, "w")
outputFile.write("")
outputFile.close()

outputFile = open(output_file_name_and_path, "a")

template_loader = FileLoader('templates')

#WRITE HEADER
item_template = template_loader.load_template('template_header.html')
outputFile.write(item_template.render({'datarow': 0}, loader=template_loader).encode('utf-8'))

#WRITE SEGMENT BODY ITEMS
cur_row = 0
input_file_path = "E:\Apollo17.org\MISSION_DATA\A17 master TEC and PAO utterances.csv"
utterance_reader = csv.reader(open(input_file_path, "rU"), delimiter='|')
for utterance_row in utterance_reader:
    cur_row += 1
    timeid = "timeid" + utterance_row[1].translate(None, ":")
    if utterance_row[1] != "": #if not a TAPE change or title row
        item_template = template_loader.load_template('template_timelineitem.html')
        outputFile.write(item_template.render({'timeid': timeid, 'timestamp': utterance_row[1], 'who': utterance_row[2], 'words': utterance_row[3]}, loader=template_loader).encode('utf-8'))


#WRITE FOOTER
item_template = template_loader.load_template('template_footer.html')
Example #31
    for utterance_row in utterance_reader:
        temp_obj = TranscriptItem(get_sec(utterance_row[0]), utterance_row[0], utterance_row[1], utterance_row[2], "TEC")
        master_list.append(temp_obj)

    return sorted(master_list, key=get_key, reverse=False)


output_file_name_and_path = "./_webroot/A14FJ_remaining.html"
output_file = open(output_file_name_and_path, "w")
output_file.write("")
output_file.close()

output_file = open(output_file_name_and_path, "a")

# WRITE HEADER
template_loader = FileLoader('templates')
item_template = template_loader.load_template('template_afj_header.html')
output_file.write(item_template.render({'datarow': 0}, loader=template_loader).encode('utf-8'))

cur_row = 0
# input_file_path = "../MISSION_DATA/A17 master TEC and PAO utterances.csv"
# utterance_reader = csv.reader(open(input_file_path, "rU"), delimiter='|')
combined_list = get_combined_transcript_list()

timestamp_start_int = 913950

for combined_list_item in combined_list:
    timeid = "timeid" + combined_list_item.timestamp.translate(None, ":")
    if combined_list_item.timestamp != "":  # if not a TAPE change or title row
        if int(combined_list_item.timestamp.translate(None, ":")) >= timestamp_start_int:
            cur_row += 1
Example #32
 def _create_conf_file(self):
     loader = FileLoader("/")
     template = loader.load_template(self.env.abs_conf_tmpl())
     nc_conf = template.render(self.env)
     with open(self.env.abs_conf_file(), "w") as fd:
         fd.write(nc_conf)
Example #33
            # if len(timeStamp) == 10:
            #     timeStamp = "0" + timeStamp
            message = match.group(2)
            timeStrISO = datetime.strftime(timeVal, "%Y-%m-%dT%H:%M:%S-06:00")
            eventDataRow = timeStrISO + '|' + message
            eventList.append(eventDataRow)
    eventList.sort()
    f = open(runProcessedPath + "/system_events.csv", "w")
    print('Writing consolidated event list in ' + runProcessedPath +
          '/system_events.csv')
    for event in eventList:
        f.write(event + '\n')
    f.close()

    # ------------------ Write system events html file
    template_loader = FileLoader('./templates')

    output_TOC_file_name_and_path = runProcessedPath + "/system_events.html"
    output_TOC_file = open(output_TOC_file_name_and_path, "w")
    output_TOC_file.write("")
    output_TOC_file.close()

    output_TOC_file = open(output_TOC_file_name_and_path, "ab")

    # WRITE TOC ITEMS
    prev_depth = 0
    depth_comparison = "false"
    timestamp = ""
    inputFilePath = runProcessedPath + "/system_events.csv"
    csv.register_dialect('pipes',
                         delimiter='|',
Example #34
 def _create_conf_file(self):
     loader  = FileLoader('/')
     template = loader.load_template(self.env.abs_conf_tmpl())
     conf = template.render(self.env)
     with open(self.env.abs_conf_file(), 'w') as fd:
         fd.write(conf)
Example #35
def gen():
    src = r'../../UserFiles/process_codes'
    dest = r'../../templateEngine/applicationEngine'

    os.chdir(src)
    for file in glob.glob("*"):
        copyfile(src + "\\" + file, dest + "\\" + file)
    os.chdir('../../templateEngine/applicationEngine')
    xmlTree = ET.parse('../../UserFiles/hw_conf.xml')
    root = xmlTree.getroot()

    number_of_node = int(root[1][0].text) * int(root[1][1].text)

    xmlTree = ET.parse('../../UserFiles/application.xml')
    root = xmlTree.getroot()

    processes = []

    for element in root:
        if element.tag == 'process':
            temp = {}
            temp['process_name'] = element.get('name').split('_')[1]
            temp['numOfInp'] = element.get('numOfInp')
            temp['numOfOut'] = element.get('numOfOut')
            for subElement in element:
                temp2 = {}
                if subElement.tag == 'port':
                    temp2['ID'] = subElement.get('ID')
                    temp2['name'] = subElement.get('name')
                    temp2['type'] = subElement.get('type')
                    temp2['direction'] = subElement.get('direction')
                    temp2['direction_lowercase'] = subElement.get('direction').lower()
                    temp2['numOfToken'] = subElement.get('numOfToken')
                    temp2['process_name'] = element.get('name').split('_')[1]
                    if subElement.get('direction') == 'INP':
                        if subElement.get('init_val') != '':
                            k = 0
                            init_arr = []
                            for init_vall in subElement.get('init_val').split(','):
                                init_temp = {}
                                init_temp['index'] = k
                                init_temp['value'] = subElement.get('init_val').split(',')[k]
                                init_arr.append(init_temp)
                                k +=1
                            temp2.update({'initial_val': init_arr})
                            # temp2['init_val'] = subElement.get('init_val')
                    temp.update({subElement.get('name'): temp2})
                elif subElement.tag == 'source_file':
                    temp.update({'source_file': subElement.get('name').split('_')[1]})
            processes.append(temp)

    i = 0
    nodePR = []
    ports = []
    for i in range(number_of_node):
        nodePR = []
        ports = []
        for process in processes:
            if process['source_file'] == str(i):
                nodePR.append(process)
                for port in process:
                    if isinstance(process[port], dict):
                        ports.append(process[port])
        loader = FileLoader('')
        template = loader.load_template('Templates/node_template.c')
        with open('../../sw_sources/node_' + str(i) + '.c', 'w',encoding='utf-8') as f:
            L = ['#include "sys/alt_stdio.h"\n',
                 '#include "altera_avalon_fifo_regs.h"\n',
                 '#include "altera_avalon_fifo_util.h"\n',
                 '#include "sys/alt_irq.h"\n',
                 '#include <stdio.h>\n',
                 '#include <stdint.h>\n',
                 '#include "packet_transaction_util.h"\n',
                 '#include <unistd.h>\n\n',
                 '#define ALMOST_EMPTY 2\n',
                 '#define ALMOST_FULL 11\n']
            f.writelines(L)
            f.write(template.render(locals(), loader))
Example #36
# ----------------------------
def main():
    moduleManager.loadmodulelist('WebManager', configreader.globalconf)

    try:
        server = ThreadedHTTPServer(
            ('', int(configreader.webconf['httpport'])), HttpRequestHandler)
        if (configreader.webconf['usessl'] == 'yes'):
            # If ssl is activated, wrap the socket with SSL layer
            server.socket = ssl.wrap_socket(
                server.socket,
                keyfile=configreader.webconf['keyfile'],
                certfile=configreader.webconf['certfile'],
                server_side=True)
        print 'started httpserver...'
        server.serve_forever()
    except KeyboardInterrupt:
        print '^C received, shutting down server'
        server.socket.close()


# -------------------------------------
# Create global objects and run main()
# -------------------------------------
if __name__ == '__main__':
    # Create global objects that will be shared with all thread
    moduleManager = ModuleImporter(configreader.moduleconf)
    tplloader = FileLoader(configreader.webconf['templatesdir'])
    sessionmanager = SessionManager()
    main()
Example #37
def gen():

    tree = ET.parse('../UserFiles/hw_conf.xml')
    root = tree.getroot()

    project_properties = {
        'DEVICE_FAMILY': root[0][0].text,
        'DEVICE': root[0][1].text
    }
    NoC = {
        'RowNo': root[1][0].text,
        'ColNo': root[1][1].text,
        'AddrWidth': root[1][2].text,
        'DataWidth': root[1][3].text,
        'PackWidth': root[1][4].text,
        'PhyCh': root[1][5].text,
        'PhyChAddr': root[1][6].text,
        'PhyRoCh': root[1][7].text,
        'PhyRoChAddr': root[1][8].text,
        'RoCh': root[1][9].text,
        'RoChAddr': root[1][10].text,
        'ViCh': root[1][11].text,
        'ViChAddr': root[1][12].text
    }

    tiles = []
    processors = []
    numOfTiles = int(NoC['RowNo']) * int(NoC['ColNo'])

    RowNoList = []
    ColNoList = []
    for i in range(0, int(NoC['RowNo'])):
        RowNoList.insert(i, i)
    for i in range(0, int(NoC['ColNo'])):
        ColNoList.insert(i, i)

    # must change
    # needs to insert the number of processors in Project-Properties
    numOfProcessors = numOfTiles

    # default values for tiles
    for j in range(0, int(NoC['RowNo'])):
        for i in range(0, int(NoC['ColNo'])):
            tiles.insert(
                j * int(NoC['ColNo']) + i, {
                    'j': j,
                    'i': i,
                    'number': j * int(NoC['ColNo']) + i,
                    'node_type': 'processor',
                    'node_name': 'NIOS II/e',
                    'memory_size': '32768.0',
                    'fifo_in_depth': '16',
                    'fifo_out_depth': '16'
                })

    # default values for processors
    for i in range(0, numOfProcessors):
        processors.insert(
            i, {
                'number': str(i),
                'node_type': 'processor',
                'node_name': 'NIOS II/e',
                'memory_size': '16384.0',
                'fifo_in_depth': '16',
                'fifo_out_depth': '16'
            })

    # change the values of tiles and processors, if it needs
    for tile in root[2]:
        if tile[0].text == "processor":
            for child in tile:
                processors[int(
                    tile[0].attrib['number'])][child.tag] = child.text
        for child in tile:
            tiles[int(tile.attrib['number'])][child.tag] = child.text

    loader = FileLoader('Templates')

    # generate qsys_system.tcl
    template = loader.load_template('qsys_system.tcl')
    with open("../hw_scripts/qsys_system.tcl", 'w', encoding='utf-8') as f:
        f.write(template.render(locals()))

    # generate NoC.vhd
    template = loader.load_template('NOC.vhd')
    with open("../hw_sources/NOC.vhd", 'w', encoding='utf-8') as f:
        f.write(template.render(locals()))

    # generate NoC_hw.tcl
    template = loader.load_template('NoC_hw.tcl')
    with open("../hw_scripts/NoC_hw.tcl", 'w', encoding='utf-8') as f:
        f.write(template.render(locals()))

    # generate synth_qsys.tcl
    template = loader.load_template('synth_qsys.tcl')
    with open("../hw_scripts/synth_qsys.tcl", 'w', encoding='utf-8') as f:
        f.write(template.render(locals()))

    # generate constraints.tcl
    template = loader.load_template('constraints.tcl')
    with open("../hw_scripts/constraints.tcl", 'w', encoding='utf-8') as f:
        f.write(template.render(locals()))

    # generate wrapper.v
    template = loader.load_template('wrapper.v')
    with open("../hw_sources/wrapper.v", 'w', encoding='utf-8') as f:
        f.write(template.render(locals()))
Example #38
# Work through the image files and create a dictionary of data for them.
for f in vid_files:
    v_id  = regex.search(f).group(1)
    image = os.path.join("pictures/" + v_id + ".jpg")
    videos.append({"id" : v_id, "file" : f, "image" : image})

# PICTURES
# ----------
pictures = list_files(os.path.join("content", "pictures"), "[^_]*\.jpg")

# POSTS
# ----------
post_files = list_files(os.path.join("content", "posts"), ".*\.html")
posts = []
# Work through the image files and create a dictionary of data for them.
for f in post_files:
    p_id  = regex.search(f).group(1)
    posts.append({"page" : p_id, "file" : f})

posts = sorted(posts, key=lambda p : int(p.get("page")))

with open(index_name, 'wb') as f:
    loader   = FileLoader('html')
    template = loader.load_template('index.html')
    f.write(template.render({'user_icons': user_icons, 'friend_count': len(user_icons),
                             'videos': videos, 'video_count': len(videos),
                             'pictures': pictures,
                             'posts': posts},
                             loader=loader).encode('utf-8'))
    
Example #39
from quik import FileLoader

loader = FileLoader('../templates/tests')
template = loader.load_template('quikTemplate.html')
print template.render({'author': 'Thiago Avelino'},
                      loader=loader).encode('utf-8')
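For reference, the template loaded above would use quik's Velocity-style placeholder syntax; a sketch of what quikTemplate.html might contain (contents assumed, not taken from the original project):

# quikTemplate.html could hold markup such as:
#   <h1>Hello from @author</h1>
# render() then substitutes @author with the value passed in the context dict.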
Example #40
from os.path import isfile, getsize
from mimetypes import guess_type
from datetime import datetime
from quik import FileLoader
from db import *
import threading

loader = FileLoader('')


def get_size(resource):
    file_size = 0
    if isfile(resource):
        file_size = getsize(resource)
    return file_size


def get_mime_type(file):

    mime_type = b'text/html'

    if get_size(file) > 0:
        (mime_type, encoding) = guess_type(file)
        mime_type = mime_type.encode('ASCII')
    return mime_type


def gen_status(file_size):

    status = b'HTTP/1.1 '
Example #41
 def __init__(self, basedir):
     if not os.path.isabs(basedir):
         basedir = os.path.abspath(basedir)
     self.loader = FileLoader(basedir)
     self.basedir = basedir
Example #42
class DirectoryRender:
    def __init__(self, basedir):
        if not os.path.isabs(basedir):
            basedir = os.path.abspath(basedir)
        self.loader = FileLoader(basedir)
        self.basedir = basedir

    def render(self, destdir, inputhash, resources, dry_run = False,
            currpath = "", config = {}):
        """
        Transforms all templates and downloads all files in the directory
        supplied with the input values supplied. Output goes in destdir.

        Keyword arguments:
            destdir -- the directory to put the rendered files into
            inputhash -- the dictionary of input values
        """
        items = sorted(os.listdir(os.path.join(self.basedir, currpath)))

        if currpath == "" and 'config.sq' in items:
            # do config.sq stuff only in the top level directory
            config_items = self.parse_config_sq(
                    os.path.join(self.basedir, 'config.sq'), inputhash)

            real_config = {}
            for config_item in config_items:
                real_config[config_item.filepath] = config_item

            config = real_config
            items.remove('config.sq')

        result = {}
        already_configured = set()
        for filename in items:
            # the path of the source file relative to the basedir
            relpath = os.path.join(currpath, filename)

            # the absolute path of the source file
            abs_source = os.path.join(self.basedir, relpath)

            # the absolute path of the destination file, templated
            dest = self._template(os.path.join(destdir, relpath), inputhash)

            if os.path.isdir(abs_source):
                mkdirp(dest)
                # Needs a slash because this is a directory
                stripped = dest[len(destdir)+1:] + os.path.sep

                key = relpath + os.path.sep
                if key in config:
                    result[key] = config[key].atomic

                apply_config(destdir, get_config(dest, stripped, config, already_configured), dry_run)
                result.update(self.render(destdir, inputhash, resources, dry_run, relpath, config))
            else:
                ext = get_sq_ext(filename)
                if ext in extension_handles:
                    # call the specific handler for this file extension
                    finalfile = extension_handles[ext](**{'loader':self.loader,
                        'inputhash':inputhash,
                        'relpath':relpath,
                        'abs_source': abs_source,
                        'dest':dest,
                        'resources':resources})
                else:
                    # otherwise, just copy the file
                    copyfile(abs_source, dest)
                    finalfile = dest

                finalext = get_file_ext(finalfile)
                stripped = finalfile[len(destdir)+1:]
                if os.path.isdir(finalfile):
                    stripped = stripped + os.path.sep
                    if stripped in config:
                        result[stripped] = config[stripped].atomic

                apply_config(destdir, get_config(finalfile, stripped, config, already_configured), dry_run)

                # if there's an automatic way to test this type of file,
                # try it
                if finalext in autotest.testable:
                    if not autotest.testable[finalext](finalfile):
                        raise ValueError('File {} didn\'t pass validation for {}'.format(finalfile, finalext))

        return result

    def _template(self, item, config):
        template = Template(item)
        return template.render(config)

    def parse_config_sq(self, filename, inputhash):
        """
        Parses a config.sq file which contains metadata about files in this
        directory.

        Keyword arguments:
            filename -- the config.sq file to open
            inputhash -- the service variables to template config.sq with
        """
        # Conversion functions from string to the correct type, str is identity
        convert = {'atomic': bool, 'user':str, 'group':str, 'mode':str}
        require_dir = set(['atomic'])
        result = []

        template = self.loader.load_template(filename)
        contents = template.render(inputhash, loader=self.loader)

        for line in contents.split('\n'):
            # These are the default values
            item = {'atomic': False, 'user':None, 'group':None, 'mode':None}

            if line == "" or line.startswith("#"):
                continue

            args = line.split()
            if len(args) < 2:
                raise ValueError('Line "{}" in file {} isn\'t formatted correctly'.format(line, filename))

            filepath = args[0]
            for arg in args[1:]:
                (key, value) = arg.split(':', 2)

                if key in require_dir:
                    # if this key requires us to be a directory, check
                    if not filepath.endswith(os.path.sep):
                        raise ValueError(('Key {} requires entry {} to end with' +
                                ' a slash (must be directory) in file {}').format(key, filepath, filename))

                #Only do work if we know about this parameter
                if key in convert:
                    item[key] = convert[key](value)
                else:
                    raise ValueError('Unknown config.sq value {} in file {}'.format(key, filename))
            result.append(FileConfig(filepath, item['atomic'], item['user'], item['group'], item['mode']))

        return result
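From the parsing rules in parse_config_sq above, a config.sq file holds one entry per line: a path followed by space-separated key:value options drawn from atomic, user, group and mode, where atomic entries must end with a path separator (i.e. name a directory). A hypothetical example:

# config.sq (hypothetical contents; blank lines and '#' lines are skipped)
cache/ atomic:true
app.conf user:root group:root mode:0644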
Example #43
 def __init__(self):
   super(SliceView, self).__init__()
   self.filename = "%s/slice" % self.filename
   self.loader = FileLoader(self.filename)
Example #44
File: main.py  Project: tasaif/febe
#!/usr/bin/env python

import web
import os
from quik import FileLoader
loader = FileLoader('html')


class Template:
    def GET(self, path):
        user_data = web.input()
        template = loader.load_template("../template/" + path)
        return template.render(user_data, loader=loader).encode('utf-8')


urls = (
    '/template/(.*)',
    'Template',
)
app = web.application(urls, globals())

if __name__ == "__main__":
    app.run()
Example #45
def main_emucamp_engine():
	logging.info('Emucamp-Engine')

	##	Copy the Twitter Bootstrap to the www root
	for resource_folder in RESOURCES_FOLDERS:
		extern_dest = os.path.join(SITE_ROOT, resource_folder)
		if os.path.exists(extern_dest):
			shutil.rmtree(extern_dest)
		shutil.copytree(os.path.join(RESOURCES_ROOT, resource_folder), extern_dest)

	##	Initialize the mime type for further detection
	init_mime_detection()

	##	Initialize the template handler (Quik)
	quik_loader = FileLoader(RESOURCES_ROOT)

	##	Main 'machines' loop
	if len(machine_update_only) > 0:
		download_machine_list = machine_update_only
	else:
		download_machine_list = machine_list

	for machine_short_name in download_machine_list:
		tree = parse_machine_from_xml_source(machine_short_name)
		if tree is not None:
			root = tree.getroot()
			if root.tag == 'data':
				for child in root:
					logging.debug('child.tag = ' + child.tag)
					##	Look for a 'machine'
					if child.tag == 'machine':
						machine = child
						machine_name = machine.get('name')
						machine_type = machine.get('type')
						print('Found this machine : ' + machine_name)
						print('-------------------------------------')
						##	Creates a new html template
						template_machine = quik_loader.load_template(INPUT_PAGES['machine'])

						quik_interface['machine_name'] = machine_name
						quik_interface['machine_filename'] = conform_string_to_filename(machine_name)
						quik_interface['emulator_list'] = []

						##	Creates the folder to store the machine's data
						machine_root_path = os.path.join(SITE_ROOT, conform_string_to_filename(machine_name))
						safe_makedir(machine_root_path)

						found_a_description = False

						for machine_child in machine:
							##	Is this the description of the machine ?
							if machine_child.tag == 'description':
								quik_interface['machine_description'] = extract_text(machine_child, machine_root_path)
								quik_interface['machine_description_source_url'] = extract_source_url(machine_child, machine_root_path)
								quik_interface['machine_description_source'] = quik_interface['machine_description_source_url']
								found_a_description = True

							##	Is this an emulator for this machine ?
							if machine_child.tag == 'emulator':
								emulator = machine_child

								##	What is the name of this emulator
								emulator_name = emulator.get('name')

								emulator_root_path = os.path.join(machine_root_path, conform_string_to_filename(emulator_name))
								safe_makedir(emulator_root_path)

								current_emulator = {	'name': emulator_name, 'emulator_description': None,
														'emulator_version_list':[]	}

								##	Is this an emulator for this machine
								for emulator_child in emulator:
									##	Is this the description of this emulator ?
									if emulator_child.tag == 'description':
										current_emulator['emulator_description'] = extract_text(emulator_child, emulator_root_path)
										current_emulator['emulator_description_source_url'] = extract_source_url(emulator_child, emulator_root_path)
										current_emulator['emulator_description_source'] = current_emulator['emulator_description_source_url']

									if emulator_child.tag == 'platform':
										platform = emulator_child
										platform_name = platform.get('name')
										print(emulator_name + ', found a version for the ' + platform_name + ' platform.')

										platform_root_path = os.path.join(emulator_root_path, conform_string_to_filename(platform_name))
										safe_makedir(platform_root_path)
										extract_source_url(platform, platform_root_path, 'download_page.url')

										##	Tries to download the binary
										download_result = download_emulator_binary(platform, platform_root_path)

										##  If the previous method failed, try another technique
										if download_result['emulator_local_filename'] is None:
											for platform_child in platform:
												if platform_child.tag == 'source_url':
													download_page_url = platform_child.text
													if download_page_url is not None:
														print('DownloadEmulatorBinary() : download_page_url = ' + download_page_url)
														download_page_url = download_page_url.strip()
														start_with = child.get('start_with')
														end_with = child.get('end_with')
														download_result = better_binary_download({'machine':  {'short_name': machine_short_name, 'name': machine_name },
																			                        'emulator': {'name': emulator_name,},
																			                        'platform': {'name': platform_name, 'root_path': platform_root_path   },
																			                        'url':      {'download_page': download_page_url, 'start': start_with, 'end': end_with}})

										if download_result['emulator_local_filename'] is not None:
											emulator_download_url = conform_string_to_filename(machine_name) + '/' +  conform_string_to_filename(emulator_name) + '/' + conform_string_to_filename(platform_name) + '/' + download_result['emulator_filename']
										else:
											##  Fetch the previous binary from local disk
											previous_download_result = fetch_previous_binary_from_disk(platform, platform_root_path)
											if previous_download_result is not None:    ## UNFINISHED
												download_result['emulator_filename'] = previous_download_result['emulator_filename']
												download_result['emulator_size'] = previous_download_result['emulator_size']
												# download_result['emulator_download_page'] = '' ## previous_download_result['emulator_download_page']
												emulator_download_url = platform_root_path + '/' + previous_download_result['emulator_filename']
												download_result['emulator_updated_on'] = previous_download_result['emulator_updated_on']
											else:
												emulator_download_url = None

										(current_emulator['emulator_version_list']).append({'emulator_platform':platform_name,
																							'emulator_filename': download_result['emulator_filename'],
																							'emulator_download_url': emulator_download_url,
																							'emulator_size': download_result['emulator_size'],
																							'emulator_download_page': download_result['emulator_download_page'],
																							'emulator_download_page_truncated': urlparse.urlsplit(download_result['emulator_download_page']).netloc,
																							'emulator_updated_on': download_result['emulator_updated_on']
																							})

								(quik_interface['emulator_list']).append(current_emulator)

						##  Get the description of the machine
						if not found_a_description:
							try:
								wiki_page = wikipedia.page(machine_name)
								quik_interface['machine_description'] = wikipedia.summary(machine_name, sentences = 3) ##.encode('utf-8')
								quik_interface['machine_description_source_url'] = wiki_page.url
								quik_interface['machine_description_source'] = wiki_page.url
							except Exception: ##wikipedia.exceptions.DisambiguationError as e:
								##  print e.options
								quik_interface['machine_description'] = "No description found :'("
								quik_interface['machine_description_source_url'] = ""
								quik_interface['machine_description_source'] = ""
								pass

						##  Connect to Pouet and see if there's any prod (game, demo) to download
						print('----------------------------------------------------------------------')
						print('Connect to Pouet and see if there is any prod (game, demo) to download')
						# machine_get_pouet_prods(machine_short_name)
						print('----------------------------------------------------------------------')

						print('----------------------------')
						print('Creates the new machine page')
						##	Creates the new 'machine' page
						##	Render the new html page
						html_output = template_machine.render(quik_interface, quik_loader) ##.encode('utf-8')
						##	Saves the page as a html file
						f = codecs.open(os.path.join(SITE_ROOT, conform_string_to_filename(machine_name) + '.html'), 'w', 'utf-8')
						f.write(html_output)
						f.close()

						print('------------------------------')

	##
	## index.html
	##
	##	Builds the main index
	##  Loop through the whole list of machines
	for machine_name in machine_list:
		tree = parse_machine_from_xml_source(machine_name)
		if tree is not None:
			root = tree.getroot()
			if root.tag == 'data':
				for child in root:
					logging.debug('child.tag = ' + child.tag)
					##	Look for a 'machine'
					if child.tag == 'machine':
						machine = child
						machine_name = machine.get('name')
						machine_type = machine.get('type')

						machine_site_path = os.path.join(SITE_ROOT, conform_string_to_filename(machine_name))
						if os.path.exists(machine_site_path):
							##  Add this machine into the main list
							(quik_interface['machine_list'][machine_type.lower()]).append({'name': machine_name, 'page_url': conform_string_to_filename(machine_name) + '.html'})
							##  Fetch the latest downloaded emulators
							tmp_emulator_update_list = cache_fetch_emulators_update(machine_site_path)
							for _update in tmp_emulator_update_list:
								quik_interface['emulator_update_list'].append(_update)

	##  Generates the Index page based on the whole list
	if G_CREATE_INDEX and len(machine_list) > 0:

		##  Sort the list of updates by date.
		if len(quik_interface['emulator_update_list']) > 0:
			sorted_emulator_update_list = sorted(quik_interface['emulator_update_list'], key = itemgetter('updated_on'), reverse = True)
			quik_interface['emulator_update_list'] = sorted_emulator_update_list[0:5]
			quik_interface['emulator_full_update_list'] = sorted_emulator_update_list

			for _year in range(datetime.date.today().year + 1, 1970, -1):
				year_list = []
				for emu_update in sorted_emulator_update_list:
					if emu_update['updated_on'].find(str(_year)) != -1:
						# print(emu_update['updated_on'])
						year_list.append(emu_update)

				if len(year_list) > 0:
					logging.info('emulator_full_update_list_by_year[] : found ' + str(len(year_list)) + ' emulators.')
					quik_interface['emulator_full_update_list_by_year'].append({'year':_year, 'update_list':year_list})

		template_index = quik_loader.load_template(INPUT_PAGES['index'])
		html_output = template_index.render(quik_interface, quik_loader).encode('utf-8')
		f = codecs.open(os.path.join(SITE_ROOT, 'index.html'), 'w', 'utf-8')
		f.write(html_output)
		f.close()

		template_index = quik_loader.load_template(INPUT_PAGES['update_log'])
		html_output = template_index.render(quik_interface, quik_loader).encode('utf-8')
		f = codecs.open(os.path.join(SITE_ROOT, 'update_log.html'), 'w', 'utf-8')
		f.write(html_output)
		f.close()

	##
	## about.html
	##
	##	Builds the about page
	template_about = quik_loader.load_template(INPUT_PAGES['about'])
	html_output = template_about.render(quik_interface, quik_loader).encode('utf-8')
	f = codecs.open(os.path.join(SITE_ROOT, 'about.html'), 'w', 'utf-8')
	f.write(html_output)
	f.close()
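Every page in the generator above follows the same quik pipeline: load a template with a FileLoader, render it against the interface dictionary, and write the result out as UTF-8. A minimal hedged sketch of that pipeline, with the template name, output path and context key invented for illustration:

import codecs
import os
from quik import FileLoader

quik_loader = FileLoader('templates')                  # template directory is an assumption
template = quik_loader.load_template('machine.html')   # illustrative template name
html_output = template.render({'machine_name': 'Example Machine'}, quik_loader)
with codecs.open(os.path.join('site', 'machine.html'), 'w', 'utf-8') as f:
    f.write(html_output)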
示例#46
0
def gen():
    xmlTree = ET.parse('../../UserFiles/hw_conf.xml')
    root = xmlTree.getroot()

    number_of_node = int(root[1][0].text) * int(root[1][1].text)

    xmlTree = ET.parse('../../UserFiles/application.xml')
    root = xmlTree.getroot()

    processes = []

    for element in root:
        if element.tag == 'process':
            temp = {}
            temp['process_name'] = element.get('name')
            temp['numOfInp'] = element.get('numOfInp')
            temp['numOfOut'] = element.get('numOfOut')
            for subElement in element:
                temp2 = {}
                if subElement.tag == 'port':
                    temp2['ID'] = subElement.get('ID')
                    temp2['name'] = subElement.get('name')
                    temp2['type'] = subElement.get('type')
                    temp2['direction'] = subElement.get('direction')
                    temp2['numOfToken'] = subElement.get('numOfToken')
                    temp.update({subElement.get('name'): temp2})
                elif subElement.tag == 'source_file':
                    temp.update(
                        {'source_file': subElement.get('name').split('_')[1]})
            processes.append(temp)
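    # At this point each entry of `processes` is a dict shaped roughly like the
    # following (names and counts are illustrative, not taken from application.xml):
    #   {'process_name': 'process_0', 'numOfInp': '1', 'numOfOut': '1', 'source_file': '0',
    #    'e0_1': {'ID': '0', 'name': 'e0_1', 'type': 'int', 'direction': 'OUT', 'numOfToken': '1'}}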

    i = 0
    for i in range(number_of_node):
        edges = []
        edges2 = []
        input_edges = []
        output_edges = []
        num_of_edges = 0
        flag = 0
        buffers = []
        temp1 = {}
        temp2 = {}
        temp3 = {}
        temp4 = {}
        counter = 0
        dirName = '../../packet_transaction_lib/src/src_' + str(i)
        os.makedirs(dirName, exist_ok=True)

        for process in processes:
            if process['source_file'] == str(i):
                for port in process:
                    if isinstance(process[port], dict):
                        if process[port]['direction'] == 'INP':
                            temp3 = {}
                            temp3['name'] = process[port]['name']
                            buffers.append(temp3)
                        num_of_edges += 1
                        temp1 = {}
                        temp2 = {}
                        temp1['name'] = process[port]['name']
                        for edge in edges:
                            flag = 0
                            if edge['name'] == temp1['name']:
                                if process[port]['direction'] == 'INP':
                                    temp4 = {}
                                    temp4['proc_num'] = process[
                                        'process_name'].split('_')[1]
                                    temp4['port_num'] = process[port]['ID']
                                    temp4['name'] = process[port]['name']
                                    input_edges.append(temp4)
                                num_of_edges -= 1
                                flag = 1
                                break  # 'continue' would let a later edge reset flag to 0 and re-append a duplicate
                        if flag == 1:
                            continue
                        edges.append(temp1)
                        # edges['name'] = process[port]['name']
                        if process[port]['direction'] == 'INP':
                            temp2['proc_num'] = process['process_name'].split(
                                '_')[1]
                            temp2['port_num'] = process[port]['ID']
                            temp2['name'] = process[port]['name']
                            input_edges.append(temp2)
                        elif process[port]['direction'] == 'OUT':
                            temp2['proc_num'] = process['process_name'].split(
                                '_')[1]
                            temp2['port_num'] = process[port]['ID']
                            temp2['name'] = process[port]['name']
                            output_edges.append(temp2)

        counter = 0
        flag = 0
        edges2 = []
        temp = {}
        for process in processes:
            if process['source_file'] == str(i):
                for port in process:
                    if isinstance(process[port], dict):
                        temp = {}
                        temp['num_of_inp_token'] = None
                        temp['num_of_out_token'] = None
                        temp['counter'] = counter
                        counter += 1
                        temp['name'] = process[port]['name']

                        for edge in edges2:
                            flag = 0
                            if edge['name'] == temp['name']:
                                if process[port]['direction'] == 'INP':
                                    edge['num_of_inp_token'] = 'P' + process[
                                        'process_name'].split(
                                            '_')[1] + '_INP' + process[port][
                                                'ID'] + '_NUM_OF_TOKEN'
                                    edge['size_of_token_type'] = 'P' + process[
                                        'process_name'].split(
                                            '_')[1] + '_INP' + process[port][
                                                'ID'] + '_TYPE'
                                    edge['buffer'] = process[port]['name']
                                elif process[port]['direction'] == 'OUT':
                                    edge['num_of_out_token'] = 'P' + process[
                                        'process_name'].split(
                                            '_')[1] + '_OUT' + process[port][
                                                'ID'] + '_NUM_OF_TOKEN'
                                counter -= 1
                                flag = 1
                                break  # 'continue' would let a later edge reset flag to 0 and re-append a duplicate
                        if flag == 1:
                            continue

                        temp['proc_src'] = process[port]['name'].split(
                            '_')[0][1]
                        temp['proc_dest'] = process[port]['name'].split(
                            '_')[1][1]
                        if process[port]['direction'] == 'INP':
                            temp['num_of_inp_token'] = 'P' + process[
                                'process_name'].split(
                                    '_')[1] + '_INP' + process[port][
                                        'ID'] + '_NUM_OF_TOKEN'
                            temp['size_of_token_type'] = 'P' + process[
                                'process_name'].split('_')[
                                    1] + '_INP' + process[port]['ID'] + '_TYPE'
                            temp['node_dest'] = process['source_file']
                            temp['buffer'] = process[port]['name']
                        if process[port]['direction'] == 'OUT':
                            temp['num_of_out_token'] = 'P' + process[
                                'process_name'].split(
                                    '_')[1] + '_OUT' + process[port][
                                        'ID'] + '_NUM_OF_TOKEN'
                            temp['size_of_token_type'] = 'P' + process[
                                'process_name'].split('_')[
                                    1] + '_OUT' + process[port]['ID'] + '_TYPE'
                            temp['node_src'] = process['source_file']
                        for process2 in processes:
                            for port2 in process2:
                                if isinstance(process2[port2], dict):
                                    if process2[port2]['name'] == temp[
                                            'name'] and process[
                                                'process_name'] != process2[
                                                    'process_name']:
                                        if process2[port2][
                                                'direction'] == 'INP':
                                            temp['node_dest'] = process2[
                                                'source_file']
                                        elif process2[port2][
                                                'direction'] == 'OUT':
                                            temp['node_src'] = process2[
                                                'source_file']
                        temp['external'] = 0
                        for process2 in processes:
                            for port2 in process2:
                                if isinstance(process2[port2], dict):
                                    if process2[port2]['name'] == temp[
                                            'name'] and process[
                                                'source_file'] != process2[
                                                    'source_file']:
                                        temp['external'] = 1
                        edges2.append(temp)

        loader = FileLoader('')
        template = loader.load_template(
            'Templates/packet_transaction_util_template.c')
        with open('../../packet_transaction_lib/src/src_' + str(i) +
                  '/packet_transaction_util.c',
                  'w',
                  encoding='utf-8') as f:
            L = [
                '#include "packet_transaction_util.h"\n',
                '#include "packet_transaction.h"\n\n'
            ]
            f.writelines(L)
            f.write(template.render(locals()))
def render(request, template_file, content=None):
    content = {} if content is None else content  # avoid a shared mutable default dict
    content.update({'request': request})
    loader = FileLoader(settings.QUIK_TEMPLATE_DIR, settings.DEBUG)
    template = loader.load_template(template_file)
    r = template.render(content, loader=loader).encode('utf-8')
    return HttpResponse(r)
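A hedged usage sketch for the helper above, written as a Django-style view function; the view name, template filename and context key are assumptions, only render()'s signature comes from the snippet:

def machine_detail(request, machine_id):
    # delegate template loading and rendering to the render() helper above
    return render(request, 'machine_detail.html', {'machine_id': machine_id})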
示例#48
0
																			   self.photographer,
																			   self.date_taken)


def get_key(some_object):
	return some_object.sortnumber


def get_sec(s):
	l = s.split(':')
	if l[0][0:1] != "-":
		return int(l[0]) * 3600 + int(l[1]) * 60 + int(l[2])
	else:
		return int(l[0]) * 3600 + (int(l[1]) * 60 * -1) + (int(l[2]) * -1)
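# Quick sanity check for get_sec(), assuming the "HHH:MM:SS" mission-elapsed-time
# strings used elsewhere in these scripts (the values below are illustrative):
assert get_sec("001:02:03") == 3723    # 1*3600 + 2*60 + 3
assert get_sec("-001:02:03") == -3723  # a leading '-' negates hours, minutes and seconds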

template_loader = FileLoader('./templates')

output_TOC_file_name_and_path = "../_webroot/TOC.php"
output_TOC_file = open(output_TOC_file_name_and_path, "w")
output_TOC_file.write("")
output_TOC_file.close()

output_TOC_file = open(output_TOC_file_name_and_path, "a")

output_TOC_index_file_name_and_path = "../_webroot/indexes/TOCData.csv"
output_TOC_index_file = open(output_TOC_index_file_name_and_path, "w")
output_TOC_index_file.write("")
output_TOC_index_file.close()

output_TOC_index_file = open(output_TOC_index_file_name_and_path, "a")
示例#49
0
 def __init__(self, basedir):
     if not os.path.isabs(basedir):
         basedir = os.path.abspath(basedir)
     self.loader = FileLoader(basedir)
     self.basedir = basedir
示例#50
0
output_TOC_file_name_and_path = "./_webroot/TOC.html"
output_TOC_file = open(output_TOC_file_name_and_path, "w")
output_TOC_file.write("")
output_TOC_file.close()

output_TOC_file = open(output_TOC_file_name_and_path, "a")

output_TOC_index_file_name_and_path = "./_webroot/indexes/TOCindex.csv"
output_TOC_index_file = open(output_TOC_index_file_name_and_path, "w")
output_TOC_index_file.write("")
output_TOC_index_file.close()

output_TOC_index_file = open(output_TOC_index_file_name_and_path, "a")

## ---------------- Write TOC
template_loader = FileLoader('templates')
#WRITE HEADER
template = template_loader.load_template('template_TOC_header.html')
output_TOC_file.write(template.render({'datarow': 0}, loader=template_loader).encode('utf-8'))

#WRITE TOC ITEMS
prev_depth = 0
timestamp = ""
inputFilePath = "../MISSION_DATA/Mission TOC.csv"
csv.register_dialect('pipes', delimiter='|', doublequote=True, escapechar='\\')
reader = csv.reader(open(inputFilePath, "rU"), dialect='pipes')
for row in reader:
    timestamp = row[0]
    item_depth = row[1]
    item_title = row[2]
    item_subtitle = row[3]
示例#51
0
class DirectoryRender:
    def __init__(self, basedir):
        if not os.path.isabs(basedir):
            basedir = os.path.abspath(basedir)
        self.loader = FileLoader(basedir)
        self.basedir = basedir

    def render(self,
               destdir,
               inputhash,
               resources,
               dry_run=False,
               currpath="",
               config={}):
        """
        Transforms all templates and downloads all files in the directory
        supplied with the input values supplied. Output goes in destdir.

        Keyword arguments:
            destdir -- the directory to put the rendered files into
            inputhash -- the dictionary of input values
        """
        items = sorted(os.listdir(os.path.join(self.basedir, currpath)))

        if currpath == "" and 'config.sq' in items:
            # do config.sq stuff only in the top level directory
            config_items = self.parse_config_sq(
                os.path.join(self.basedir, 'config.sq'), inputhash)

            real_config = {}
            for config_item in config_items:
                real_config[config_item.filepath] = config_item

            config = real_config
            items.remove('config.sq')

        result = {}
        already_configured = set()
        for filename in items:
            # the path of the source file relative to the basedir
            relpath = os.path.join(currpath, filename)

            # the absolute path of the source file
            abs_source = os.path.join(self.basedir, relpath)

            # the absolute path of the destination file, templated
            dest = self._template(os.path.join(destdir, relpath), inputhash)

            if os.path.isdir(abs_source):
                mkdirp(dest)
                # Needs a slash because this is a directory
                stripped = dest[len(destdir) + 1:] + os.path.sep

                key = relpath + os.path.sep
                if key in config:
                    result[key] = config[key].atomic

                apply_config(
                    destdir,
                    get_config(dest, stripped, config, already_configured),
                    dry_run)
                result.update(
                    self.render(destdir, inputhash, resources, dry_run,
                                relpath, config))
            else:
                ext = get_sq_ext(filename)
                if ext in extension_handles:
                    # call the specific handler for this file extension
                    finalfile = extension_handles[ext](**{
                        'loader': self.loader,
                        'inputhash': inputhash,
                        'relpath': relpath,
                        'abs_source': abs_source,
                        'dest': dest,
                        'resources': resources
                    })
                else:
                    # otherwise, just copy the file
                    copyfile(abs_source, dest)
                    finalfile = dest

                finalext = get_file_ext(finalfile)
                stripped = finalfile[len(destdir) + 1:]
                if os.path.isdir(finalfile):
                    stripped = stripped + os.path.sep
                    if stripped in config:
                        result[stripped] = config[stripped].atomic

                apply_config(
                    destdir,
                    get_config(finalfile, stripped, config,
                               already_configured), dry_run)

                # if there's an automatic way to test this type of file,
                # try it
                if finalext in autotest.testable:
                    if not autotest.testable[finalext](finalfile):
                        raise ValueError(
                            'File {} didn\'t pass validation for {}'.format(
                                finalfile, finalext))

        return result

    def _template(self, item, config):
        template = Template(item)
        return template.render(config)
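    # Hedged illustration of the path templating above, assuming quik's '@' placeholder
    # syntax (the path and value are made up):
    #   Template('conf/@env/app.conf').render({'env': 'prod'})  ->  'conf/prod/app.conf'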

    def parse_config_sq(self, filename, inputhash):
        """
        Parses a config.sq file which contains metadata about files in this
        directory.

        Keyword arguments:
            filename -- the config.sq file to open
            inputhash -- the service variables to template config.sq with
        """
        # Conversion functions from string to the correct type, str is identity
        convert = {'atomic': bool, 'user': str, 'group': str, 'mode': str}
        require_dir = set(['atomic'])
        result = []

        template = self.loader.load_template(filename)
        contents = template.render(inputhash, loader=self.loader)

        for line in contents.split('\n'):
            # These are the default values
            item = {'atomic': False, 'user': None, 'group': None, 'mode': None}

            if line == "" or line.startswith("#"):
                continue

            args = line.split()
            if len(args) < 2:
                raise ValueError(
                    'Line "{}" in file {} isn\'t formatted correctly'.format(
                        line, filename))

            filepath = args[0]
            for arg in args[1:]:
                (key, value) = arg.split(':', 2)

                if key in require_dir:
                    # if this key requires us to be a directory, check
                    if not filepath.endswith(os.path.sep):
                        raise ValueError(
                            ('Key {} requires entry {} to end with a slash '
                             '(must be directory) in file {}').format(
                                 key, filepath, filename))

                #Only do work if we know about this parameter
                if key in convert:
                    item[key] = convert[key](value)
                else:
                    raise ValueError(
                        'Unknown config.sq value {} in file {}'.format(
                            key, filename))
            result.append(
                FileConfig(filepath, item['atomic'], item['user'],
                           item['group'], item['mode']))

        return result
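A hedged end-to-end sketch of how DirectoryRender might be driven; the service directory, destination, input hash and the config.sq line are illustrative rather than taken from the squadron sources:

# services/web/1.0.0/config.sq might contain a line such as:
#   conf.d/ atomic:true user:www-data mode:0755
renderer = DirectoryRender('services/web/1.0.0')
rendered = renderer.render('/tmp/web-deploy', {'port': 8080}, resources={}, dry_run=True)
print(rendered)  # maps configured paths (e.g. 'conf.d/') to their atomic flag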
示例#52
0
__author__ = "Feist"
import csv
from quik import FileLoader

output_file_name_and_path = "./output/TOCtest.html"
outputFile = open(output_file_name_and_path, "w")
outputFile.write("")
outputFile.close()

outputFile = open(output_file_name_and_path, "a")

loader = FileLoader("templates")
# WRITE HEADER
template = loader.load_template("template_TOC_header.html")
outputFile.write(template.render({"datarow": 0}, loader=loader).encode("utf-8"))

# WRITE BODY ITEMS
prevDepth = 0
inputFilePath = "E:\Apollo17.org\MISSION_DATA\Mission TOC.csv"
reader = csv.reader(open(inputFilePath, "rU"), delimiter="|")
for row in reader:
    timestamp = row[0]
    itemDepth = row[1]
    itemTitle = row[2]
    itemSubtitle = row[3]
    loader = FileLoader("templates")
    template = loader.load_template("template_TOC_item.html")
    outputFile.write(
        template.render(
            {
                "timestamp": timestamp,
示例#53
0
 def render(self, template_name, *args, **kwargs):
     loader = FileLoader(self.template_dirs[0])
     template = loader.load_template(template_name)
     return template.render(kwargs, loader=loader).encode('utf-8')
示例#54
0
        tiles.insert(j*int(NoC['ColNo'])+i, {'j': j, 'i': i, 'number': j*int(NoC['ColNo'])+i, 'node_type': 'processor', 'node_name': 'NIOS II/e', 'memory_size': '16384.0',
                         'fifo_in_depth': '16', 'fifo_out_depth': '16'})

# default values for processors
for i in range(0, numOfProcessors):
    processors.insert(i, {'number': str(i), 'node_type': 'processor', 'node_name': 'NIOS II/e', 'memory_size': '16384.0', 'fifo_in_depth': '16', 'fifo_out_depth': '16'})

# change the values of tiles and processors, if it needs
for tile in root[2]:
    if tile[0].text == "processor":
        for child in tile:
            processors[int(tile[0].attrib['number'])][child.tag] = child.text
    for child in tile:
        tiles[int(tile.attrib['number'])][child.tag] = child.text

loader = FileLoader('Templates')

# generate qsys_system.tcl
template = loader.load_template('qsys_system.tcl')
with open("../../hw_scripts/qsys_system.tcl", 'w', encoding = 'utf-8') as f:
    f.write(template.render(locals()))

# generate NoC.vhd
template = loader.load_template('NOC.vhd')
with open("../../hw_sources/NOC.vhd", 'w', encoding = 'utf-8') as f:
    f.write(template.render(locals()))

# generate NoC_hw.tcl
template = loader.load_template('NoC_hw.tcl')
with open("../../hw_scripts/NoC_hw.tcl", 'w', encoding = 'utf-8') as f:
    f.write(template.render(locals()))
示例#55
0
def main() :
    parser = argparse.ArgumentParser(description='Read data from REGO2000')
    parser.add_argument('-m', '--mode', default='json')
    parser.add_argument('-c', '--configfile', required=True)
    parser.add_argument('-s', '--configsection', required=True)
    parser.add_argument('-p', '--path', required=True)
    parser.add_argument('-x', '--explore', action='store_true', help='use the list of starting points in <path> to fetch and enumerate all available URLs')
    args = parser.parse_args()

    config = configparser.ConfigParser()  # SafeConfigParser is a deprecated alias removed in Python 3.12
    config.read(args.configfile)
    host = config.get(args.configsection, 'host')
    vendorkey = config.get(args.configsection, 'vendorkey')
    devicepassword = config.get(args.configsection, 'devicepassword')
    userpassword = config.get(args.configsection, 'userpassword')
    key = makeKey(vendorkey, devicepassword, userpassword)
    htmltemplate = config.get(args.configsection, 'htmltemplate')

    if args.explore :
        startpaths = args.path.split(',')
        leaves = []
        while startpaths :
            sp = startpaths.pop().strip()
            sys.stderr.write("Trying " + sp + "\n")
            try :
                res = json.loads(get(host, sp, key).decode())
                if res["type"] == "refEnum" :
                    for nsp in res["references"] :
                        startpaths.append(nsp["id"])
                elif "value" in res :
                    leaves.append("scalar:" + sp)
                elif "values" in res :
                    leaves.append("vector:" + sp)
                else :
                    leaves.append("unknown:" + sp)
            except Exception:
                sys.stderr.write('Caught exception\n')
        leaves.sort()
        for leaf in leaves :
            print(leaf)
                    
    else :
        try:
            data = get(host, args.path, key)

            if args.mode == 'raw':
                print(data)
            elif args.mode == 'string':
                print(data.decode())
            elif args.mode == 'json':
                pdata = json.loads(data.decode())
                print(json.dumps(pdata, sort_keys=True, indent=4))
            elif args.mode == 'value':
                pdata = json.loads(data.decode())
                print(pdata["value"])
            elif args.mode == 'values':
                pdata = json.loads(data.decode())
                print(json.dumps(pdata["values"], sort_keys=True, indent=4))
            elif args.mode == 'errcodes':
                ecfile = config.get(args.configsection, 'errcodes')
                with open(ecfile) as data_file:
                    errcodes = json.load(data_file)
                pdata = json.loads(data.decode())
                notifications=[]
                for v in pdata["values"] :
                    note={}
                    note["orig"] = v
                    if str(v["ccd"]) in errcodes :
                        note["explanation"] = errcodes[str(v["ccd"])]
                    notifications.append(note)
                loader = FileLoader('')
                template = loader.load_template(htmltemplate)
                if notifications : 
                    print (template.render(locals(), loader=loader))
                else :
                    print ('<html><head></head><body><h1>No active notifications</h1></body></html>')
        except Exception:
            print ('N/A')
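main() above expects an INI file with one section per device, read back via config.get(); a hedged sketch that writes such a file (the section name and every value are placeholders, not real credentials):

import configparser

config = configparser.ConfigParser()
config['myheatpump'] = {
    'host': '192.168.1.50',            # device address (placeholder)
    'vendorkey': '0123456789abcdef',   # placeholder secrets
    'devicepassword': '0000',
    'userpassword': 'secret',
    'htmltemplate': 'notifications.html',
    'errcodes': 'errcodes.json',
}
with open('rego.ini', 'w') as cfgfile:
    config.write(cfgfile)

# an invocation might then look like (script name assumed):
#   python read_rego.py -c rego.ini -s myheatpump -p /some/device/path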
示例#56
0
            print html

    def call_api(self, api_url):
        auth_params = {"os_username": self.login, "os_password": self.password}
        encoded_params = urllib.urlencode(auth_params)
        api_url = "%s/%s&%s" % (self.srv_address, api_url, encoded_params)
        with contextlib.closing(self.opener.open(api_url)) as f:
            xml = parseString(f.read())
            return xml


if __name__ == "__main__":
    client = ApiClient()
    xml_doc = client.call_api("sr/jira.issueviews:searchrequest-xml/10280/SearchRequest-10280.xml?tempMax=1000")
    tickets = xml_doc.getElementsByTagName("item")
    print_fields = ["title", "description", "assignee", "reporter", "fixVersion"]
    c = {"tickets": []}
    for ticket in tickets:
        print_params = {}
        for field in print_fields:
            for f in ticket.getElementsByTagName(field):
                for node in f.childNodes:
                    print_params[field] = node.nodeValue
        c["tickets"].append(print_params)

    loader = FileLoader("")
    template = loader.load_template("tickets.html")
    f = open("result.html", "w+")
    f.write(template.render(c, loader=loader).encode("utf-8"))
    f.close()
示例#57
0
        tempRecord.append(photo_row[5])  #source attribution
        photo_list.append(tempRecord.copy())
    # print('Photo counter: ' + str(photocounter))
    # photocounter += 1

sorted_list = sorted(photo_list, key=operator.itemgetter(0))

for list_item in sorted_list:
    outputLine = '{0}|{1}|{2}|{3}|{4}|{5}\n'.format(list_item[1], list_item[2],
                                                    list_item[3], list_item[4],
                                                    list_item[5], list_item[6])
    output_photo_index_file.write(outputLine)
output_photo_index_file.close()

# -------------------- Write TOC
template_loader = FileLoader('./templates')

output_TOC_file_name_and_path = "../_website/_webroot/13/TOC.html"
output_TOC_file = open(output_TOC_file_name_and_path, "w")
output_TOC_file.write("")
output_TOC_file.close()

output_TOC_file = open(output_TOC_file_name_and_path, "ab")

output_TOC_index_file_name_and_path = "../_website/_webroot/13/indexes/TOCData.csv"
output_TOC_index_file = open(output_TOC_index_file_name_and_path, "w")
output_TOC_index_file.write("")
output_TOC_index_file.close()

output_TOC_index_file = open(output_TOC_index_file_name_and_path, "a")
示例#58
0
def get_loader():
    return FileLoader(os.getcwd())
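get_loader() simply roots a quik FileLoader at the current working directory; a short hedged usage sketch (the template name and context are illustrative):

loader = get_loader()
template = loader.load_template('report.html')
print(template.render({'title': 'Nightly report'}, loader=loader))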