Beispiel #1
0
def write(picArray):
    """Write the picture array out as a GNOME slideshow-background XML file.

    Prompts the user for a file name (``.xml`` is appended), asks for
    confirmation, then serializes ``picArray`` — objects exposing
    ``strTime``, ``secTime`` and ``path`` attributes — into the
    <background>/<static>/<transition> structure.  Returns None.
    """
    name = input(
        "Enter name of the file you want create?(without '.xml')\n") + ".xml"
    if not utils.askYN("Do you want to save your config to %s?(y/n) " %
                       (name)):
        return
    print()

    background = Element('background')
    starttime = SubElement(background, 'starttime')

    # Fixed slideshow start point: 2019-01-01 00:00:00.
    for tag, value in (('year', '2019'), ('month', '1'), ('day', '1'),
                       ('hour', '0'), ('minute', '0'), ('second', '0')):
        SubElement(starttime, tag).text = value

    for i, pic in enumerate(picArray):
        # Human-readable timestamp as an XML comment before each entry.
        background.append(Comment(pic.strTime))

        static = SubElement(background, 'static')
        duration = SubElement(static, 'duration')
        duration.text = str(pic.secTime) + ".0"
        path = SubElement(static, 'file')
        path.text = pic.path

        transition = SubElement(background, 'transition', {'type': 'overlay'})
        duration = SubElement(transition, 'duration')
        duration.text = '5.0'
        fromPath = SubElement(transition, 'from')
        fromPath.text = pic.path
        toPath = SubElement(transition, 'to')
        # The last picture wraps around and transitions to the first one.
        toPath.text = picArray[(i + 1) % len(picArray)].path

    print(utils.prettify(background))
    # FIX: use a context manager — the original opened the file and never
    # closed it, leaking the handle.
    with open(name, 'w') as f:
        f.write(utils.prettify(background))
Beispiel #2
0
 def __init__(self, app, category, folder_name):
     """Record where this subject lives, derive its display name, and
     eagerly load its sounds."""
     self.folder_name = folder_name
     self.category = category
     self.pretty_name = prettify(folder_name)
     self.image_path = "sounds/{0}/{1}/image.png".format(
         category.folder_name, folder_name)
     self.sounds = Sound.list(app, self)
Beispiel #3
0
async def parse_facts_component(html_content):
    """Extract the two 'facts' lists from the component and wrap them in XML.

    The first list comes from the <ol class="chart">, the second from the
    second <div class="form-group">; each is keyed by its <strong> heading.
    """
    sections = {}

    chart = html_content.find("ol", class_="chart")
    sections[html_content.find("strong").text.strip()] = [
        li.text.strip() for li in chart.findAll("li")
    ]

    group = html_content.findAll("div", class_="form-group")[1]
    sections[group.find("strong").text.strip()] = [
        li.text.strip() for li in group.findAll("li")
    ]

    pieces = []
    for heading, entries in sections.items():
        tag = normalize_text(heading)
        pieces.append(f"<{tag}>")
        # Skip empty entries, exactly like the original accumulator loop.
        pieces.extend(f"<value>{entry}</value>" for entry in entries if entry)
        pieces.append(f"</{tag}>")

    return prettify("".join(pieces), "facts")
Beispiel #4
0
async def parse_behind_the_scenes(link, progress):
    """Scrape the 'achter-de-schermen' page of *link* into <stat> XML.

    Each row contributes one <stat> with a left and a right
    description/value pair; the slider value (0-100) is split between the
    two sides and scaled down to 0-10.  On any failure the link is appended
    to a log file and None is returned.  *progress* is always invoked.
    """
    try:
        resp = await client.get(f"{link}/achter-de-schermen.html")
        soup = bs4.BeautifulSoup(resp.content, "lxml")
        rows_info = soup.findAll("div", class_="row")
        xml_data = ""
        for row_info in rows_info:
            right_text = row_info.find(
                "div", class_="col-md-5 col-sm-12 text-right"
            ).text.strip()
            left_text = row_info.find(
                "div", class_="col-md-5 col-sm-12 text-left"
            ).text.strip()
            left_value = int(row_info.input.get("data-slider-value"))
            right_value = 100 - left_value
            left_value, right_value = left_value / 10, right_value / 10
            left_xml = (
                f"<description>{left_text}</description><value>{left_value}</value>"
            )
            # FIX: the right-hand fragment used to end with its own
            # "</stat>", so every row emitted "...</stat></stat>".
            right_xml = f"<description>{right_text}</description><value>{right_value}</value>"
            xml_data += "<stat>"
            xml_data += left_xml
            xml_data += right_xml
            xml_data += "</stat>"
        return prettify(f"{xml_data}", "behind_the_scenes")

    except Exception:
        # Best effort: record the failing link for a later retry pass.
        with open(__file__ + ".log", "a") as f:
            f.write(link + "\n")
    finally:
        progress()
Beispiel #5
0
async def parse_numbers_page(link, progress):
    """Fetch the 'cijfers' page and combine its three sub-components.

    Maps each <h3> heading onto its column div, dispatches the Details,
    Feiten and short-term-improvements columns to their parsers, and joins
    the results.  Failures are logged to numbers.log and return None;
    *progress* is always invoked.
    """
    try:
        resp = await client.get(f"{link}/cijfers.html")
        soup = bs4.BeautifulSoup(resp.content, "lxml")
        info_index = soup.find(
            "div", attrs={"id": "cijfers"}, class_="profile-columns"
        )

        headings = [h3.text.strip() for h3 in info_index.findAll("h3")]
        columns = info_index.findAll("div", class_="col-sm-4")
        meta_data = dict(zip(headings, columns))

        details_info = await parse_details_component(meta_data["Details"])
        facts_info = await parse_facts_component(meta_data["Feiten"])
        sti_info = await parse_short_term_improvements_component(
            meta_data["Korte termijn verbeteringen"]
        )
        return prettify(f"{details_info}\n{facts_info}\n{sti_info}\n", "numbers")

    except Exception as e:
        print(e)
        # Remember the failing link so it can be reprocessed later.
        with open("numbers.log", "a") as f:
            f.write(link + "\n")

    finally:
        progress()
Beispiel #6
0
 async def get_remaining_map_string(self):
     """Return a numbered, prettified listing of maps still in the pool.

     Only maps whose value in ``self.map_dict`` is truthy (not yet banned)
     are listed, numbered from 1 in dict order.
     """
     remaining = [prettify(name)
                  for name, available in self.map_dict.items() if available]
     return "".join("{}. {}\n".format(pos, pretty)
                    for pos, pretty in enumerate(remaining, start=1))
    def update(self):
        """Rebuild the complete haproxy configuration and push it out.

        Reconfigures SSL first, then assembles every config section in a
        fixed order and hands the prettified result to haproxy.
        """
        self._config_ssl()

        # Section builders in the order haproxy expects them.
        builders = (
            self._config_global_section,
            self._config_defaults_section,
            self._config_stats_section,
            lambda: self._config_userlist_section(HTTP_BASIC_AUTH),
            self._config_tcp_sections,
            self._config_frontend_sections,
            self._config_backend_sections,
        )
        cfg_dict = OrderedDict()
        for build in builders:
            cfg_dict.update(build())

        self._update_haproxy(prettify(cfg_dict))
Beispiel #8
0
async def parse_extra_information(link, progress):
    """Grab the lead paragraph of the 'extra-informatie' page as XML.

    Failures are logged to extra_information.log and return None;
    *progress* is always invoked.
    """
    try:
        resp = await client.get(f"{link}/extra-informatie.html")
        soup = bs4.BeautifulSoup(resp.content, "lxml")
        lead = newlines_to_sentences(soup.find("p", class_="lead").text.strip())
        return prettify(f"{lead}", "extra_information")

    except Exception:
        # Best effort: record the failing link for a later retry.
        with open("extra_information.log", "a") as f:
            f.write(link + "\n")

    finally:
        progress()
    def update(self):
        """Regenerate the haproxy configuration from specs and apply it.

        Logs an internal error and does nothing when specs has not been
        initialized yet.
        """
        if not self.specs:
            logger.info("Internal error: Specs is not initialized")
            return

        self._config_ssl()
        cfg_dict = OrderedDict()
        # Merge every section in the order haproxy expects.
        for builder in (self._config_global_section,
                        self._config_defaults_section,
                        self._config_stats_section,
                        lambda: self._config_userlist_section(HTTP_BASIC_AUTH),
                        self._config_tcp_sections,
                        self._config_frontend_sections,
                        self._config_backend_sections):
            cfg_dict.update(builder())

        self._update_haproxy(prettify(cfg_dict))
    def update(self):
        """Rebuild the haproxy config from specs, if specs is initialized;
        otherwise log an internal error and return."""
        if not self.specs:
            logger.info("Internal error: Specs is not initialized")
            return

        self._config_ssl()
        cfg_dict = OrderedDict()
        # Sections are evaluated and merged in haproxy's expected order.
        for section in (self._config_global_section(),
                        self._config_defaults_section(),
                        self._config_stats_section(),
                        self._config_userlist_section(HTTP_BASIC_AUTH),
                        self._config_tcp_sections(),
                        self._config_frontend_sections(),
                        self._config_backend_sections()):
            cfg_dict.update(section)

        cfg = prettify(cfg_dict)
        self._update_haproxy(cfg)
Beispiel #11
0
    def buildControlET(self, file_name):
        """Build the <Control> element tree from the parsed diagram and
        write its prettified XML to *file_name*.

        Raises:
            ValueError: if the connection walk does not terminate on an
                element of type "end".
        """
        root = ET.Element("Control")

        # Find the mxCell labeled 'start'; only the first match is used.
        start_id = -1
        for item in self.ET_tree.iterfind("root/mxCell[@value='start']"):
            start_id = item.get("id")
            break

        # NOTE(review): only the LAST connection's (end_id, final_ET)
        # survives this loop, and if the start node has no outgoing
        # connections both names stay unbound and the lines below raise
        # NameError. Likewise, if no 'start' cell was found, start_id is -1
        # and the dict lookup may fail. Confirm every diagram guarantees a
        # start cell with at least one connection.
        for next_id in self.connection_dict[start_id]:
            (end_id, final_ET) = self.processConnection(next_id)
        element_type = self.getElementTypeWithID(end_id)
        if element_type != "end":
            raise ValueError("Did we actually finish? Ends at {}".format(end_id))

        root.append(final_ET)

        with open(file_name, "w") as f:
            f.write(prettify(root))
Beispiel #12
0
async def parse_details_component(html_content):
    """Turn each form-group's key/value rows into nested XML tags.

    Every <div class="form-group"> becomes one outer tag (its <strong>
    heading, normalized); within it each col-xs-7/col-xs-5 pair becomes an
    inner tag/value.
    """
    meta_data = {}
    for group in html_content.findAll("div", class_="form-group"):
        heading = group.find("strong").text.strip()
        labels = group.findAll("div", class_="col-xs-7")
        amounts = group.findAll("div", class_="col-xs-5")
        meta_data[heading] = {
            label.text.strip(): amount.text.strip()
            for label, amount in zip(labels, amounts)
        }

    parts = []
    for heading, pairs in meta_data.items():
        outer = normalize_text(heading)
        parts.append(f"<{outer}>")
        for label, amount in pairs.items():
            inner = normalize_text(label)
            parts.append(f"<{inner}>{amount}</{inner}>")
        parts.append(f"</{outer}>")

    return prettify("".join(parts), "details")
Beispiel #13
0
def generateXML(filename, outputPath, w, h, d, boxes):
    """Build a Pascal-VOC style annotation XML document for one image.

    Args:
        filename: image file name; its extension is stripped for <filename>.
        outputPath: directory used to build the <path> entry.
        w, h, d: image width, height and depth (channels).
        boxes: iterable of (category, (x, y, box_w, box_h)) tuples.

    Returns:
        The prettified XML string (via the module's prettify helper).
    """
    top = ET.Element('annotation')
    ET.SubElement(top, 'folder').text = 'images'
    # FIX: rsplit keeps the whole name when there is no extension; the old
    # rfind-based slice (rfind == -1) silently chopped the last character.
    ET.SubElement(top, 'filename').text = filename.rsplit(".", 1)[0]
    ET.SubElement(top, 'path').text = outputPath + "/" + filename
    source = ET.SubElement(top, 'source')
    ET.SubElement(source, 'database').text = 'Unknown'
    size = ET.SubElement(top, 'size')
    ET.SubElement(size, 'width').text = str(w)
    ET.SubElement(size, 'height').text = str(h)
    ET.SubElement(size, 'depth').text = str(d)
    ET.SubElement(top, 'segmented').text = str(0)
    for category, (x, y, wb, hb) in boxes:
        obj = ET.SubElement(top, 'object')
        ET.SubElement(obj, 'name').text = category
        ET.SubElement(obj, 'pose').text = 'Unspecified'
        ET.SubElement(obj, 'truncated').text = '0'
        ET.SubElement(obj, 'difficult').text = '0'
        bndbox = ET.SubElement(obj, 'bndbox')
        ET.SubElement(bndbox, 'xmin').text = str(x)
        ET.SubElement(bndbox, 'ymin').text = str(y)
        ET.SubElement(bndbox, 'xmax').text = str(x + wb)
        ET.SubElement(bndbox, 'ymax').text = str(y + hb)
    return prettify(top)
Beispiel #14
0
	def getPrettyDictList(self):
		"""
		Get a list of dictionaries whose keys are strings and whose values
		are pretty_urls, strings, ints and dates.

		:return: List of dictionaries, one per SPARQL result binding
		"""

		# Order namespaces from longest to shortest (so the first match is
		# the fullest path instead of a partial one).
		# FIX: the original passed a Python-2 cmp-style lambda positionally,
		# which is a TypeError on Python 3 — sorted() only accepts key=.
		namespaces_ordered_keys = sorted(
			self.namespaces.keys(), key=len, reverse=True)

		pretty_rows = []
		for res in self.sparql_results.bindings:
			row = {}
			for k in self.getFields():
				row[str(k)] = utils.prettify(
					res[k], namespaces=self.namespaces,
					namespaces_ordered_keys=namespaces_ordered_keys)
			pretty_rows.append(row)

		return pretty_rows
Beispiel #15
0
async def parse_opportunities(link, progress):
    """Collect every non-empty opportunity block as <opportunity> XML.

    Failures are logged to opportunities.log and return None; *progress*
    is always invoked.
    """
    try:
        resp = await client.get(f"{link}/kansen.html")
        soup = bs4.BeautifulSoup(resp.content, "lxml")
        snippets = (
            newlines_to_sentences(div.text.strip())
            for div in soup.findAll("div", class_="opportunity")
        )
        xml_data = "".join(
            f"<opportunity>{snippet}</opportunity>"
            for snippet in snippets if snippet
        )
        return prettify(f"{xml_data}", "opportunities")

    except Exception:
        # Best effort: remember the failing link for a later retry.
        with open("opportunities.log", "a") as f:
            f.write(link + "\n")

    finally:
        progress()
Beispiel #16
0
    async def ban_map(self, map_to_ban: str, caller: Player):
        """
        Remove map from pool
            :param map_to_ban: str that represents map to ban
            :param caller: discord.Member object that represents who called the command
            :return: discord.Embed describing the outcome — ban confirmation,
                the final-map announcement, or an error message
        """
        # Only the registered captains may ban maps.
        if caller not in self.captains.values():
            return discord.Embed(title="Valorant 10 Man Bot",
                                 description="Only captains can ban maps")
        # Title-case for display; map_dict keys are kept lowercase below.
        # NOTE(review): raises IndexError on an empty string — confirm the
        # command layer never passes one.
        map_to_ban = map_to_ban[0].upper() + map_to_ban[1:].lower()

        # Map exists and is still available: ban it.
        if map_to_ban.lower() in self.map_dict.keys() and self.map_dict[
                map_to_ban.lower()] == True:
            self.map_dict[map_to_ban.lower()] = False
            counter = Counter(self.map_dict.values())
            embed_string = ""

            if counter[False] == len(
                    self.map_dict.keys()) - 1:  # one map remaining
                # Announce the single surviving map.
                embed_string = "The match will be played on {}".format(
                    next((prettify(k)
                          for k in self.map_dict.keys() if self.map_dict[k]),
                         None))
            else:
                embed_string = f"{map_to_ban} has been banned\n\n The remaining maps are\n\n" + await self.get_remaining_map_string(
                )
            return discord.Embed(title="Valorant 10 Man Bot",
                                 description=embed_string)

        # Unknown map name.
        elif map_to_ban.lower() not in self.map_dict.keys():
            return discord.Embed(
                title="Valorant 10 Man Bot",
                description=f"{map_to_ban} is not a valid map")

        # Known map, but already banned.
        elif not self.map_dict[map_to_ban.lower()]:
            return discord.Embed(
                title="Valorant 10 Man Bot",
                description=
                f"{map_to_ban} is already banned. The remaining maps are:\n" +
                await self.get_remaining_map_string())
Beispiel #17
0
async def parse_strengths(link, progress):
    """Scrape 'sterke-punten' into <strength_info>/<weakness_info> XML.

    Failures are logged (to this module's .log file) and return None;
    *progress* is always invoked.
    """
    try:
        resp = await client.get(f"{link}/sterke-punten.html")
        soup = bs4.BeautifulSoup(resp.content, "lxml")
        strengths = ""
        weaknesses = ""
        for i in [i.text.strip() for i in soup.findAll("div", class_="strength")]:
            i = newlines_to_sentences(i)
            if not i:
                continue
            strengths += f"<strength>{i}</strength>"
        for i in [i.text.strip() for i in soup.findAll("div", class_="weakness")]:
            # CONSISTENCY FIX: weaknesses were not normalized with
            # newlines_to_sentences, unlike strengths (above) and the
            # opportunities parser.
            i = newlines_to_sentences(i)
            if not i:
                continue
            weaknesses += f"<weakness>{i}</weakness>"
        xml_data = f"<strength_info>{strengths}</strength_info><weakness_info>{weaknesses}</weakness_info>"
        return prettify(xml_data, "strength")

    except Exception:
        with open(__file__ + ".log", "a") as f:
            f.write(link + "\n")

    finally:
        progress()
Beispiel #18
0
def parse(filename = None, 
    lines = 100000, 
    grab_from = 'front', 
    offset = 0, 
    exclusions=['Comment'],
    header_lines=24,
    header_scheme={
        'start_time':11,
        'delta_x':21,
        'column_titles':23
    },
    output=True,
    **kwargs):

    """ Parses the lvm file produced by the acquisition vi.

        args (all optional)
        -----------
        -> filename: if none provided you will be prompted by a gui
        -> lines: limits file size by grabbing only N lines
                (defaults to 100,000 lines)
        -> grab_from: grab N lines from the 'front' or the 'back'
        -> offset: skip N lines from either the back or the front
        -> exclusions: column names dropped from the returned dict
                (read-only here, so the mutable default is harmless)
        -> header_lines: length of the header block (24 lines by default);
                respecify if the acquisition script's header changes
        -> header_scheme: dict giving the header-relative line index of
                the useful entries: start_time, delta_x, column_titles
        -> output: True/False, print progress and summary information

        Throws noFileException

        returns a dict with descriptive key names:
        {   sampling frequency,
            delta x,
            one entry per data column (numpy arrays),
            manual_particle_detector (array of 1/0),
            numcols, start_time, filename }

        Notes
        =========
            This does assume some things about the datafile structure:

            1. there should be a comment column containing a timestamp &
            the manual_particle_detector button state from the
            acquisition.vi program (used to mark e.g. particles passing).

            2. the comment must be the LAST column. Rows with an empty
            comment inherit the most recent filled one, so every column
            keeps a consistent length.
        """

    separator = '\t'  # the acquisition vi writes tab-separated values

    # open the file (raises noFileException if nothing is selected)
    f = getfile(filename)

    # read the header
    # FIX: xrange is Python 2 only — NameError on Python 3; use range.
    header = [next(f).strip() for x in range(header_lines)]
    if output: print('\n'.join([str(i) + ':\t' + x for i, x in enumerate(header)]))

    # ================
    # Parse the header
    # ================

    ## === start_time ========================
    start_time_line = header[header_scheme['start_time']]
    # start time line looks like: "Time \t timestr \t timestr"
    start_time_line_split = start_time_line.split(separator)
    # ----------
    start_time = start_time_line_split[1][:15] # grab HH:MM:SS.SSSSSSSSSSS

    ## === sampling frequency ================
    delta_x_line = header[header_scheme['delta_x']]
    delta_x_line_split = delta_x_line.split(separator)
    # ---------
    delta_x = float(delta_x_line_split[2])
    sampling_freq = 1 / delta_x

    # === column titles =====================
    column_title_line = header[header_scheme['column_titles']]
    column_title_line_split = column_title_line.split(separator)
    # ---------
    column_titles = column_title_line_split # grab all the column names
    num_cols = len(column_titles) # the number of columns in the file.

    # ================
    # Parse the file
    # ================

    # === Grab the file ================
    datalines = None # container to hold the lines read from the file.
    if grab_from == 'back':
        datalines = tail(lines, f=f, output=output, offset=offset)
    else:
        # NOTE(review): the 'lines' limit is only honored when
        # grab_from == 'back'; the front path reads to EOF — confirm.
        # FIX: xrange -> range (Python 3).
        [next(f) for x in range(offset)]               # eat the offset lines
        datalines = f

    # === prep the columns ============
    columns = {}    # dict to hold column vars
    columns_map = {} # dict with same keys, but with column index in dat file.

    for i, col in enumerate(column_titles):
        columns[col] = [] # initialize a list to hold the column
        columns_map[col] = i # remember what index the column is
    columns['manual_particle_detector'] = []
        # assuming the datafile contains the manual particle detector
        # button state, which is recorded within the comment.

    # === read the file =============
    for i, l in enumerate(datalines):
        dataline_split = l.strip().split(separator)
        # if this row has no comment, inherit it from the previous row
        if len(dataline_split) != num_cols:
            dataline_split.append(columns['Comment'][-1])

        for k in columns:
            if k == 'manual_particle_detector':
                # button state is the last comma-separated field of the comment
                particle_present = dataline_split[-1].split(',')[-1]
                columns[k].append(particle_present)
            else:
                columns[k].append(dataline_split[columns_map[k]])
            if k != 'Comment':
                columns[k][-1] = float(columns[k][-1]) # convert to float

    for k in columns:
        columns[k] = np.array(columns[k])

    f.close() # we have read the file. close it now.

    # ============
    # Return stuff
    # ============
    to_ret = {k: columns[k] for k in columns if k not in exclusions}
    to_ret['numcols'] = num_cols
    to_ret['delta_x'] = delta_x
    to_ret['start_time'] = start_time
    to_ret['sampling frequency'] = sampling_freq
    to_ret['filename'] = filename

    if output: print(prettify(to_ret))
    return to_ret
Beispiel #19
0
async def parse_short_term_improvements_component(html_content):
    """Wrap the component's normalized text in a short_term_improvements tag."""
    tag = "short_term_improvements"
    body = newlines_to_sentences(html_content.text.strip())
    return prettify(f"<{tag}>{body}</{tag}>", "short_term_improvements")
Beispiel #20
0
                cvr_contest_selection=[cvr_contest_selection])
        cvr_contests.append(cvr_contest)
        # end of processing a single contest for loop
    # end of loop over all contests on a single ballot

#
# Now we put actual ballot together - we take the choices from ballot and bundle them together as a 'Snapshot', and wrap them in a 'CVR'
# We also need to say what election this is for, which is why we had to create that object before setting up the specific CVR records
#
    cvr = CVR(id="_cvr_{}".format(cvr_id),
              election=fall18_wd9,
              cvr_snapshot=[
                  CVRSnapshot(id='_cvr_snapshot_{}_001'.format(cvr_id),
                              cvr_contests=cvr_contests)
              ])

    cvrs.append(cvr)
#end of processing all ballots

#
# Finally, put the election metadata together with the ballot-level results and call it a report.
#
fall18_wd9_cvr_report = CastVoteRecordReport(election=fall18_wd9,
                                             cvrs=cvrs,
                                             gp_unit=ward9,
                                             reporting_device=ward9_tabulator,
                                             parties=parties)
fall18_xml = fall18_wd9_cvr_report.to_xml()

print(prettify(fall18_xml))
Beispiel #21
0
 def __init__(self, subject, file_name):
     """Record the owning subject and derive the display name and disk path
     for this sound file."""
     self.file_name = file_name
     self.subject = subject
     self.pretty_name = prettify(file_name)
     self.file_path = "sounds/{0}/{1}/{2}".format(
         subject.category.folder_name, subject.folder_name, file_name)
Beispiel #22
0
 def __init__(self, app, folder_name):
     """Store the category folder, its derived names, and load subjects."""
     self.folder_name = folder_name
     self.pretty_name = prettify(folder_name)
     self.image_path = "sounds/{0}/image.png".format(folder_name)
     self.subjects = Subject.list(app, self)
Beispiel #23
0
def home():
    """Run the SpERT client on the ?text= query parameter and return the
    prettified result as JSON (relation triples are discarded)."""
    text = request.args.get('text')
    pretty_json, _rel_triples = prettify(spert_clinet(text))
    return jsonify(pretty_json)