# Converts parallel lists of students/assignments/scores to an XML file.
#
# BUGFIX: the original created one <result>/<student>/<assignment>/<score>
# element set up front, mutated the .text fields inside the loop, and
# re-appended the same <result> object each iteration -- every entry in the
# written file therefore showed only the values from the last iteration.
# Fresh elements are now created per (student, assignment) pair.
# Also switched to xml.etree.ElementTree: the cElementTree alias was
# deprecated since Python 3.3 and removed in 3.9.
import xml.etree.ElementTree as ET

students = [1, 2]
assignments = [100, 101, 102]
scores = [0, 4, 10]  # scores[i] belongs to assignments[i]

results = ET.Element("results")
for student_id in students:
    for assignment_id, assignment_score in zip(assignments, scores):
        result = ET.SubElement(results, "result")
        ET.SubElement(result, "student").text = str(student_id)
        ET.SubElement(result, "assignment").text = str(assignment_id)
        ET.SubElement(result, "score").text = str(assignment_score)

tree = ET.ElementTree(results)
tree.write('test.xml')
Example #2
0
    def generate_nzb(self):
        """Write an NZB index describing the articles posted for the current dir.

        Builds an <nzb> document from ``self._msgids`` and writes it,
        pretty-printed, to ``newsmangler_<dirname>.nzb`` in the working
        directory.  Assumes each ``self._msgids[subject]`` list starts with
        the post time followed by (article, size) pairs -- TODO confirm
        against the poster that fills ``_msgids``.

        NOTE(review): ``msgids.pop(0)`` mutates ``self._msgids`` in place, so
        this method is effectively one-shot per posting session.
        """
        filename = 'newsmangler_%s.nzb' % (SafeFilename(self._current_dir))

        self.logger.info('Begin generation of %s', filename)

        gentime = time.strftime('%Y-%m-%d %H:%M:%S UTC', time.gmtime())
        root = ET.Element('nzb')
        root.append(
            ET.Comment('Generated by newsmangler v%s at %s' %
                       (NM_VERSION, gentime)))

        for subject, msgids in self._msgids.items():
            # First list entry is the post time; the rest are (article, size).
            posttime = msgids.pop(0)

            # file
            f = ET.SubElement(
                root, 'file', {
                    'poster': self.conf['posting']['from'],
                    'date': str(posttime),
                    'subject': subject,
                })

            # newsgroups
            groups = ET.SubElement(f, 'groups')
            for newsgroup in self.newsgroup.split(','):
                group = ET.SubElement(groups, 'group')
                group.text = newsgroup

            # segments
            segments = ET.SubElement(f, 'segments')
            # Sort segments by part number so they appear in posting order.
            temp = [(m._partnum, m, article_size)
                    for m, article_size in msgids]
            temp.sort()
            for partnum, article, article_size in temp:
                segment = ET.SubElement(segments, 'segment', {
                    'bytes': str(article_size),
                    'number': str(partnum),
                })
                # Strip the surrounding <> from the Message-ID header value.
                segment.text = str(article.headers['Message-ID'][1:-1])

        # pretty print -- standard ElementTree in-place indentation recipe;
        # the inner `for elem in elem` rebinding is intentional.
        def indent(elem, level=0):
            i = "\n" + level * "  "
            if len(elem):
                if not elem.text or not elem.text.strip():
                    elem.text = i + "  "
                if not elem.tail or not elem.tail.strip():
                    elem.tail = i
                for elem in elem:
                    indent(elem, level + 1)
                if not elem.tail or not elem.tail.strip():
                    elem.tail = i
            else:
                if level and (not elem.tail or not elem.tail.strip()):
                    elem.tail = i

        with open(filename, 'wb') as nzbfile:
            indent(root)
            ET.ElementTree(root).write(nzbfile, xml_declaration=True)

        self.logger.info('End generation of %s', filename)
Example #3
0
 def dump_suites(self, suites):
     """Convert *suites* to an XML tree and write it to self.fout as UTF-8."""
     ET.ElementTree(self.convert_suites(suites)).write(
         self.fout, encoding="UTF-8")
    def test_publishTemplate(self):

        templateResult = self.getPublishedTemplateData(
            globalVars.publishedTemplateID)
        self.writeFile(globalVars.publishedTemp_filename, templateResult)
        tree = ET.ElementTree(file=globalVars.publishedTemp_filename)
        root = tree.getroot()
        ET.ElementTree(root).write(globalVars.publishedTemp_filename,
                                   xml_declaration=False)
        DeplName = 'TestCase_77215'
        deplyDesc = testCaseDescriptionMapping.TestCase_77215
        statausDeploy = False
        self.log_data(" Going to Deploy Template :: ")
        deployResponse = self.deployTemplate(DeplName, deplyDesc)

        if deployResponse.status_code in (200, 201, 202, 203, 204):

            #Get Deployment Id
            deploymentRefId = self.getDeploymentId(DeplName)
            loop = 60
            deploymentLogSubPath = '/opt/Dell/ASM/deployments/'
            deploymentLogPath = deploymentLogSubPath + str(deploymentRefId)

            while loop:
                resDS, statDS = self.getDeploymentStatus(deploymentRefId)
                if resDS.lower() in ("in_progress"):
                    time.sleep(120)
                else:
                    if resDS.lower() in ("complete"):
                        statausDeploy = True
                        self.log_TestData([
                            "", "", "",
                            str(self.tc_Id), deplyDesc, 'Success',
                            'Template Deployed Successfully',
                            'Server : Blade Server',
                            "deploymentLogPath: %s" % deploymentLogPath,
                            deplyDesc
                        ])
                        self.log_data(
                            'Successfully Deployed Service for the Deployment Name : %s'
                            % DeplName)
                        self.log_data(
                            'Going to do SCVMM Validation before TearDown')
                        self.doScvmmValidation(globalVars.refIdSCVMM)
                        self.log_data(
                            'Now going to call the teardown of service ')
                        self.cleanDeployedService(deploymentRefId)
                        self.test_cleanDeployedTemplates(deploymentRefId)
                        #                         self.log_data( 'Now going to call the teardown of Template ')
                        #                         self.test_cleanePublishedTemplates()
                        self.log_data(
                            'Going to do SCVMM Validation after TearDown')
                        self.doScvmmValidation(globalVars.refIdSCVMM)
                        break
                    else:
                        print "Deployment Status: %s" % resDS
                        self.log_TestData([
                            "", "", "",
                            str(self.tc_Id), deplyDesc, 'Failed',
                            'Deployment Service Failed',
                            'Server : Blade Server',
                            "deploymentLogPath: %s" % deploymentLogPath,
                            deplyDesc
                        ])
                        self.log_data(
                            'Deployment Service Failed for the Deployment Name : %s'
                            % DeplName)
                        if resDS.lower(
                        ) == globalVars.deploymentStatus and globalVars.enableTearDownService:
                            self.log_data(
                                'Now going to call the teardown of service ')
                            self.cleanDeployedService(deploymentRefId)
                            self.test_cleanDeployedTemplates(deploymentRefId)

                        break
            loop -= 1

        else:
            self.log_TestData([
                "", "", "",
                str(self.tc_Id), deplyDesc, 'Failed',
                'Deployment Service Failed'
            ])
            self.log_data(
                'Deployment Service Failed for the Deployment Name : %s' %
                DeplName)

        return statausDeploy
Example #5
0
def new_junit_xml_tree():
    """Return a fresh JUnit XML document rooted at an empty <testsuites>."""
    root = ET.Element('testsuites')
    return ET.ElementTree(root)
Example #6
0
#!/usr/bin/env python

import xml.etree.cElementTree as ET
import sys, time

# Poll the input-queuing drop counter of Ethernet 4/23 four times, one
# second apart (Python 2 syntax; intended to run on-box on a Cisco NX-OS
# device -- `cli` is presumably the NX-OS on-box Python API, not defined here).
ctr = 1
delay = 1  # seconds between iterations
while (ctr < 5):
    # XML namespace prefix used by the NX-OS qosmgr output
    qos = '{http://www.cisco.com/nxos:1.0:qosmgr}'
    inraw = cli(
        'sh policy-map interface ethernet 4/23 input type queuing | xml | exclude "]]>]]>"'
    )
    tree = ET.ElementTree(ET.fromstring(inraw))
    root = tree.getroot()
    print "-", "Iteration %i" % (ctr), "-" * 55
    for i in root.iter(qos + 'ROW_cmap'):
        try:
            # Only report drops for the default input queue class-map.
            if i.find(qos + 'cmap-key').text == "2q4t-8e-in-q-default":
                drop_pkts = int(i.find(qos + 'que-dropped-pkts').text)
                print "Dropped Packets = ", drop_pkts
        except AttributeError:
            # find() returned None for rows lacking these tags -- skip them.
            pass
    ctr += 1
    time.sleep(delay)
Example #7
0
                    else:
                        Overview.text = Overview.text + "\r" + curEpToWrite.description

            # collect all directors, guest stars and writers
            if getattr(myEp, 'director', None) is not None:
                persons_dict['Director'] += [x.strip() for x in myEp['director'].split('|') if x]
            if getattr(myEp, 'gueststars', None) is not None:
                persons_dict['GuestStar'] += [x.strip() for x in myEp['gueststars'].split('|') if x]
            if getattr(myEp, 'writer', None) is not None:
                persons_dict['Writer'] += [x.strip() for x in myEp['writer'].split('|') if x]

        # fill in Persons section with collected directors, guest starts and writers
        for person_type, names in persons_dict.iteritems():
            # remove doubles
            names = list(set(names))
            for cur_name in names:
                Person = etree.SubElement(Persons, "Person")
                cur_person_name = etree.SubElement(Person, "Name")
                cur_person_name.text = cur_name
                cur_person_type = etree.SubElement(Person, "Type")
                cur_person_type.text = person_type

        helpers.indentXML(rootNode)
        data = etree.ElementTree(rootNode)

        return data


# present a standard "interface" from the module
# NOTE(review): MediaBrowserMetadata is presumably defined earlier in this
# file (not visible in this chunk); consumers look up `metadata_class`.
metadata_class = MediaBrowserMetadata
Example #8
0
    imageHeight = labelSlice.shape[1]

    # plt.ion()
    # plt.figure('test')
    # plt.imshow(labelSlice)

    nLabels, labels, stats, centroids = cv2.connectedComponentsWithStats(
        labelSlice)

    # if (len(stats) - 1) == 0:
    #         break
    flatlabels = labels.ravel()

    annotation_file = '/home/jinzeyu/Downloads/demo2.xml'

    tree = ET.ElementTree(file=annotation_file)
    root = tree.getroot()

    tree = ET.parse(annotation_file)
    tree.find('folder').text = 'touthdisea'
    tree.find('path').text = psdfile.split('/')[1]

    tree.find('filename').text = filename + '.png'
    size = tree.find('size')
    size.find('width').text = str(imageHeight)
    size.find('height').text = str(imageWidth)

    for componentIndex in range(len(stats) - 1):
        boolCurrentPart = (flatlabels == (componentIndex + 1))
        allIndices = np.arange(imageWidth * imageHeight)
        indicesCurrentPart = allIndices[boolCurrentPart]
Example #9
0
 def __init__(self, file_path):
     """Parse the XML document at *file_path* and cache its tree and root.

     file_path: path to an existing XML file; parse/IO errors propagate.
     """
     # Keep both the tree (for writing) and the root (for element access).
     self.tree = ElementTree.ElementTree(file=file_path)
     self.root = self.tree.getroot()
Example #10
0
def SaveRecentFiles(e1, top):
    """Record the selected source path and persist it to recentlog.xml.

    e1:  Tk Entry widget holding the selected source path.
    top: Tk toplevel window; destroyed when done.

    Updates the module-level target_path/config_path globals, refreshes the
    status text widget `t1`, then appends any sources from the global
    `list_source` that are not already present in recentlog.xml (creating
    the file when missing).  Python 2 syntax.

    NOTE(review): the config-file handling below is commented out via the
    triple-quoted string blocks; only sources are persisted.
    """
    print "Number of config" + str(no_config / 3)
    ReadConfig()
    global target_path
    global config_path
    target_path = e1.get()
    # Config path is currently hard-coded rather than user-selected.
    config_path = 'config.txt'
    print "Current selected source " + target_path
    print "Current selected config " + config_path
    if len(target_path) > 0 and len(config_path) > 0:
        t1.config(state='normal')
        t1.delete("1.0", "end")
        t1.insert(END, 'Target: ' + target_path + '\n', 'big')
        t1.insert(END, 'Config: ' + config_path + '\n', 'big')
        t1.config(state='disabled')
    if len(target_path) <= 0:
        t1.config(state='normal')
        t1.insert(END, 'Target path missing ' + '\n', 'big')
        t1.config(state='disabled')
    if len(config_path) <= 0:
        t1.config(state='normal')
        t1.insert(END, 'Config path missing ' + '\n', 'big')
        t1.config(state='disabled')

    print "Currently we have " + str(
        len(list_source)) + " Sources. They are..."
    for source in list_source:
        print source

    if os.path.exists('recentlog.xml'):
        print "Log File Exists"
        tree = ET.parse('recentlog.xml')
        xmlroot = tree.getroot()
        source_found = 0
        config_found = 0
        new_list_source = []
        new_list_config = []
        for elementlist in xmlroot:
            # Only the <doc name="source"> element is processed; append any
            # source not already stored as a <field> child.
            if elementlist.get('name') == "source":
                for source in list_source:
                    source_found = 0
                    for element in elementlist:
                        if source == element.text:
                            source_found = 1
                            break
                    if source_found == 0:
                        new_list_source.append(source)
                for new_source in new_list_source:
                    ET.SubElement(elementlist, "field").text = new_source
                    tree = ET.ElementTree(xmlroot)
                tree.write('recentlog.xml')
            '''
                        if elementlist.get('name') == "config":
                                for config in list_config:
                                        config_found = 0        
                                        for element in elementlist:
                                                if config == element.text:
                                                        config_found = 1
                                                        break
                                        if config_found == 0:
                                                new_list_config.append(config)
                                for new_config in new_list_config:
                                        ET.SubElement(elementlist, "field").text = new_config
                                        tree = ET.ElementTree(xmlroot)
                                tree.write('recentlog.xml')
                        '''
        print "Length of new list " + str(len(new_list_source))
        print "Length of new list " + str(len(new_list_config))

    else:
        # No recent log yet: build a fresh document with all known sources.
        print "No file"
        xmlroot = ET.Element("root")
        doc_source = ET.SubElement(xmlroot, "doc", name="source")
        #doc_config = ET.SubElement(xmlroot, "doc1", name = "config")

        for element in list_source:
            ET.SubElement(doc_source, "field").text = element
        '''
                for element in list_config:
                        ET.SubElement(doc_config, "field").text = element
                '''

        tree = ET.ElementTree(xmlroot)
        tree.write("recentlog.xml")

    top.destroy()
def parseXML(xml_in, params, state):
    """
    Parse a pdfminer-style XML dump of a protocol document into page texts.

    xml_in: path to the XML file; the root element must be <pages>.
    params: dict of layout parameters (margins, header_bound, closing_mark).
    state:  unused in this function body.  # NOTE(review): confirm callers

    Returns a list with one text string per page.  Bold runs are wrapped in
    <poi_begin>/<poi_end>, indented runs in <interjection_begin>/<..._end>,
    and agenda headings in <issue_begin>/<issue_end>.  Relies on the
    module-level `bold_fonts`, `l` and `lookahead` names (defined elsewhere
    in this file).
    """
    # import pdb; pdb.set_trace()
    # if two fragments of text are within LINE_TOLERANCE of each other they're
    # on the same line

    # 1-3 leading characters followed by the configured closing mark.
    # NOTE(review): not referenced in this function body.
    NO_INTERJECTION = re.compile(r'^.{1,3}' + re.escape(params['closing_mark']))

    # ENDING_MARK = re.compile('(\(Schluss der Sitzung:.\d{1,2}.\d{1,2}.Uhr\).*|Schluss der Sitzung)')

    debug = False

    # NOTE(review): never set below -- the closing-mark check is commented out.
    found_ending_mark = False

    # get the page elements
    tree = ET.ElementTree(file=xml_in)
    pages = tree.getroot()

    if pages.tag != "pages":
        sys.exit("ERROR: pages.tag is %s instead of pages!" % pages.tag)

    text = []

    issue = 0
    # step through the pages
    for page in pages:
        # gets page_id
        page_id = page.attrib['id']

        # get all the textline elements
        textboxes = page.findall("./textbox")

        #print "found %s textlines" % len(textlines)
        # step through the textlines
        page_text = []
        # if page_id=='4':
        #     import pdb; pdb.set_trace()
        # rounded x0 of every textbox bbox on this page
        left =  [round(float(textbox.attrib["bbox"].split(',')[0:1][0])) for textbox in textboxes]

        # four most common x0 values.  NOTE(review): unused since page_set
        # is hard-coded to 'first' below.
        left_margin = [i[0] for i in Counter(left).most_common(4)]

        # cnt_set_one = 0
        # cnt_set_two = 0

        # for e in left_margin:
        #     if (e in range(params["text_margin_first_left"] - 4, params["text_margin_first_left"] + 4) or
        #         e in range(params["text_margin_first_right"] - 4, params["text_margin_first_right"] + 4)):
        #         cnt_set_one += 1
        #     elif (e in range(params["text_margin_second_left"] - 4, params["text_margin_second_left"] + 4) or
        #         e in range(params["text_margin_second_right"] - 4, params["text_margin_second_right"] + 4)):
        #         cnt_set_two += 1

        # if cnt_set_one==0 and cnt_set_two==0:
        #     logging.warning('no x0 values within specified ranges' + page.attrib['id'])
        #     page_set = None
        # else:
        #     if cnt_set_one > cnt_set_two:
        #         page_set = 'first'
        #     else:
        #         page_set = 'second'

        # Margin-set autodetection above is disabled; always use set one.
        page_set = 'first'

        for textbox in textboxes:
            # get the boundaries of the textline
            #import pdb; pdb.set_trace()
            textbox_bounds = [float(s) for s in textbox.attrib["bbox"].split(',')]
            #print "line_bounds: %s" % line_bounds

            # get all the texts in this textline
            lines = list(textbox)
            #print("found %s characters in this line." % len(chars))

            # combine all the characters into a single string
            # `poi` tracks whether we are inside a bold ("point of interest")
            # run; markers are inserted on bold/non-bold transitions.
            textbox_text = ""
            poi = False
            for line, has_more in lookahead(lines):
                chars = list(line)
                for char in chars:
                    if poi: 
                        if char.attrib:
                            #if "Bold" not in char.attrib['font']:
                            if all(f not in char.attrib['font'] for f in bold_fonts):
                                #import pdb; pdb.set_trace()
                                textbox_text = textbox_text + '<poi_end>'
                                poi = False
                    elif char.attrib:
                        #if "Bold" in char.attrib['font']:
                        if any(f in char.attrib['font'] for f in bold_fonts):
                            #import pdb; pdb.set_trace()
                            textbox_text = textbox_text + '<poi_begin>'
                            poi = True
                    textbox_text = textbox_text + char.text
                # Close a bold run that extends to the end of the textbox.
                if not has_more and poi:
                    textbox_text = textbox_text + '<poi_end>'

            textbox_text = textbox_text.replace('\n<poi_end>', '<poi_end>\n').replace('\t', ' ')
            # if 'Beifall' in textbox_text:
            #    import pdb; pdb.set_trace()
            # strip edge & multiple spaces
            textbox_text = re.sub(' +', ' ', textbox_text.strip())

            # removes header/footer
            if textbox_bounds[1]>params['header_bound'] and page_id not in ['1']:
                #import pdb; pdb.set_trace()
                print('removed header ' + textbox_text)
                continue

            # search for new topic on the agenda
            # (x0 in 170..260 is presumably the centered-heading zone)
            if textbox_bounds[0]>170 and textbox_bounds[0]<260:
                
                if any(x in textbox_text for x in l) and '<poi_begin>' in textbox_text:
                    # import pdb; pdb.set_trace()
                    issue += 1
                    textbox_text = '<issue_begin>' + textbox_text + '<issue_end>'

            # save a description of the line
            textbox = {'left': textbox_bounds[0], 'top': textbox_bounds[1], 'text': textbox_text, 'issue': issue}

            # Indented boxes are interjections; both branches also normalize
            # `left` to 30, and the far-right case is pushed down 1000 units
            # so it sorts after the main column.
            if page_set=='first':
                if textbox['left'] > params["text_margin_first_left"] + 5 and textbox['left'] < params["text_margin_first_right"] - 5:
                    textbox['text'] = '<interjection_begin>' + textbox['text'].replace('\n', '<interjection_end>\n<interjection_begin>') + '<interjection_end>'
                elif textbox['left'] > params["text_margin_first_right"] + 5:
                    textbox['text'] = '<interjection_begin>' + textbox['text'].replace('\n', '<interjection_end>\n<interjection_begin>') + '<interjection_end>'

                if textbox['left'] < params['text_margin_first_right'] - 5:
                    textbox['left'] = 30
                else:
                    textbox['left'] = 30
                    textbox['top'] = textbox['top']-1000
            elif page_set=='second':
                if textbox['left'] > params["text_margin_second_left"] + 5 and textbox['left'] < params["text_margin_second_right"] - 5:
                    textbox['text'] = '<interjection_begin>' + textbox['text'].replace('\n', '<interjection_end>\n<interjection_begin>') + '<interjection_end>'
                elif textbox['left'] > params["text_margin_second_right"] + 5:
                    textbox['text'] = '<interjection_begin>' + textbox['text'].replace('\n', '<interjection_end>\n<interjection_begin>') + '<interjection_end>'

                if textbox['left'] < params['text_margin_second_right'] - 5:
                    textbox['left'] = 30
                else:
                    textbox['left'] = 30
                    textbox['top'] = textbox['top']-1000

            page_text.append(textbox)

        #print "page %s has %s lines" % (page.attrib["id"], len(lines))

        # sort the lines by left, then top position
        # if debug:
        #     import pdb; pdb.set_trace()

        # if page_id == '5':
        #     import pdb; pdb.set_trace()
        # Sort by issue, then top-of-page first (PDF y grows upward).
        page_text = sorted(page_text, key=lambda d: (d['issue'], -d['top']))

        # consolidate lines that have the same top (within tolerance)
        # consolidated_lines = []
        # line_segments = []
        # line_top = lines[0]['top']
        # for line in lines:
        #   if abs(line['top'] - line_top) < LINE_TOLERANCE:
        #       line_segments.append(line)

        #   else:
        #       # assure that text segments appear in the correct order
        #       line_segments.sort(key=itemgetter('left'))
        #       # create a new line object combining partial texts, preserving the left-most text position
        #       merged_line = dict(line_segments[0])
        #       merged_line['text'] = ""
        #       for item in line_segments:
        #           merged_line['text'] = merged_line['text'] + " " + item['text']

        #       consolidated_lines.append(merged_line)

        #       # reset
        #       line_segments = [line]
        #       line_top = line['top']
        #import pdb; pdb.set_trace()
        page_text = '\n\n'.join([e['text'] for e in page_text])

        text.append(page_text + '\n')

    # if not found_ending_mark:
    #     sys.exit('could not find closing mark; adjust regex')

    #import pdb; pdb.set_trace()
    return text
Example #12
0
with open(r"maj_champs_dict_field.json") as config_file:
    dict_field = json.load(config_file)

try:
    for nom_commune, code_insee in dict_field.items():
        select_commune = arcpy.MakeFeatureLayer_management(
            os.environ['USERPROFILE'] +
            '\\AppData\\Roaming\\ESRI\\Desktop10.5\\ArcCatalog\\187_agglo_sig.sde\\agglo.sig.habillage_communes_agglo',
            'commune_lyr')
        whereClause = "nom = '" + nom_commune + "'"
        arcpy.SelectLayerByAttribute_management(select_commune,
                                                "NEW_SELECTION", whereClause)
        print(nom_commune)

        tree = ET.ElementTree(file='maj_champs_xml_bdd.xml')
        for elem in tree.iter(tag='layer'):
            bdd = elem.get('bdd')
            name_layer = elem.get('name')
            field_nom_com = elem.findtext('nomcom')
            field_code_insee = elem.findtext('codeinsee')

            try:
                select_emplacement = arcpy.MakeFeatureLayer_management(
                    os.environ['USERPROFILE'] +
                    '\\AppData\\Roaming\\ESRI\\Desktop10.5\\ArcCatalog\\187_' +
                    bdd + '_sig.sde\\' + name_layer, 'lyr')
                arcpy.SelectLayerByLocation_management(select_emplacement,
                                                       'INTERSECT',
                                                       select_commune)
Example #13
0
from scipy import signal
import h5py
from time import time
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
import xml.etree.cElementTree as ET
from statsmodels.tsa.stattools import grangercausalitytests

# Site-specific dataset locations.
data_path = '/is/ei/naji/Dropbox/Winter Semster 2014/Master Thesis/Real Data/Neuroscience'
cluster_path = '/agbs/cluster/naji/Linear Filters/real_data'
manuscript_path = '/is/ei/naji/Dropbox/Winter Semster 2014/Master Thesis/Manuscripts/'

mice_name = 'vvp01'
session_name = '2006-4-9_18-43-47'
#extracting the lfp sampling rate:
tree = ET.ElementTree(file=data_path + '/' + mice_name + '/' + session_name +
                      '/' + session_name + '.xml')
atype = tree.findall('fieldPotentials')
# NOTE(review): only the last <lfpSamplingRate> value is kept.
for btype in atype[0].findall('lfpSamplingRate'):
    sampling_rate = btype.text

# Load the CA1/CA3 recordings.  NOTE(review): Dataset.value is the legacy
# h5py accessor (removed in h5py 3.0); f['/data'][()] is the modern form.
with h5py.File(data_path + '/' + mice_name + '_' + session_name + '_CA1.h5',
               'r') as f:
    Y = f['/data'].value
with h5py.File(data_path + '/' + mice_name + '_' + session_name + '_CA3.h5',
               'r') as f:
    X = f['/data'].value

# Plot samples 100*1250..112*1250 of 32 channels with a vertical offset per
# channel (time axis scaled by 1/1250 -- presumably a 1250 Hz rate; confirm
# against sampling_rate above).  NOTE(review): plt/np are not imported in
# this visible chunk.
for i in range(32):
    plt.plot(
        np.arange(100 * 1250, 112 * 1250) / 1250.,
        X[100 * 1250:112 * 1250, i] + i * 10000)
Example #14
0
 def test_text_entry(self):
     """insert_text_entry should store the text under an <entry> element."""
     stamp = wx.DateTime(2, 11, 2020, 5, 31)
     payload = "hoi"
     doc_root = elTree.Element("root")
     insert_text_entry(doc_root, stamp, payload)
     stored = elTree.ElementTree(doc_root).find("entry").text
     assert stored == payload
Example #15
0
    # sign in, get MyPlex response
    try:
        response = urlopener.open(request).read()
    except urllib2.HTTPError, e:
        if e.code==401:
            dprint(__name__, 0, 'Authentication failed')
            return ('', '')
        else:
            raise
    
    dprint(__name__, 1, "====== MyPlex sign in XML ======")
    dprint(__name__, 1, response)
    dprint(__name__, 1, "====== MyPlex sign in XML finished ======")
    
    # analyse response
    XMLTree = etree.ElementTree(etree.fromstring(response))
    
    el_username = XMLTree.find('username')
    el_authtoken = XMLTree.find('authentication-token')    
    if el_username is None or \
       el_authtoken is None:
        username = ''
        authtoken = ''
        dprint(__name__, 0, 'MyPlex Sign In failed')
    else:
        username = el_username.text
        authtoken = el_authtoken.text
        dprint(__name__, 0, 'MyPlex Sign In successfull')
    
    return (username, authtoken)
Example #16
0
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 14 09:13:25 2017

@author: Thijs IJPelaar

Reads the safety factors out of computed BM-Gras documents and writes them
to output.csv as "<filename>;<FoS>" lines.
"""

# BUGFIX: use xml.etree.ElementTree -- the cElementTree alias was removed
# in Python 3.9 (this snippet already uses Python 3 print()).
import xml.etree.ElementTree as ET
import os

# Folder containing the computed BM-Gras files (adjust as needed).
# Renamed from `dir`, which shadowed the builtin dir().
bm_gras_dir = r'\\tsclient\W\WB\1. Toetssporen\36-1\6. GEBU\6. Projectbestanden BM Gras'

with open('output.csv', 'w') as f:
    for fname in os.listdir(bm_gras_dir):
        tree = ET.ElementTree(file=os.path.join(bm_gras_dir, fname))
        for output in tree.iter(tag='Output'):
            # Key '14' identifies the safety-factor record in BM-Gras output.
            if output.attrib['Key'] == '14':
                f.write(str(fname) + ';' + str(output.attrib['FoS']) + '\n')
                print(fname, output.attrib['FoS'])
Example #17
0
def getXMLFromMultiplePMS(ATV_udid, path, type, options={}):
    """Fetch `path` from every matching Plex Media Server and merge results.

    ATV_udid: device id used to look up known servers in the global g_PMS.
    path:     PMS request path fetched from each selected server.
    type:     server filter -- 'all', 'owned', 'shared', 'local' or 'remote'.
    options:  extra request options passed through to each request.

    Returns an ElementTree whose <MediaConverter> root contains one <Server>
    element per selected PMS, with the Directory/Playlist/Video children of
    each server's response copied in and their link attributes rewritten to
    PMS(...)-prefixed URLs.  Requests run in one thread per server.

    NOTE(review): `type` shadows the builtin, and the mutable default
    `options={}` is shared across calls -- left unchanged in this doc-only
    pass.  `getiterator` is the legacy ElementTree API (Python 2 era, as is
    Queue.Queue).
    """
    queue = Queue.Queue()
    threads = []
    
    root = etree.Element("MediaConverter")
    root.set('friendlyName', type+' Servers')
    
    for uuid in g_PMS.get(ATV_udid, {}):
        # Select servers by the requested type; 'all' skips plex.tv itself.
        if (type=='all' and getPMSProperty(ATV_udid, uuid, 'name')!='plex.tv') or \
           (type=='owned' and getPMSProperty(ATV_udid, uuid, 'owned')=='1') or \
           (type=='shared' and getPMSProperty(ATV_udid, uuid, 'owned')=='0') or \
           (type=='local' and getPMSProperty(ATV_udid, uuid, 'local')=='1') or \
           (type=='remote' and getPMSProperty(ATV_udid, uuid, 'local')=='0'):
            Server = etree.SubElement(root, 'Server')  # create "Server" node
            Server.set('name',    getPMSProperty(ATV_udid, uuid, 'name'))
            Server.set('address', getPMSProperty(ATV_udid, uuid, 'ip'))
            Server.set('port',    getPMSProperty(ATV_udid, uuid, 'port'))
            Server.set('baseURL', getPMSProperty(ATV_udid, uuid, 'baseURL'))
            Server.set('local',   getPMSProperty(ATV_udid, uuid, 'local'))
            Server.set('owned',   getPMSProperty(ATV_udid, uuid, 'owned'))
            
            baseURL = getPMSProperty(ATV_udid, uuid, 'baseURL')
            token = getPMSProperty(ATV_udid, uuid, 'accesstoken')
            PMS_mark = 'PMS(' + getPMSProperty(ATV_udid, uuid, 'address') + ')'
            
            Server.set('searchKey', PMS_mark + getURL('', '', '/Search/Entry.xml'))
            
            # request XMLs, one thread for each
            PMS = { 'baseURL':baseURL, 'path':path, 'options':options, 'token':token, \
                    'data': {'uuid': uuid, 'Server': Server} }
            t = Thread(target=getXMLFromPMSToQueue, args=(PMS, queue))
            t.start()
            threads.append(t)
    
    # wait for requests being answered
    for t in threads:
        t.join()
    
    # add new data to root XML, individual Server
    while not queue.empty():
            (data, XML) = queue.get()
            uuid = data['uuid']
            Server = data['Server']
            
            baseURL = getPMSProperty(ATV_udid, uuid, 'baseURL')
            token = getPMSProperty(ATV_udid, uuid, 'accesstoken')
            PMS_mark = 'PMS(' + getPMSProperty(ATV_udid, uuid, 'address') + ')'
            
            # XML==False signals a failed request for that server.
            if XML==False:
                Server.set('size',    '0')
            else:
                Server.set('size',    XML.getroot().get('size', '0'))
                
                for Dir in XML.getiterator('Directory'):  # copy "Directory" content, add PMS to links
                    key = Dir.get('key')  # absolute path
                    Dir.set('key',    PMS_mark + getURL('', path, key))
                    Dir.set('refreshKey', getURL(baseURL, path, key) + '/refresh')
                    if 'thumb' in Dir.attrib:
                        Dir.set('thumb',  PMS_mark + getURL('', path, Dir.get('thumb')))
                    if 'art' in Dir.attrib:
                        Dir.set('art',    PMS_mark + getURL('', path, Dir.get('art')))
                    Server.append(Dir)
                
                for Playlist in XML.getiterator('Playlist'):  # copy "Playlist" content, add PMS to links
                    key = Playlist.get('key')  # absolute path
                    Playlist.set('key',    PMS_mark + getURL('', path, key))
                    if 'composite' in Playlist.attrib:
                        Playlist.set('composite', PMS_mark + getURL('', path, Playlist.get('composite')))
                    Server.append(Playlist)
                
                for Video in XML.getiterator('Video'):  # copy "Video" content, add PMS to links
                    key = Video.get('key')  # absolute path
                    Video.set('key',    PMS_mark + getURL('', path, key))
                    if 'thumb' in Video.attrib:
                        Video.set('thumb', PMS_mark + getURL('', path, Video.get('thumb')))
                    if 'parentKey' in Video.attrib:
                        Video.set('parentKey', PMS_mark + getURL('', path, Video.get('parentKey')))
                    if 'parentThumb' in Video.attrib:
                        Video.set('parentThumb', PMS_mark + getURL('', path, Video.get('parentThumb')))
                    if 'grandparentKey' in Video.attrib:
                        Video.set('grandparentKey', PMS_mark + getURL('', path, Video.get('grandparentKey')))
                    if 'grandparentThumb' in Video.attrib:
                        Video.set('grandparentThumb', PMS_mark + getURL('', path, Video.get('grandparentThumb')))
                    Server.append(Video)
    
    root.set('size', str(len(root.findall('Server'))))
    
    XML = etree.ElementTree(root)
    
    dprint(__name__, 1, "====== Local Server/Sections XML ======")
    dprint(__name__, 1, XML.getroot())
    dprint(__name__, 1, "====== Local Server/Sections XML finished ======")
    
    return XML  # XML representation - created "just in time". Do we need to cache it?
Example #18
0
def run_prestart(numdsnodes, guard_mode=False):
    """Generate per-node keypairs and emit the local-run config files.

    Writes, relative to the current directory:
      - LOCAL_RUN_FOLDER/keys.txt : sorted "pubkey privkey" lines, one per node
      - dsnodes.xml               : <dsnodes> with a <pubk> per DS node
      - config_normal.xml         : <nodes> with <peer> info for the DS nodes
      - ds_whitelist.xml          : <nodes> with <peer> info for every node
      - shard_whitelist.xml       : <address> with a <pubk> per node

    numdsnodes -- how many of the (sorted) keys are treated as DS nodes
    guard_mode -- when True, substitute the fixed, non-critical test keypairs
                  below instead of freshly generated ones
    """
    testfolders_list = get_immediate_subdirectories(LOCAL_RUN_FOLDER)
    count = len(testfolders_list)

    # Generate one keypair per test folder; each output line is
    # "pubkey privkey", so sorting the strings orders them by public key.
    keypairs = []
    for _ in range(count):
        process = Popen(["./tests/Zilliqa/genkeypair"],
                        stdout=PIPE,
                        universal_newlines=True)
        (output, _) = process.communicate()
        process.wait()
        keypairs.append(output.strip())
    keypairs.sort()

    # Use pre generated keys for guard mode in local testing.
    # These keys are non critical and are only used for testing purposes.
    if guard_mode:
        keypairs = [
            "02028CC4DEC0A756B42BD54905237B4E22FCC69D88CFEAA3797AEECF01D6A69E85 55009317F8B1FC7889EDF83742F684FB700EE8F970F7EDB8BDD6286A0F0A4CF1",
            "021D99F2E5ACBA39ED5ACC5DCA5EE2ADDE780FFD998E1DBF440FE364C3BE360A7B 50C26000FCC08867FC3B9C03385015179E4B63282CB356014233BB1877FCDBDD",
            "025C8ACD69AE4075D3F02CE07641329CEFAF6C1B24BE64187D2ECDBDD55CF934A2 67E57D32E7EF421B704C23B05A7600A56808F2910FC2944CD7C7F87684CF0F49",
            "025EA8FFF868B64D5722F16FB07FB93CBFF38C5381F975CA5D0A728AEA19DBC6BC BE0CE7A97F90D433D58C873A9334819FDBF2646E08F61B470ACF996C082F0BB7",
            "0268B2A0B5FFE2ADE7A38DF9A878281A8BFFA4F8EE18A20EE53F5ABA3BDDC6BF00 A351988F0776D25CE203EC21BDBAF4402E98A2A9A724C28A8E4FEC81F030AF55",
            "027612A13BFA87AB22C0B3166B14873C7BEB77F0A27970BB0D1788EAA5F1BB885A 9C0A58E554511887E39E1E9BC25874B921A13FAFBB7DD4D57C8EBE6D72938C7D",
            "028ED45F00C33680BFDBBADD8DDC98627BF18E7B49E83420C00DE9C9752FE8F33D 33DB54623E8AFDFE735E1D73C62F68C924811DD5CD3300FE5203B580E330A373",
            "02B006BEAAFABFB738822EB4DFE3C0872A426173FF4C154D083A9C0EC5EB78A0C6 51D0976A2A9E72198D78BF229E8AB34DD9AE9E093CB1B71B4853C6839130CB6B",
            "02B5D018B064A26998AD4553BD5D394E898043A9C5A4E414C9EA71F1B26E1CEF3F 13A8B01932B072D1225BDD550C5D4048F664D3AE2476F24FD1EFF123573563F5",
            "02C28CDFC2CE6CE00DC38F5DBE565034BE50D382CB63FC260DA91E6828F806AAB6 C106836B85D5498112A13081A60326988012F6B0ED39480E3AB5683C69E04CB7",
            "02E1626300DA30EFC798DEB27ED546BB2B4D1D8771E0907B1DAD3A0AD3BE1381CD 3016CB647ABBD35B85939FC716155777DCC41AEB54D5498A0A99BCE74A2BF119",
            "02F650040BC0F3158B5D1A870EFC0ABE84FF02A4021A222FC49F9AD070ACFA2DA7 797FD9258980F9669A86E006A679815D15BD899B607454C3EDF284846F13E7E7",
            "030DFD4A0CFD68016DF6EFC4A6BF0B67BA42ABEAF8D9AD65C25E97B9DA90CA4DB1 50481C07CA036990945EBE3A0B7D71FFCCF27CFF4B1DA03B1C8FB3660DA89552",
            "03129EAEF8A136355FB1485941A593B4BEC4DFC5504D1114138A6D92332005DC59 CC3927D2C18849E6CFA3EE6D9322718961D2F035A1359AD1F37EEF527B5FBB15",
            "037D49C420B04B9862BC1F0660544FF27F1D81EA0E5E7C161F1647FBF239F8780E EB4D499149C3582AD84CCB28E697EB0BCAF331CD7CBE43D5672C21BB9C17A477",
            "03A6738E9081002097DDC71E3B72F9BBA2C0482034B8E80512D1B2DA5FFFFCBF8E 1430F36192D10C8ABCD9A22B036788C13F17830E807FEC7073B5E14E4B171265",
            "03C58CD6B4C6A0E4FEC1D989D3218B67AFC82149039E174074C5FDFE58CB427028 38EB4FB242BCD8D4BBD9114307F9B7F1F90D8DB7DE0AEC356691BEC98642C062",
            "03D616566DE986ADAC0E51BAF8147155993D56CCE4834607CFDDF832C3CAFD00F7 B459705C716E12044AB8263F812A9E9153269DD1449FA3B40EAA489F844BF839",
            "03E4DA9E02B0830C98E99737FE63B34D068D086184583449C583A68F4DBA79BE64 7541CE8176B260B4A6D28578D2433D17A7948F3810E464C5288E86E7DCDC71AE",
            "03FB81D476B3CF161AFD1AE0B861ECC907111AB891DF82028DD3D3085E2460A574 224B31816F0B529F21B14D9F04C42E7F277A024758A57BB6E1B3DEBF39A38E72"
        ]

    # Store the sorted keys list once (the original rewrote the same file
    # three times with identical content, and never used `with`).
    with open(LOCAL_RUN_FOLDER + 'keys.txt', "w") as keys_file:
        for x in range(count):
            keys_file.write(keypairs[x] + '\n')

    # dsnodes.xml and config_normal.xml describe only the first numdsnodes
    # entries of the sorted key list (the DS nodes).
    nodes = ET.Element("nodes")
    dsnodes = ET.Element("dsnodes")
    for x in range(count):
        if x < numdsnodes:
            pubkey = keypairs[x].split(" ")[0]
            ET.SubElement(dsnodes, "pubk").text = pubkey
            peer = ET.SubElement(nodes, "peer")
            ET.SubElement(peer, "pubk").text = pubkey
            ET.SubElement(peer, "ip").text = '127.0.0.1'
            ET.SubElement(peer, "port").text = str(NODE_LISTEN_PORT + x)

    # Create dsnodes file
    ET.ElementTree(dsnodes).write("dsnodes.xml")
    dsnodes.clear()

    # Create config_normal.xml with pubkey and IP info of all DS nodes
    ET.ElementTree(nodes).write("config_normal.xml")
    nodes.clear()

    # ds_whitelist.xml holds peer info for EVERY node, not just DS nodes.
    for x in range(count):
        pubkey = keypairs[x].split(" ")[0]
        peer = ET.SubElement(nodes, "peer")
        ET.SubElement(peer, "pubk").text = pubkey
        ET.SubElement(peer, "ip").text = '127.0.0.1'
        ET.SubElement(peer, "port").text = str(NODE_LISTEN_PORT + x)
    ET.ElementTree(nodes).write("ds_whitelist.xml")
    nodes.clear()

    # shard_whitelist.xml lists only the public keys.
    address_nodes = ET.Element("address")
    for x in range(count):
        ET.SubElement(address_nodes, "pubk").text = keypairs[x].split(" ")[0]
    ET.ElementTree(address_nodes).write("shard_whitelist.xml")
Example #19
0
    def job():
        """Scrape betting data for every match URL listed in listteam.txt.

        For each match page: render it with headless Chrome, extract
        team/odds/schedule details with BeautifulSoup, write a per-match
        XML and CSV file, upload both via FTP, and upsert the match row
        into the `betTeamName` MySQL table.

        NOTE(review): DB/FTP hosts and credentials are hard-coded (some
        appear scrubbed as "******"); confirm configuration before running.
        """
        mydb = mysql.connector.connect(host="202.129.206.136",
                                       user="******",
                                       passwd="Por19030703",
                                       database="darunph3_bet")

        mycursor = mydb.cursor()
        # Download the list of match URLs (one per line) to a local file.
        textfilename = 'listteam.txt'
        url = 'http://www.darunphop.com/BET/datafile/' + textfilename
        r = requests.get(url)
        with open(textfilename, 'wb') as f:
            f.write(r.content)

        file = open(textfilename, 'r')

        for xlink in file:
            url = xlink
            webdriver_path = './chromedriver.exe'

            # Render the page in headless Chrome so JS-populated content is
            # present in the DOM before parsing it with BeautifulSoup.
            chrome_options = Options()
            chrome_options.add_argument('--headless')
            chrome_options.add_argument('--window-size=1920x1080')
            browser = webdriver.Chrome(executable_path=webdriver_path,
                                       chrome_options=chrome_options)

            browser.get(url)
            soup = BeautifulSoup(browser.page_source, 'html.parser')
            browser.quit()
            # Pull the page fragments this scraper depends on: team images,
            # breadcrumb header, match details list, odds table footer, title.
            div = soup.find('div', {
                'class':
                'columns__item columns__item--68 columns__item--tab-100'
            })
            imgteam = div.find_all('img')
            header = soup.find('ul', {'class': 'list-breadcrumb'})
            header_1 = header.find_all('li')
            matchtime = soup.find('ul', {'class': 'list-details'})
            matchtime_1 = matchtime.find_all('p')
            data = soup.find('tfoot', {'id': 'match-add-to-selection'})
            datab = data.find_all('td')
            fcname = soup.find('title')
            # Strip the site suffix from the <title> to get the fixture name.
            fcname1 = fcname.text.replace(' - H2H stats, results, odds', '')

            named_tuple = time.localtime()  # get struct_time
            time_string = time.strftime("%m-%d-%Y, %H:%M:%S", named_tuple)

            i = len(datab)

            bet = 1

            nameteam = fcname1

            # result = firebase.post('/football',data={'nameteam':{'home': datab[2].text,'dew': datab[3].text,'away':datab[4].text ,'time':dt } })

            # print(getfirebase)
            # re1 = firebase.post('/betdata',{'bet':dt})
            # Scrape timestamp plus the home/draw/away odds cells.
            array1 = [time_string, datab[2].text, datab[3].text, datab[4].text]
            # Normalize "Team A - Team B" into a filesystem-safe "TeamA-TeamB".
            headerRe = header_1[4].text.replace(' - ', '-')
            headerRe1 = headerRe.replace(' ', '')
            matchtimeRe = matchtime_1[0].text.replace('-', '')

            import datetime as dt
            from datetime import date
            from datetime import datetime
            from datetime import timedelta
            # Kick-off time shifted by +6h (presumably to Bangkok local
            # time, per the _bkk suffix -- confirm).
            date_time_obj = dt.datetime.strptime(matchtimeRe, '%d.%m.%Y %H:%M')
            date_time_obj_bkk = (date_time_obj + timedelta(hours=6))
            now = datetime.now()
            dt_string = now.strftime("%Y-%m-%d %H:%M:%S")

            print(header_1[3].text)
            print(header_1[2].text)
            print(headerRe1)
            print(matchtimeRe)
            print(date_time_obj_bkk)
            print(dt_string)
            print(matchtime_1[1].text)
            print(array1)
            print(type(date_time_obj_bkk))
            # betStatus_1: "0" while kick-off is still in the future,
            # "1" once the match has started.
            nowdatetime = datetime.strptime(dt_string, '%Y-%m-%d %H:%M:%S')
            if date_time_obj_bkk >= nowdatetime:
                betStatus_1 = "0"
                print("not yet")
            else:
                betStatus_1 = "1"
                print("yet")

            flag = "https://www.betexplorer.com" + str(imgteam[0]['src'])
            cwd = os.getcwd()

            hometeamimg = str(imgteam[1]['src'])
            awayteamimg = str(imgteam[2]['src'])

            # Create the per-match XML summary file.
            root = ET.Element("root")
            ET.SubElement(root, "titleL").text = header_1[3].text
            ET.SubElement(root, "matchteam").text = headerRe1
            ET.SubElement(root, "datetime").text = matchtimeRe
            ET.SubElement(root, "leg").text = flag
            ET.SubElement(root, "hometeam").text = hometeamimg
            ET.SubElement(root, "awayteam").text = awayteamimg
            ET.SubElement(root, "score").text = matchtime_1[1].text

            tree = ET.ElementTree(root)

            tree.write(cwd + "\\" + headerRe1 + ".xml")

            fileName = Path(headerRe1 + ".csv")

            # Write the CSV header only the first time this fixture is seen.
            if fileName.is_file():
                print("csv already exist")
            else:
                with open(cwd + "\\" + headerRe1 + ".csv", "a",
                          newline='') as csvfile:
                    fieldnames = ['Date', 'Home', 'D', 'A']
                    writrer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                    writrer.writeheader()

            # Append this scrape's odds snapshot to the fixture's CSV.
            with open(cwd + "\\" + headerRe1 + ".csv", "a",
                      newline='') as csvfile:
                hadername = ["Date", "Home", "D", "A"]
                writrer = csv.DictWriter(csvfile, fieldnames=hadername)
                writrer.writerow({
                    "Date": time_string,
                    "Home": datab[2].text,
                    "D": datab[3].text,
                    "A": datab[4].text
                })

            # Upload both artifacts to the site's datafile folder via FTP.
            server = 'www.darunphop.com'
            username = '******'
            password = '******'
            ftp_connection = ftplib.FTP(server, username, password)
            remote_path = "/domains/darunphop.com/public_html/BET/datafile/"
            ftp_connection.cwd(remote_path)
            fh = open(cwd + "\\" + headerRe1 + ".csv", 'rb')
            fl = open(cwd + "\\" + headerRe1 + ".xml", 'rb')
            csvfilename = headerRe1 + ".csv"
            xmlfilename = headerRe1 + ".xml"

            ftp_connection.storbinary('STOR %s' % csvfilename, fh)
            ftp_connection.storbinary('STOR %s' % xmlfilename, fl)
            print("uploaded ->  " + csvfilename + "," + xmlfilename)
            # NOTE(review): only fh is closed; fl and ftp_connection are
            # left open each iteration -- confirm whether intentional.
            fh.close()

            # INSERT IGNORE: existing fixture rows are left untouched.
            sql = "INSERT IGNORE INTO betTeamName (betName,betNation,betL,betTime,betFleg,betimgHome,betimgAway,betStatus) VALUES (%s,%s, %s,%s,%s,%s,%s,%s)"
            val = [(headerRe1, header_1[2].text, header_1[3].text,
                    date_time_obj_bkk, flag, hometeamimg, awayteamimg,
                    betStatus_1)]

            mycursor.executemany(sql, val)
            mydb.commit()

            # Read back the stored kick-off time (opens a second connection
            # per URL; the first connection `mydb` is still open).
            connection = mysql.connector.connect(host='202.129.206.136',
                                                 database="darunph3_bet",
                                                 user='******',
                                                 password='******')
            sql_Query = "SELECT betTime FROM betTeamName where betName =%s"
            betName = (headerRe1, )

            cursor = connection.cursor(buffered=True)
            cursor.execute(sql_Query, betName)
            record = cursor.fetchone()
            # selecting column value into varible
            betTime_value = str(record[0])
            print(betTime_value)

            print(mycursor.rowcount, "was inserted.")

            print(
                '==========================================================================================='
            )
Example #20
0
# Render the current matplotlib figure to an in-memory SVG document.
svg_buffer = BytesIO()
plt.savefig(svg_buffer, format="svg")

import xml.etree.cElementTree as ET

# filter definition for a gaussian blur
filter_def = """
  <defs  xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'>
    <filter id='dropshadow' height='1.2' width='1.2'>
      <feGaussianBlur result='blur' stdDeviation='3'/>
    </filter>
  </defs>
"""

# Parse the rendered SVG; XMLID also returns an id -> element mapping.
tree, xmlid = ET.XMLID(svg_buffer.getvalue())

# Prepend the <defs> block so the filter is visible to the whole tree.
tree.insert(0, ET.XML(filter_def))

# Point each line's shadow element at the gaussian-blur filter.
for line in (l1, l2):
    shadow_elem = xmlid[line.get_label() + "_shadow"]
    shadow_elem.set("filter", 'url(#dropshadow)')

# Write the modified SVG back out to disk.
fn = "svg_filter_line.svg"
print("Saving '%s'" % fn)
ET.ElementTree(tree).write(fn)
Example #21
0
    def _show_data(self, show_obj):
        """
        Creates an elementTree XML structure for a MediaBrowser-style series.xml
        returns the resulting data object.

        show_obj: a TVShow instance to create the NFO for

        Returns an ElementTree wrapping the <Series> node, or False when the
        indexer has no usable title/id for the show. Re-raises indexer
        show-not-found / indexer-down errors.
        """

        indexer_lang = show_obj.lang
        # There's gotta be a better way of doing this but we don't wanna
        # change the language value elsewhere
        lINDEXER_API_PARMS = sickbeard.indexerApi(show_obj.indexer).api_params.copy()

        lINDEXER_API_PARMS['actors'] = True

        if indexer_lang and not indexer_lang == 'en':
            lINDEXER_API_PARMS['language'] = indexer_lang

        if show_obj.dvdorder != 0:
            lINDEXER_API_PARMS['dvdorder'] = True

        t = sickbeard.indexerApi(show_obj.indexer).indexer(**lINDEXER_API_PARMS)

        tv_node = etree.Element("Series")

        try:
            myShow = t[int(show_obj.indexerid)]
        except sickbeard.indexer_shownotfound:
            logger.log(u"Unable to find show with id " + str(show_obj.indexerid) + " on " + sickbeard.indexerApi(
                show_obj.indexer).name + ", skipping it", logger.ERROR)
            raise

        except sickbeard.indexer_error:
            logger.log(
                u"" + sickbeard.indexerApi(show_obj.indexer).name + " is down, can't use its data to make the NFO",
                logger.ERROR)
            raise

        # check for title and id
        if getattr(myShow, 'seriesname', None) is None or getattr(myShow, 'id', None) is None:
            logger.log(u"Incomplete info for show with id " + str(show_obj.indexerid) + " on " + sickbeard.indexerApi(
                show_obj.indexer).name + ", skipping it", logger.ERROR)
            return False

        def add_text_element(parent, tag, attr, convert=None):
            # Create <tag> under parent and fill its text from myShow[attr]
            # when the indexer supplied that attribute; the element is always
            # created (possibly empty), matching the original output shape.
            elem = etree.SubElement(parent, tag)
            if getattr(myShow, attr, None) is not None:
                value = myShow[attr]
                elem.text = convert(value) if convert is not None else value
            return elem

        add_text_element(tv_node, "id", 'id', str)

        indexer = etree.SubElement(tv_node, "indexer")
        if show_obj.indexer is not None:
            indexer.text = str(show_obj.indexer)

        add_text_element(tv_node, "SeriesName", 'seriesname')
        add_text_element(tv_node, "Status", 'status')
        add_text_element(tv_node, "Network", 'network')
        add_text_element(tv_node, "Airs_Time", 'airs_time')
        add_text_element(tv_node, "Airs_DayOfWeek", 'airs_dayofweek')
        add_text_element(tv_node, "FirstAired", 'firstaired')

        # The content rating is mirrored into three differently-named tags
        # for compatibility with different consumers.
        add_text_element(tv_node, "ContentRating", 'contentrating')
        add_text_element(tv_node, "MPAARating", 'contentrating')
        add_text_element(tv_node, "certification", 'contentrating')

        MetadataType = etree.SubElement(tv_node, "Type")
        MetadataType.text = "Series"

        add_text_element(tv_node, "Overview", 'overview')
        add_text_element(tv_node, "PremiereDate", 'firstaired')
        add_text_element(tv_node, "Rating", 'rating')

        # ProductionYear is derived from the first-aired date when parseable.
        ProductionYear = etree.SubElement(tv_node, "ProductionYear")
        if getattr(myShow, 'firstaired', None) is not None:
            try:
                year_text = str(datetime.datetime.strptime(myShow['firstaired'], '%Y-%m-%d').year)
                if year_text:
                    ProductionYear.text = year_text
            except (TypeError, ValueError):
                # Malformed or empty air date: leave ProductionYear empty
                # (narrowed from the original bare `except`).
                pass

        add_text_element(tv_node, "RunningTime", 'runtime')
        add_text_element(tv_node, "Runtime", 'runtime')

        add_text_element(tv_node, "IMDB_ID", 'imdb_id')
        add_text_element(tv_node, "IMDB", 'imdb_id')
        add_text_element(tv_node, "IMDbId", 'imdb_id')

        add_text_element(tv_node, "Zap2ItId", 'zap2it_id')

        # Genres: one <Genre> child per non-empty pipe-separated entry.
        # Fixed: the original indexed myShow['genre'] without the getattr
        # guard every other field uses, crashing when genre is absent.
        Genres = etree.SubElement(tv_node, "Genres")
        if getattr(myShow, 'genre', None) is not None:
            for genre in myShow['genre'].split('|'):
                if genre:
                    cur_genre = etree.SubElement(Genres, "Genre")
                    cur_genre.text = genre

        Genre = etree.SubElement(tv_node, "Genre")
        if getattr(myShow, 'genre', None) is not None:
            Genre.text = "|".join([x for x in myShow["genre"].split('|') if x])

        Studios = etree.SubElement(tv_node, "Studios")
        Studio = etree.SubElement(Studios, "Studio")
        if getattr(myShow, 'network', None) is not None:
            Studio.text = myShow['network']

        Persons = etree.SubElement(tv_node, "Persons")
        if getattr(myShow, 'actors', None) is not None:
            for actor in myShow['_actors']:
                cur_actor = etree.SubElement(Persons, "Person")
                cur_actor_name = etree.SubElement(cur_actor, "Name")
                cur_actor_name.text = actor['name'].strip()
                cur_actor_type = etree.SubElement(cur_actor, "Type")
                cur_actor_type.text = "Actor"
                cur_actor_role = etree.SubElement(cur_actor, "Role")
                cur_actor_role_text = actor['role']
                if cur_actor_role_text is not None:
                    cur_actor_role.text = cur_actor_role_text

        helpers.indentXML(tv_node)

        data = etree.ElementTree(tv_node)

        return data
Example #22
0
    def test_publishTemplate(self):
        """Deploy the published template and verify the deployment completes.

        Polls the deployment status every 2 minutes, up to 60 attempts,
        logging the outcome and tearing the service down on success.
        """
        templateResult = self.getPublishedTemplateData(
            globalVars.publishedTemplateID)
        self.writeFile(globalVars.publishedTemp_filename, templateResult)

        # Round-trip the template file through ElementTree to rewrite it
        # without an XML declaration.
        tree = ET.ElementTree(file=globalVars.publishedTemp_filename)
        root = tree.getroot()
        ET.ElementTree(root).write(globalVars.publishedTemp_filename,
                                   xml_declaration=False)

        DeplName = 'TestCase_102166'
        deplyDesc = 'Deploy 2 rack servers with ESXI with 2 storage volumes, Mem Enable and a cluster with HA Enable'
        self.log_data(" Going to Deploy Template :: ")
        deployResponse = self.deployTemplate(DeplName, deplyDesc)
        if deployResponse.status_code in (200, 201, 202, 203, 204):

            #Get Deployment Id
            deploymentRefId = self.getDeploymentId(DeplName)
            loop = 60
            deploymentLogSubPath = '/opt/Dell/ASM/deployments/'
            deploymentLogPath = deploymentLogSubPath + str(deploymentRefId)

            while loop:
                resDS, statDS = self.getDeploymentStatus(deploymentRefId)
                # Fixed: `resDS.lower() in ("in_progress")` was a substring
                # test (the parentheses make a string, not a tuple), and
                # `loop -= 1` sat outside the while body, so an in-progress
                # deployment polled forever.
                if resDS.lower() == "in_progress":
                    time.sleep(120)
                elif resDS.lower() == "complete":
                    self.log_TestData([
                        "", "", "",
                        str(self.tc_Id), 'Success',
                        'Template Deployed Successfully',
                        'Server : Rack Server',
                        "deploymentLogPath: %s" % deploymentLogPath
                    ])
                    self.log_data(
                        'Successfully Deployed Service for the Deployment Name : %s'
                        % DeplName)
                    self.log_data(
                        'Now going to call the teardown of service ')
                    self.cleanDeployedService(deploymentRefId)
                    self.test_cleanDeployedTemplates(deploymentRefId)
                    break
                else:
                    self.log_TestData([
                        "", "", "",
                        str(self.tc_Id), 'Failed',
                        'Deployment Service Failed',
                        'Server : Rack Server',
                        "deploymentLogPath: %s" % deploymentLogPath
                    ])
                    self.log_data(
                        'Deployment Service Failed for the Deployment Name : %s'
                        % DeplName)
                    self.log_data(
                        'Now going to call the teardown of service ')
                    break
                loop -= 1

        else:
            self.log_TestData([
                "", "", "",
                str(self.tc_Id), 'Failed', 'Deployment Service Failed'
            ])
            self.log_data(
                'Deployment Service Failed for the Deployment Name : %s' %
                DeplName)
    #add <key>Evernote Notebook</key><string> var ennotebook
    notebook = ET.SubElement(root, "ennotebook")
    notebook.set("name", "Evernote Notebook")
    notebook.text = ennotebook
    #add <key>Evernote Tags</key><dict>, for each tag extracted from coma-delim list create key with incrementing number suffix. ie. <key>Tag1</key><string>, <key>Tag2</key><string>, <key>Tag3</key><string> etc.
    tags = ET.SubElement(root, "entags")
    tags.set("name", "Evernote Tags")
    tags.text = entags  #THIS RETURNS ERROR WITH SPACES
    #add <key> Already Synced Entries</key><dict>, <key>UUID1</key><string>
    synced = ET.SubElement(root, "synced")
    synced.set("name", "Already Synced")
    sfile = ET.SubElement(synced, "sfile")
    sfile.set("date", today)
    sfile.text = "firstsync"

    tree = ET.ElementTree(root)
    tree.write("sync_list.xml")

#no matter what, ask for the user's gmail password to configure the smtplib to send email. do not store this variable, instead, ask for it each time it is run.
gmpass = getpass.getpass(
    "Please enter your Gmail password. This will only be stored for the current session: "
)
#if sync_list.xml exists get the above variables from the list
tree = ET.parse("sync_list.xml")
root = tree.getroot()

for elem in root.findall('filepath'):
    filepath = elem.text
for elem in root.findall('enmail'):
    enmail = elem.text
for elem in root.findall('gmailadd'):
Example #24
0
#tree=ET.ElementTree(file="/media/protik/PROTIK/dogs_data/Annotation/n02085620-Chihuahua/n02085620_7")
import os
#os.chdir("/media/protik/PROTIK/dogs_data/Annotation")
# Walk every breed folder of the (Pascal-VOC-style) dog annotation set and
# read the bounding-box coordinates out of each per-image XML file.
# Python 2 script: uses backtick-repr and the `print` statement.
path_to_data = "/media/protik/PROTIK/dogs_data/Annotation"
j = 0
for foldername in os.listdir(path_to_data):
    path_each_folder = path_to_data + '/' + foldername
    i = 0  # NOTE(review): `i` is never used after this
    newpath = '/home/protik/Pictures/images/'
    # NOTE(review): `j` is never incremented, so every folder resolves to
    # the same ".../images/0" output directory -- confirm intent.
    newpath = newpath + ` j `
    if not os.path.exists(newpath):
        os.makedirs(newpath)
    for filename in os.listdir(path_each_folder):
        path = path_each_folder + "/" + filename
        print path
        tree = ET.ElementTree(file=path)
        root = tree.getroot()
        xmin = ymin = xmax = ymax = 0
        # Each <object> element may carry a <bndbox> with the four corner
        # coordinates; only the last box in the file survives the loop.
        for child in root:
            if child.tag == 'object':
                for attr in child:
                    if attr.tag == 'bndbox':
                        for data in attr:
                            if data.tag == 'xmin':
                                xmin = data.text
                            if data.tag == 'ymin':
                                ymin = data.text
                            if data.tag == 'xmax':
                                xmax = data.text
                            if data.tag == 'ymax':
                                ymax = data.text
    def test_publishTemplate(self):
        logger = self.getLoggerInstance()
        logger.info("getting published templateID in test_publishTemplate ")
        logger.info(globalVars.publishedTemplateID)

        testCaseID = self.getTestCaseID(__file__)
        #response = self.authenticate()

        templateResult = self.getPublishedTemplateData(
            globalVars.publishedTemplateID)
        self.writeFile(globalVars.publishedTemp_filename, templateResult)
        tree = ET.ElementTree(file=globalVars.publishedTemp_filename)
        root = tree.getroot()
        ET.ElementTree(root).write(globalVars.publishedTemp_filename,
                                   xml_declaration=False)
        DeplName = 'TestCase_104081'
        deplyDesc = testCaseDescriptionMapping.TestCase_104081
        deployResponse = self.deployTemplate(DeplName, deplyDesc)
        logger.info("printing the response from the deploy template")
        logger.debug(deployResponse.content)
        logger.info("printing the status code")
        logger.info(deployResponse.status_code)
        if deployResponse.status_code in (200, 201, 202, 203, 204):

            #Get Deployment Id
            deploymentRefId = self.getDeploymentId(DeplName)
            loop = 60
            deploymentLogSubPath = '/opt/Dell/ASM/deployments/'
            deploymentLogPath = deploymentLogSubPath + str(deploymentRefId)

            while loop:
                resDS, statDS = self.getDeploymentStatus(deploymentRefId)
                if resDS.lower() in ("in_progress"):
                    time.sleep(120)
                else:
                    if resDS.lower() in ("complete"):
                        print "Deployment Status: %s" % resDS
                        self.log_TestData([
                            "", "", "",
                            str(self.tc_Id), deplyDesc, 'Success',
                            'Template Deployed Successfully',
                            'Server : Blade Server',
                            "deploymentLogPath: %s" % deploymentLogPath
                        ])
                        self.log_data(
                            'Successfully Deployed Service for the Deployment Name : %s'
                            % DeplName)
                        self.log_data(
                            'Going to do VCenter Validation before  ScaleUp')
                        self.doVCenterValidations(globalVars.refIdVCenter)
                        break
                    else:
                        print "Deployment Status: %s" % resDS
                        self.log_TestData([
                            "", "", "",
                            str(testCaseID), deplyDesc, 'Failed',
                            'Deployment Service Failed',
                            'Server : Blade Server',
                            "deploymentLogPath: %s" % deploymentLogPath
                        ])
                        self.log_data(
                            'Deployment Service Failed for the Deployment Name : %s'
                            % DeplName)
                        break
            loop -= 1
            self.log_data(
                'Going to call the ScaleUp  for the testcase id : %s' %
                str(testCaseID))
            self.test_scaleUpTemplate(globalVars.publishedTemplateID,
                                      deploymentRefId, DeplName, deplyDesc)

        else:
            self.log_TestData([
                "", "", "",
                str(testCaseID), 'Failed', 'Deployment Service Failed'
            ])
            self.log_data(
                'Deployment Service Failed for the Deployment Name : %s' %
                DeplName)
Example #26
0
        if key in dict:
            ##print float(dict[key])
            counter += math.log10(float(dict[key]))
            n_counter += 1
    return safe_divide(counter, n_counter)


# load the xml files
p_files_list = glob.glob(
    "/Users/mingxi/Desktop/TEMP/DISS/Oral_Defense/feature_validation/taasc/demo/results_mod_parsed_small/*.xml"
)  # Create a list of all files in target folder
for files in p_files_list:  #iterate through files
    nwords = 0
    nsent = 0

    tree = ET.ElementTree(file=files)  #The file is opened by the XML parser

    punctuation = ". , ? ! ) ( % / - _ -LRB- -RRB- SYM ".split(" ")
    noun_tags = "NN NNS NNP NNPS VBG".split(
        " "
    )  #note that VBG is included because this list is only used for looking at dependents that will be a nominal
    verb_tags = "VB VBZ VBP VBD VBN VBG".split(
        " ")  #This is a list of verb tags
    nominals = "NN NNP NNPS NNS PRP PRP$ CD DT".split(" ")
    adjectives = "JJ JJR JJS".split(" ")
    verbs = "VB VBZ VBP VBD VBN VBG".split(" ")
    other = "RB ".split(" ")
    noun_mod = [
        "amod", "appos", "det", "goeswith", "mwe", "nn", "num", "poss", "cop",
        "advmod", "advcl", "rcmod", "vmod"
    ]  #note: cop is thrown in for convenience; #advmod and advcl added in .8.5 , "advmod", "advcl"
Example #27
0
# Interactive date-range selection loop (Python 2: `print` statement and
# raw_input below).
# NOTE(review): input() on Python 2 eval()s the typed text, while the
# later prompt uses raw_input(); a non-numeric entry here raises. Also,
# if the user picks 5 (exit) or ask_date() returns 'fail', `filename` is
# never bound and the ET.ElementTree(file=...) call below raises
# NameError -- confirm intended flow.
while 1:
    period_ch = input('Choice the date range(1-4) or 5 to exit the program: ')

    if period_ch > 0 and period_ch < 5:
        date = ask_date(op_list[period_ch])
        if date != 'fail':
            filename = fetch_data(op_list[period_ch], date)
        else:
            print 'Return the wrong date format!'
        break
    elif period_ch == 5:
        break
    else:
        print 'Please input the value between 1-4 !!'

# Load the fetched XML report for parsing.
tree = et.ElementTree(file=filename + '.xml')
tree.getroot()
root = tree.getroot()

### Started parsing xml ###
show_in_screen = False
show_op = raw_input(
    'Ready parsing the xml data, would you like shows the data in the screen?[Y/N]'
)
if show_op[0] == 'Y' or show_op[0] == 'y':
    show_in_screen = True
print 'Start parsing the xml data and store the result to %s.csv ' % filename
# Each top-level child is parsed and written to the CSV by parse_data().
pagetitles_file = open(filename + '.csv', 'w')
for child in root:
    parse_data(child, pagetitles_file, show_in_screen)
pagetitles_file.close()
def _referred_oval_exists(search_root, refovalfilename):
    """Return True if '<refovalfilename>.xml' exists anywhere under search_root."""
    target_xml = refovalfilename + '.xml'
    for _dirpath, _dirnames, filenames in os.walk(search_root, topdown=True):
        if fnmatch.filter(filenames, target_xml):
            return True
    return False


def main():
    """Split CPE inventory content out of a compiled OVAL file.

    Required positional arguments:
        product      product short name, embedded in the output OVAL filename
        idname       ID prefix handed to the ID translator
        cpeoutdir    directory the generated files are written into
        ovalfile     compiled ("oval-unlinked") OVAL file to read
        cpedictfile  CPE dictionary whose <check> elements are re-pointed
                     at the generated CPE OVAL file

    Writes the filtered/translated OVAL tree and the updated CPE dictionary
    into cpeoutdir, then exits with status 0.
    """
    # All five positional arguments (argv[1] .. argv[5]) are required; the
    # previous check of len(sys.argv) < 2 allowed an IndexError below.
    if len(sys.argv) < 6:
        print("Provide an OVAL file that contains inventory definitions.")
        print("This script extracts these definitions and writes them "
              "to STDOUT.")
        sys.exit(1)

    product = sys.argv[1]
    idname = sys.argv[2]
    cpeoutdir = sys.argv[3]
    ovalfile = sys.argv[4]
    cpedictfile = sys.argv[5]

    # parse oval file
    ovaltree = parse_xml_file(ovalfile)

    # Extract inventory definitions, making the (dubious) assumption that
    # all inventory definitions are CPE.
    defs = ovaltree.find("./{%s}definitions" % oval_ns)
    inventory_defs = [
        el for el in defs.findall(".//{%s}definition" % oval_ns)
        if el.get("class") == "inventory"
    ]

    defs.clear()
    for inventory_def in inventory_defs:
        defs.append(inventory_def)
    # 'id' attributes of the *untranslated* inventory definitions; used
    # below to sanity-check the references in the CPE dictionary.
    inventory_defs_id_attrs = [
        inventory_def.get("id") for inventory_def in inventory_defs
    ]

    # Keep only the tests, states and objects the inventory defs refer to.
    tests = ovaltree.find("./{%s}tests" % oval_ns)
    cpe_tests = extract_referred_nodes(defs, tests, "test_ref")
    tests.clear()
    for cpe_test in cpe_tests:
        tests.append(cpe_test)

    states = ovaltree.find("./{%s}states" % oval_ns)
    cpe_states = extract_referred_nodes(tests, states, "state_ref")
    states.clear()
    for cpe_state in cpe_states:
        states.append(cpe_state)

    objects = ovaltree.find("./{%s}objects" % oval_ns)
    cpe_objects = extract_referred_nodes(tests, objects, "object_ref")
    env_objects = extract_referred_nodes(objects, objects, "id")
    objects.clear()
    for cpe_object in cpe_objects:
        objects.append(cpe_object)

    # if any subelements in an object contain var_ref, return it here
    local_var_ref = extract_subelement(objects, 'var_ref')

    variables = ovaltree.find("./{%s}variables" % oval_ns)
    if variables is not None:
        cpe_variables = extract_referred_nodes(tests, variables, "var_ref")
        local_variables = extract_referred_nodes(variables, variables, "id")
        if cpe_variables:
            variables.clear()
            for cpe_variable in cpe_variables:
                variables.append(cpe_variable)
        elif local_var_ref:
            # Keep only the locally referenced variable and pull in the
            # environment object it depends on.
            for local_var in local_variables:
                if local_var.get('id') == local_var_ref:
                    variables.clear()
                    variables.append(local_var)
                    env_obj = extract_env_obj(env_objects, local_var)
                    objects.append(env_obj)
        else:
            # Nothing references any variable; drop the empty element.
            ovaltree.remove(variables)

    # turn IDs into meaningless numbers
    translator = idtranslate.IDTranslator(idname)
    ovaltree = translator.translate(ovaltree)

    newovalfile = idname + "-" + product + "-" + os.path.basename(ovalfile)
    newovalfile = newovalfile.replace("oval-unlinked", "cpe-oval")
    ElementTree.ElementTree(ovaltree).write(cpeoutdir + "/" + newovalfile)

    # replace and sync IDs, href filenames in input cpe dictionary file
    cpedicttree = parse_xml_file(cpedictfile)
    newcpedictfile = idname + "-" + os.path.basename(cpedictfile)
    for check in cpedicttree.findall(".//{%s}check" % cpe_ns):
        checkhref = check.get("href")
        # If CPE OVAL references another OVAL file.
        # NOTE(review): this compares href against the literal string
        # 'filename' -- looks suspicious; confirm against the build system
        # before changing.
        if checkhref == 'filename':
            # Sanity check -- Verify the referenced OVAL is truly defined
            # somewhere in the (sub)directory tree below CWD. In a correct
            # scenario it should be located:
            # * either in input/oval/*.xml
            # * or copied by former run of "combine-ovals.py" script from
            #   shared/ directory into build/ subdirectory
            refovalfilename = check.text
            refovalfilefound = _referred_oval_exists(os.curdir,
                                                     refovalfilename)

            # Fall back to searching below the shared/ directory (two levels
            # above this script). os.path.dirname never returns None, but
            # the defensive check is kept from the original.
            shared_dir = \
                os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
            if not refovalfilefound and shared_dir is not None:
                refovalfilefound = _referred_oval_exists(shared_dir,
                                                         refovalfilename)

            # Referenced OVAL doesn't exist in the subdirtree below CWD:
            # * there's either a typo in the referenced OVAL filename, or
            # * it has been forgotten to be placed into input/oval, or
            # * the <platform> tag of particular shared/ OVAL wasn't modified
            #   to include the necessary referenced file.
            # Therefore display an error (exiting is currently disabled).
            if not refovalfilefound:
                error_msg = "\n\tError: Can't locate \"%s\" OVAL file in the \
                \n\tlist of OVAL checks for this product! Exiting..\n" % refovalfilename
                sys.stderr.write(error_msg)
                # sys.exit(1)
        check.set("href", os.path.basename(newovalfile))

        # Sanity check to verify if inventory check OVAL id is present in the
        # list of known "id" attributes of inventory definitions. If not it
        # means the provided ovalfile (sys.argv[4]) doesn't contain this OVAL
        # definition (it wasn't included due to <platform> tag restrictions).
        # Therefore display an error, since otherwise we might end up
        # creating an invalid $(ID)-$(PROD)-cpe-oval.xml file.
        if check.text not in inventory_defs_id_attrs:
            error_msg = "\n\tError: Can't locate \"%s\" definition in \"%s\". \
            \n\tEnsure <platform> element is configured properly for \"%s\".  \
            \n\tExiting..\n" % (check.text, ovalfile, check.text)
            sys.stderr.write(error_msg)
            # sys.exit(1)

        # Referenced OVAL checks passed both of the above sanity tests
        check.text = translator.generate_id("{" + oval_ns + "}definition",
                                            check.text)

    ElementTree.ElementTree(cpedicttree).write(cpeoutdir + '/' + newcpedictfile)

    sys.exit(0)
Example #29
0
# Adobe InDesign namespace URIs used on the generated table markup.
# These (and the weekday header labels) are constants, so they are defined
# once here instead of being rebuilt on every month iteration.
_AID = 'http://ns.adobe.com/AdobeInDesign/4.0/'
_AID5 = 'http://ns.adobe.com/AdobeInDesign/5.0/'

# Weekday header labels (Nepali script plus English) and their cell styles;
# Saturday is the weekly holiday.
_NEPALI_DAYS = [
    ('आईत\nSun', 'normal'),
    ('सोम\nMon', 'normal'),
    ('मंगल\nTue', 'normal'),
    ('बुध\nWed', 'normal'),
    ('बिही\nThu', 'normal'),
    ('शुक्र\nFri', 'normal'),
    ('शनि\nSat', 'holiday'),
]


def _add_cell(table, crows, ccols, cellstyle, cstyle, text, ccolwidth=None):
    """Append one InDesign <Cell> to *table* and set its text.

    Attribute insertion order deliberately mirrors the original hand-written
    dicts (ccolwidth, when given, sits between ccols and cellstyle) so the
    serialized XML is byte-identical.
    """
    attrs = {
        etree.QName(_AID, 'table'): 'cell',
        etree.QName(_AID, 'crows'): crows,
        etree.QName(_AID, 'ccols'): ccols,
    }
    if ccolwidth is not None:
        attrs[etree.QName(_AID, 'ccolwidth')] = ccolwidth
    attrs[etree.QName(_AID5, 'cellstyle')] = cellstyle
    attrs[etree.QName(_AID, 'cstyle')] = cstyle
    ET.SubElement(table, 'Cell', attrs).text = text


def write(dashi, eday, nday, fest, holiday):
    """Build the InDesign calendar XML and write it to 'output.xml'.

    Each argument maps a month name (see MONTHS) to a list of strings.
    NOTE: the per-month lists are consumed destructively (reversed, then
    pop()ped) -- callers must not reuse them afterwards.
    """
    # Register the namespaces once; the original re-registered them for
    # every month, which was redundant (registration is idempotent).
    etree.register_namespace('aid', _AID)
    etree.register_namespace('aid5', _AID5)

    root = ET.Element('Root')

    for month in MONTHS:
        # Reverse so .pop() below yields entries in calendar order.
        dashi[month].reverse()
        eday[month].reverse()
        nday[month].reverse()
        fest[month].reverse()

        sub_root = ET.SubElement(root, 'Root')
        table = ET.SubElement(
            sub_root, 'Table', {
                etree.QName(_AID, 'table'): 'table',
                etree.QName(_AID, 'trows'): '16',
                etree.QName(_AID, 'tcols'): '14',
                etree.QName(_AID5, 'tablestyle'): 'month',
            })

        # Weekday header row; each header spans two columns.
        for label, style in _NEPALI_DAYS:
            _add_cell(table, '1', '2', 'nheader', style, label)

        for week in range(5):
            # Row 1: festival cell (spans the 3 rows of the week block)
            # followed by the "dashi" cell, for each of the 7 days.
            for day in range(7):
                day_style = holiday[month][7 * week + day]
                _add_cell(table, '3', '1', 'fest', day_style,
                          fest[month].pop(), ccolwidth=FESTIVAL_WIDTH)
                _add_cell(table, '1', '1', 'dashi', day_style,
                          dashi[month].pop(), ccolwidth=DAY_WIDTH)

            # Row 2: Nepali date.
            for day in range(7):
                _add_cell(table, '1', '1', 'nday',
                          holiday[month][7 * week + day],
                          nday[month].pop(), ccolwidth=DAY_WIDTH)

            # Row 3: English date.
            for day in range(7):
                _add_cell(table, '1', '1', 'eday',
                          holiday[month][7 * week + day],
                          eday[month].pop(), ccolwidth=DAY_WIDTH)

    tree = ET.ElementTree(root)
    tree.write('output.xml', encoding='UTF-8', xml_declaration=True)
Example #30
0
    def run(self):
        """Collect FileZilla server credentials saved on this machine.

        Looks in %APPDATA%\\FileZilla for the XML files FileZilla uses to
        persist site/server information and returns a list of dicts with
        'Host', 'Port', 'Login' and 'Password' entries. Returns None when
        APPDATA is not set or when none of the files exists.
        """
        if 'APPDATA' not in os.environ:
            return
        # '\\FileZilla' (the original '\F' was an invalid escape sequence
        # that only worked because \F has no meaning in Python strings).
        directory = os.environ['APPDATA'] + '\\FileZilla'

        # Files FileZilla may keep in that directory; all of them can
        # contain server credentials with plaintext (or base64) passwords:
        #   sitemanager.xml   -- all saved sites
        #   recentservers.xml -- recently used servers
        #   filezilla.xml     -- most recent server
        candidate_files = ['sitemanager.xml', 'recentservers.xml',
                           'filezilla.xml']
        interesting_xml_file = [
            name for name in candidate_files
            if os.path.exists(os.path.join(directory, name))
        ]

        if not interesting_xml_file:
            return

        pwdFound = []
        for xml_name in interesting_xml_file:
            xml_file = os.path.expanduser(directory + os.sep + xml_name)

            tree = ET.ElementTree(file=xml_file)
            root = tree.getroot()

            # Document layout: root -> container (e.g. <Servers>) ->
            # <Server> -> Host/Port/User/Pass. Plain iteration replaces the
            # getchildren() calls removed in Python 3.9.
            for container in root:
                for server_entry in container:
                    values = {}
                    for field in server_entry:
                        if field.tag == 'Host':
                            values[field.tag] = field.text
                        elif field.tag == 'Port':
                            values[field.tag] = field.text
                        elif field.tag == 'User':
                            values['Login'] = field.text
                        elif field.tag == 'Pass':
                            try:
                                # Passwords may be base64-encoded; an
                                # 'encoding' attribute with any other value
                                # is deliberately skipped (as before).
                                if 'encoding' in field.attrib:
                                    if field.attrib['encoding'] == 'base64':
                                        values['Password'] = \
                                            base64.b64decode(field.text)
                                else:
                                    values['Password'] = field.text
                            except Exception:
                                # Fall back to the raw text on decode errors.
                                values['Password'] = field.text

                    # Keep any entry that yielded at least one credential.
                    if values:
                        pwdFound.append(values)
        # print the results
        return pwdFound