Example No. 1
 def generate_code(self):
     FOLDER = "C:\\Users\\saikumar\\workspace\\Nand2Tetris\\07\\"
     file_name = string.rsplit(self._parser.get_filename(), '.', 1)[0]
     file_object = open(FOLDER + file_name + '.asm', 'w')
     
     # for eq, gt, lt we have to use control flow
     # once it sets required result back to stack
      
     jump_sequence = 0
     
     for line in self._parser.get_line():
         code_fragment = string.rsplit(line, ' ', 1)
         
         # Memory Access Instruction
         if len(code_fragment) == 2: 
             assembly_code = CodeWriter.VM_STACK_COMMANDS[code_fragment[0]]
             assembly_code = string.replace(assembly_code, 'X', code_fragment[1])
             file_object.write(assembly_code)
         elif len(code_fragment) == 1:
             if code_fragment[0] in ['add', 'sub', 'or', 'not', 'and', 'neg']:
                 assembly_code = CodeWriter.VM_STACK_COMMANDS[code_fragment[0]]
                 file_object.write(assembly_code)
             elif code_fragment[0] in ['eq', 'gt', 'lt']:
                 assembly_code = CodeWriter.VM_STACK_COMMANDS[code_fragment[0]]
                 assembly_code = string.replace(assembly_code, '_J', '_' + str(jump_sequence))
                 file_object.write(assembly_code)
                 jump_sequence += 1
                 
     file_object.write('(END)\n' +\
                        '@END\n' +\
                        '0;JMP'
                      )
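A side note on the pattern used throughout these examples: the module-level string.rsplit(s, sep, maxsplit) is a Python 2-only helper that simply delegates to the str.rsplit method, so the portable spelling is the method call. A minimal sketch with a made-up VM line (not taken from the project above):

line = "push constant 7"        # hypothetical VM command
parts = line.rsplit(' ', 1)     # ['push constant', '7'] -- same result as string.rsplit(line, ' ', 1)
command, argument = parts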
Example No. 2
def archiveimage(imguri, localpostpath):
    "save the image locally"
    # read image data
    imagedata = getcontentbinary(imguri)
    # take the last part of the path after "/"
    imagename = string.rsplit(imguri, "/", 1)[-1:][0]
    # take the last part of the string after "."
    extension = string.rsplit(imagename, ".", 1)[-1:][0]
    # if the extension not in common format, what is it?
    # TOFIX: corner cases
    # foo.bar (but really foo.bar.png)
    # foo     (but really foo.png)
    # foo.svg
    if extension.lower() not in ["jpg", "png", "gif"]:
        imagetype = imghdr.what(None, imagedata[:32])
        if imagetype == "jpeg":
            extension = "jpg"
        else:
            extension = imagetype
        filename = "%s.%s" % (imagename, extension)
    else:
        filename = imagename
    fullpath = "%s%s" % (localpostpath, filename)
    # save the image
    with open(fullpath, 'wb') as imagefile:
        imagefile.write(imagedata)
        logging.info("created image at %s" % (fullpath))
    return filename
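The filename and extension handling above can be tried in isolation. A small sketch, assuming a made-up URL, of what the two rsplit calls produce (str.rsplit behaves the same as the string.rsplit calls used here):

imguri = "http://example.com/images/photo.large.png"   # hypothetical URL
imagename = imguri.rsplit("/", 1)[-1]                   # 'photo.large.png'
extension = imagename.rsplit(".", 1)[-1]                # 'png' -- only the last dot counts
bare = "photo".rsplit(".", 1)[-1]                       # 'photo' -- no dot, so the imghdr sniffing above is the fallback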
Example No. 3
    def fetch_testfiles(self):
        """Needed flash files, sis-files and testscripts from given matti scripts -folder are added to file list."""
        tmp_case_list = []
#        tmp_image_list = []
        os.chdir(os.path.normpath(self.configuration.matti_scripts))
        try:
            for path, _, names in os.walk(os.getcwd()):
                for name in names:
                    if re.search(r'.*?[.]rb\Z', name):
                        tmp_case_list.append((os.path.normpath(os.path.join(path, name)), os.path.join("ats3", "matti", "script", name)))
            if tmp_case_list:
                for tmp_case in tmp_case_list:
                    self.configuration.step_list.append(dict(path=os.path.join("§TEST_RUN_ROOT§", str(tmp_case[1])), name="Test case"))
            if self.configuration.flash_images:
                for image in self.configuration.flash_images:
                    tmp = string.rsplit(image, os.sep)
                    image_name = tmp[len(tmp)-1] 
                    self.configuration.image_list.append(os.path.join("ATS3Drop", "images", image_name))
            if self.configuration.sis_files:
                for sis in self.configuration.sis_files:
                    tmp = string.rsplit(sis, os.sep)
                    sis_name = tmp[len(tmp)-1] 
                    self.configuration.sis_list.append(dict(path=os.path.join("ATS3Drop", "sis", sis_name), dest=sis_name))
        except KeyError, error:
            _logger.error("Error in file reading / fetching!")
            sys.stderr.write(str(error))
Example No. 4
 def run(self):
     PATH = "E:\\Nand2Tetris\\nand2tetris\\projects\\08\\FunctionCalls\\FibonacciElement"
     PATH = os.path.abspath(PATH)
     if os.path.isdir(PATH):
         file_name = string.rsplit(PATH, "\\", 1)[1]
         code_writer = CodeWriter(PATH=PATH)
         print "The path is directory " + PATH
         code_writer.set_filename(file_name + ".asm")
         code_writer.start_up_code()
         vm_files = [f for f in os.listdir(PATH) if f.find(".vm") > 0]
         if "Sys.vm" in vm_files:
             sysindex = vm_files.index("Sys.vm")
             vm_files[0], vm_files[sysindex] = vm_files[sysindex], vm_files[0]
         for file_name in vm_files:
             print file_name
             parser = Parser(file_name, PATH)
             parser.first_scan()
             code_writer.set_parser(parser)
             code_writer.generate_code()
         code_writer.terminate_code()
     else:
         print "The path is file " + PATH
         PATH, file_name = string.rsplit(PATH, "\\", 1)
         parser = Parser(file_name, PATH)
         parser.first_scan()
         code_writer = CodeWriter(PATH=PATH)
         code_writer.set_parser(parser)
         code_writer.set_filename(string.rsplit(file_name, ".", 1)[0] + ".asm")
         code_writer.start_up_code()
         code_writer.generate_code()
         code_writer.terminate_code()
Example No. 5
def fetchData(command, prefix, lz = False):
	if not power.is_on(): return
	result = send_receive(command)
	if not result : return
	values = result.split('\r')
	print values
	for idx, i in enumerate(values):  # enumerate keeps the correct index even if a value repeats
		# BGL: ['06/01/11', '30/06/04', '22749', '24069', '9857', '06/01/11 5', '04/04/10 1979']
		if idx == 5 and command == "BGL":
			splitted = string.rsplit(i, " ", 1)
			data[prefix+str(6)]=splitted[0]
			data[prefix+str(5)]=splitted[1]
		elif idx == 6 and command == "BGL":
			splitted = string.rsplit(i, " ", 1)
			data[prefix+str(8)]=splitted[0]
			data[prefix+str(7)]=splitted[1]
		elif idx == 4 and lz:
			splitted = string.rsplit(i, " ", 1)
			data[prefix+str(5)]=splitted[0]
			data[prefix+str(4)]=splitted[1]
		elif idx == 5 and lz:
			splitted = string.rsplit(i, " ", 1)
			data[prefix+str(7)]=splitted[0]
			data[prefix+str(6)]=splitted[1]
		else:
			data[prefix+str(idx)]=i
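For reference, the maxsplit=1 behaviour the index-specific branches above rely on: only the last space is split, so a field that itself contains spaces keeps its left part intact. A tiny sketch with a value shaped like the BGL sample comment:

value = "06/01/11 5"                 # date plus reading, as in the commented BGL example
left, right = value.rsplit(" ", 1)   # left == '06/01/11', right == '5'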
Example No. 6
 def run(self):
     PATH = "E:\\Nand2Tetris\\nand2tetris\\projects\\08\\FunctionCalls\\FibonacciElement"
     PATH = os.path.abspath(PATH)
     if os.path.isdir(PATH):
         file_name = string.rsplit(PATH, '\\', 1)[1]
         code_writer = CodeWriter(PATH=PATH)
         print 'The path is directory ' + PATH
         code_writer.set_filename(file_name + '.asm')
         code_writer.start_up_code()
         vm_files = [f for f in os.listdir(PATH) if f.find('.vm') > 0]
         if 'Sys.vm' in vm_files:
             sysindex = vm_files.index('Sys.vm')
             vm_files[0], vm_files[sysindex] = vm_files[sysindex], vm_files[
                 0]
         for file_name in vm_files:
             print file_name
             parser = Parser(file_name, PATH)
             parser.first_scan()
             code_writer.set_parser(parser)
             code_writer.generate_code()
         code_writer.terminate_code()
     else:
         print 'The path is file ' + PATH
         PATH, file_name = string.rsplit(PATH, '\\', 1)
         parser = Parser(file_name, PATH)
         parser.first_scan()
         code_writer = CodeWriter(PATH=PATH)
         code_writer.set_parser(parser)
         code_writer.set_filename(
             string.rsplit(file_name, '.', 1)[0] + '.asm')
         code_writer.start_up_code()
         code_writer.generate_code()
         code_writer.terminate_code()
Example No. 7
def archiveimage(imguri, localpostpath):
    "save the image locally"
    # read image data
    imagedata = getcontentbinary(imguri)
    # take the last part of the path after "/"
    imagename = string.rsplit(imguri, "/", 1)[-1:][0]
    # take the last part of the string after "."
    extension = string.rsplit(imagename, ".", 1)[-1:][0]
    # if the extension not in common format, what is it?
    # TOFIX: corner cases
    # foo.bar (but really foo.bar.png)
    # foo     (but really foo.png)
    # foo.svg
    if extension.lower() not in ["jpg", "png", "gif"]:
        imagetype = imghdr.what(None, imagedata[:32])
        if imagetype == "jpeg":
            extension = "jpg"
        else:
            extension = imagetype
        filename = "%s.%s" % (imagename, extension)
    else:
        filename = imagename
    fullpath = "%s%s" % (localpostpath, filename)
    # save the image
    with open(fullpath, 'wb') as imagefile:
        imagefile.write(imagedata)
        logging.info("created image at %s" % (fullpath))
    return filename
Example No. 8
 def testMyDistro_restartSshService(self):
     """
     Test MyDistro.restartSshService()
     """
     cmd = 'service '+ waagent.MyDistro.ssh_service_name + ' status'
     sshpid=string.rsplit(waagent.RunGetOutput(cmd)[1],' ',1)
     waagent.MyDistro.restartSshService()
     assert sshpid != string.rsplit(waagent.RunGetOutput(cmd)[1],' ',1),'ssh server pid is unchanged.'
Example No. 9
def do_file(fn, logs_dir=LOGS_DIR, dynamic_dates=False, timezone=None, logfn_keepdir=False):
    if fn.endswith('.gz'):
        fp = gzip.GzipFile(fn)
        if logfn_keepdir:
            fnb = fn.replace('/', '__')
        else:
            fnb = os.path.basename(fn)
        if dynamic_dates:
            ofn = string.rsplit(fnb, '.', 1)[0]
        else:
            ofn = string.rsplit(fnb, '.', 2)[0]
    else:
        fp = open(fn)	# expect it ends with .log
        ofn = string.rsplit(os.path.basename(fn), '.', 1)[0]

    # if file has been done, then there will be a file denoting this in the META subdir
    ofn = '%s/META/%s' % (logs_dir, ofn)
    if os.path.exists(ofn):
        print "Already done %s -> %s (skipping)" % (fn, ofn)
        sys.stdout.flush()
        return

    print "Processing %s -> %s (%s)" % (fn, ofn, datetime.datetime.now())
    sys.stdout.flush()

    m = re.search('(\d\d\d\d-\d\d-\d\d)', fn)
    if m:
        the_date = m.group(1)
    else:
        the_date = None

    cnt = 0
    for line in fp:
        cnt += 1
        try:
            newline = do_split(line, linecnt=cnt, run_rephrase=True, date=the_date, do_zip=True, logs_dir=logs_dir,
                               dynamic_dates=dynamic_dates, timezone=timezone)
        except Exception as err:
            print "[split_and_rephrase] ===> OOPS, failed err=%s in parsing line %s" % (str(err), line)
            raise
        if ((cnt % 10000)==0):
            sys.stdout.write('.')
            sys.stdout.flush()
    print

    mdir = '%s/META' % logs_dir
    if not os.path.exists(mdir):
        os.mkdir(mdir)
    open(ofn, 'a').write(' ') 	    # mark META

    # close all file pointers
    for fn, fp in ofpset.items():
        fp.close()
        ofpset.pop(fn)

    print "...done (%s)" % datetime.datetime.now()
    
    sys.stdout.flush()
Example No. 10
def do_file(fn, use_local_files, logs_dir=LOGS_DIR, dynamic_dates=False, timezone=None, logfn_keepdir=False):
    if fn.endswith('.gz'):
        fp = gzip.GzipFile(fn)
        if logfn_keepdir:
            fnb = fn.replace('/', '__')
        else:
            fnb = os.path.basename(fn)
        if dynamic_dates:
            ofn = string.rsplit(fnb, '.', 1)[0]
        else:
            ofn = string.rsplit(fnb, '.', 2)[0]
    else:
        fp = open(fn)	# expect it ends with .log
        ofn = string.rsplit(os.path.basename(fn), '.', 1)[0]

    # if file has been done, then there will be a file denoting this in the META subdir
    # ofn = '%s/META/%s' % (logs_dir, ofn)
    # if os.path.exists(ofn):
    #     print "Already done %s -> %s (skipping)" % (fn, ofn)
    #     sys.stdout.flush()
    #     return

    print "Processing %s -> %s (%s)" % (fn, ofn, datetime.datetime.now())
    sys.stdout.flush()

    date_pattern = getattr(edx2bigquery_config, 'TRACKING_LOG_REGEX_DATE_PATTERN', '')
    date_match = re.search(date_pattern, fn)

    the_date = date_match.group(1) if date_match else None

    cnt = 0
    for line in fp:
        cnt += 1
        try:
            newline = do_split(line, use_local_files, linecnt=cnt, run_rephrase=True, date=the_date, do_zip=True, logs_dir=logs_dir,
                               dynamic_dates=dynamic_dates, timezone=timezone)
        except Exception as err:
            print "[split_and_rephrase] ===> OOPS, failed err=%s in parsing line %s" % (str(err), line)
            raise
        if ((cnt % 10000)==0):
            sys.stdout.write('.')
            sys.stdout.flush()
    print

    mdir = '%s/META' % logs_dir
    if not os.path.exists(mdir):
        os.mkdir(mdir)
    open(ofn, 'a').write(' ') 	    # mark META

    # close all file pointers
    for fn, fp in ofpset.items():
        fp.close()
        ofpset.pop(fn)

    print "...done (%s)" % datetime.datetime.now()
    
    sys.stdout.flush()
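The two rsplit calls in both versions above differ only in how many trailing extensions they strip from the log filename. A minimal sketch with a hypothetical name (the real naming scheme is deployment-specific):

fnb = "tracking.log-2015-01-01.json.gz"        # hypothetical gzipped tracking log
ofn_dynamic = fnb.rsplit('.', 1)[0]            # 'tracking.log-2015-01-01.json'  (dynamic_dates branch)
ofn_default = fnb.rsplit('.', 2)[0]            # 'tracking.log-2015-01-01'       (default branch)
ofn_plain = "server.log".rsplit('.', 1)[0]     # 'server'                        (non-.gz branch)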
Example No. 11
def fetch_kafka():
    print "Downloading ", DOWNLOAD_URL
    exe('wget '+DOWNLOAD_URL)
    print DOWNLOAD_URL
    tgz=rsplit(DOWNLOAD_URL, "/", 1)[1]
    print "Unpacking ", tgz
    exe('tar -zxvf '+tgz)
    global KAFKA_SRC
    if KAFKA_SRC is None:
        KAFKA_SRC = rsplit(tgz, ".tgz", 1)[0]
Example No. 12
 def testMyDistro_restartSshService(self):
     """
     Test MyDistro.restartSshService()
     """
     cmd = 'service ' + waagent.MyDistro.ssh_service_name + ' status'
     sshpid = string.rsplit(waagent.RunGetOutput(cmd)[1], ' ', 1)
     waagent.MyDistro.restartSshService()
     assert sshpid != string.rsplit(
         waagent.RunGetOutput(cmd)[1], ' ',
         1), 'ssh server pid is unchanged.'
Example No. 13
def fetch_kafka():
    print "Downloading ", DOWNLOAD_URL
    exe('wget '+DOWNLOAD_URL)
    print DOWNLOAD_URL
    tgz=rsplit(DOWNLOAD_URL, "/", 1)[1]
    print "Unpacking ", tgz
    exe('tar -zxvf '+tgz)
    global KAFKA_SRC
    if KAFKA_SRC is None:
        KAFKA_SRC = rsplit(tgz, ".tgz", 1)[0]
Example No. 14
def convert_comment(comment):
    #discard any comments without timestamp (for now)
    if (comment[0] == None or comment[0] <= 0):
        return ''
    else:
    #Convert created_at timestamp to unix timestamp, as required for gource
        try:
            timestamp = timegm(time.strptime(comment[0], "%Y/%m/%d %H:%M:%S +0000"))
        except ValueError as ve:
            print(comment[0]+' - value error - '+str(ve))
            
            # maybe a different timezone 
#             if ('+' in comment[0] or '-' in comment[0]):
#                 if ('+' in comment[0]):
#                     split_time_and_timezone = string.rsplit(comment[0], '+', 1)
#                     multiplier = 1
#                 else: 
#                     split_time_and_timezone = string.rsplit(comment[0], '-', 1)
#                     multiplier = -1
#                 split_time = time.strptime(split_time_and_timezone[0], "%Y/%m/%d %H:%M:%S ")
#                 timezone = int(int(split_time_and_timezone[1])/100) * multiplier 
#                 split_time.tm_hour = split_time.tm_hour + timezone
#                 timestamp = timegm(split_time)

            # but just ignore different timezone for now - life is too short! TODO
            if ('+' in comment[0]):
                split_time_and_timezone = string.rsplit(comment[0], '+', 1)
            else:
                if ('-' in comment[0]):
                    split_time_and_timezone = string.rsplit(comment[0], '-', 1)
                else: 
                    return ''
#                    multiplier = -1
            timestamp = timegm(time.strptime(split_time_and_timezone[0], "%Y/%m/%d %H:%M:%S "))

        except Exception as e:
            print(comment[0]+' - exception - '+str(e))
            return ''         
        # return str(int(timestamp))+'|'+str(comment[1])+'|'+str(comment[2])+'|'+str(comment[3])+'|'+str(hex(timestamp % 0xFFFFFF))[2:]+'\n'
#         return_string timestamp |  username | type  |  path of track, as user/trackid (to get best clusters of a user)  | random_colour
#         if (comment[2] == 'A'):
#             return str(int(timestamp))+'|'+lower_case_str(comment[1])+'|'+comment[2]+'|'+comment[1]+'/'+str(comment[3])+'|'+str(hex(random.randint(0,0xFFFFCC)))[2:]+'\n'
#         else: 
        return_string = str(int(timestamp))
        return_string = return_string+'|'+lower_case_str(comment[1])
        return_string = return_string+'|'+str(comment[2])
        return_string = return_string+'|'+str(comment[4])
        return_string = return_string+'/'+str(comment[3])
        return_string = return_string+'|'+str(hex(random.randint(0,0xFFFFCC)))[2:]+'\n'
        return return_string
Example No. 15
    def handle_starttag(self, tag, attrs):
        try:
            if tag == 'table' and ('class', 'infobox vcard') in attrs:
                self.in_table = True
            if tag == 'td':
                self.in_td = True
                return

            if self.in_table and self.in_data:
                if tag == 'br':
                    self.br_last = True
                    return
                for (k, v) in attrs:
                    if k == 'title':
                        if re.match(prop, v):

                            self.in_td_string += ("\n " + v +
                                                  " -- from tag --  : ")
                            if v in self.related_objs:
                                self.res_dict[v] = []
                                self.curr_v = v
                                self.curr_v_tag = tag
                            return
                        splitted = rsplit(v, "\\| |,|;|-")
                        for s in splitted:
                            if re.match(year, s) or (
                                    s in months) or self.re_year.match(v):
                                self.in_td_string += v + "-- from tag -- "
                                return
        except Exception as e:
            print "in handle_starttag"
            print e
Example No. 16
def groupwiseAffineRegistration(ImageTxtFile,
                                ImrefFilename,
                                TransfoIniTxtFile=None):

    ImageList = fileToListe(ImageTxtFile)

    if TransfoIniTxtFile:
        TransfoIniList = fileToListe(TransfoIniTxtFile)
        if len(TransfoIniList) != len(ImageList):
            print "The number of images and transfoIni should be the same !"
            exit(-1)
        else:
            TransfoIniList = fileToListe(TransfoIniTxtFile)
    else:
        TransfoIniList = []
        for nom in fileToListe(ImageTxtFile):
            TransfoIniList.append(
                string.rsplit(addPrefixToFilename(nom, 'aff_'), '.')[0] +
                ".trf")

    param = []
    for i in range(len(ImageList)):
        param.append([
            RecalageAffineElementaire, ImrefFilename, ImageList[i],
            TransfoIniList[i]
        ])

    pool = multiprocessing.Pool()  # Create a group of CPUs to run on
    pool.map(lanceMultithreadFunctionWithMultipleParam, [p for p in param])
Example No. 17
def sanitizeJDBCCliVector(cliVector):

    dsName = regularExpressionSearch(
        "/subsystem=datasources/data-source=(.*)/", cliVector)
    dsXAName = regularExpressionSearch(
        "/subsystem=datasources/xa-data-source=(.*)/", cliVector)

    if (dsName != ""):
        actualDSName = dsName
    elif (dsXAName != ""):
        actualDSName = dsXAName

    splitExpression = rsplit(actualDSName, "/", -1)
    newDSNameAndParams = ""
    subExpressionCount = 1
    if (len(splitExpression) > 1):
        for subExpression in splitExpression:
            if (subExpressionCount == 1):
                newDSNameAndParams = newDSNameAndParams + subExpression
            else:
                newDSNameAndParams = newDSNameAndParams + "\/" + subExpression

            subExpressionCount = subExpressionCount + 1

        if (dsName != ""):
            newCliVector = "/subsystem=datasources/data-source=" + newDSNameAndParams + "/"
        elif (dsXAName != ""):
            newCliVector = "/subsystem=datasources/xa-data-source=" + newDSNameAndParams + "/"

    else:
        newCliVector = cliVector

    return newCliVector
Example No. 18
def xmlize_items(items, query):
    items_a = []

    for item in items:
        list = string.rsplit(item, "/", 1)
        name = list[-1]
        path = item if len(list) == 2 else ""

        complete = item
        if item.lower().startswith(query.lower()):
            i = item.find("/", len(query))
            if i != -1:
                complete = item[:(i+1)]

        items_a.append("""
    <item uid="%(item)s" arg="%(item)s" autocomplete="%(complete)s">
        <title>%(name)s</title>
        <subtitle>%(path)s</subtitle>
    </item>
        """ % {'item': item, 'name': name, 'path': path, 'complete': complete})

    return """
<?xml version="1.0"?>
<items>
    %s
</items>
    """ % '\n'.join(items_a)
Example No. 19
    def cache_dir(self, path, env='base', include_empty=False):
        '''
        Download all of the files in a subdir of the master
        '''
        ret = []
        path = self._check_proto(path)
        for fn_ in self.file_list(env):
            if fn_.startswith(path):
                local = self.cache_file('salt://{0}'.format(fn_), env)
                if not fn_.strip():
                    continue
                ret.append(local)

        if include_empty:
            # Break up the path into a list containing the bottom-level directory
            # (the one being recursively copied) and the directories preceding it
            separated = string.rsplit(path, '/', 1)
            if len(separated) != 2:
                # No slashes in path. (This means all files in env will be copied)
                prefix = ''
            else:
                prefix = separated[0]
            for fn_ in self.file_list_emptydirs(env):
                if fn_.startswith(path):
                    dest = salt.utils.path_join(self.opts['cachedir'], 'files',
                                                env)
                    minion_dir = '%s/%s' % (dest, fn_)
                    if not os.path.isdir(minion_dir):
                        os.makedirs(minion_dir)
                    ret.append(minion_dir)
        return ret
Example No. 20
    def extract_enums_asgml(self):
        """Extract Interlis Enumerations as GML"""
        enum_tables = self.extract_enums()
        # GML output
        gml = ElementTree.Element('FeatureCollection')
        gml.set('xmlns', 'http://ogr.maptools.org/')
        gml.set('xmlns:gml', 'http://www.opengis.net/gml')
        #<ogr:FeatureCollection
        #     xmlns:ogr="http://ogr.maptools.org/"
        #     xmlns:gml="http://www.opengis.net/gml">
        enumIdx = 0
        for name, defs in enum_tables.items():
            # enum name should not be longer than 63 chars, which is PG default name limit
            # Nutzungsplanung.Nutzungsplanung.Grundnutzung_Zonenflaeche.Herkunft.TYPE
            # -> enumXX_herkunft
            enumTypeName = string.rsplit(name, '.', maxsplit=1)[-1]
            curEnumName = "enum%d_%s" % (enumIdx, enumTypeName)
            enumIdx = enumIdx + 1
            for enumdef in defs:
                #  <gml:featureMember>
                #    <ogr:Grundzonen__GrundZonenCode__ZonenArt>
                #      <ogr:value>Dorfkernzone</ogr:value><ogr:id>0</ogr:id>
                #    </ogr:Grundzonen__GrundZonenCode__ZonenArt>
                #  </gml:featureMember>
                featureMember = ElementTree.SubElement(
                    gml, "gml:featureMember")
                feat = ElementTree.SubElement(featureMember, curEnumName)
                id = ElementTree.SubElement(feat, "id")
                id.text = str(enumdef['id'])
                enum = ElementTree.SubElement(feat, "enum")
                enum.text = enumdef['enum']
                enumtxt = ElementTree.SubElement(feat, "enumtxt")
                enumtxt.text = enumdef['enumtxt']

        return ElementTree.tostring(gml, 'utf-8')
Example No. 21
def scanPortfolio(dir):
	
	output = list()
	
	listing = walk(dir)
	
	for step in listing:			
		title = rsplit(step[topdir], sep, 1)[1]
		if title != "":			
			print "creating portfolio set named '" + title + "'"
			
			images = list()
			images = filter(lambda f : find(f.lower(), ".jpg") > 0, step[dirfiles])
			images = map(lambda i : path.join(step[topdir], i), images)
			
			portfolioSet = dict()
			portfolioSet['title'] = title
			portfolioSet['images'] = images
			
			output.append(portfolioSet)
			
	output = sortPortfolios(output)
	
	layoutFile = open(path.join(dir, "layout.json"), 'w')
	layoutFile.write(JSONEncoder().encode(output))
Example No. 22
def cmd_json_callback(client, userdata, msg):
    topicArray = string.rsplit(msg.topic, '/')
    obj = topicArray[len(topicArray) - 3]

    logger1.debug("Command JSON-type: " + msg.topic + "->" + msg.payload,
                  level="i")
    send_command(strCmd=msg.payload, nCmdType=2, destObj=int(obj))
Example No. 23
 def tryget_managed_frame_info(self):
     sos = SosInterpreter()
     ip2mdOut = sos.ip2md(self.strIp)
     ip2mdProps = _str_to_dict(ip2mdOut)
     _dbg_write(str(ip2mdProps))
     if 'Method Name' in ip2mdProps: 
         self.strRoutine = ip2mdProps['Method Name']
         if 'Class' in ip2mdProps:
             classPtr = ip2mdProps['Class']
             if classPtr is not None and classPtr != '':
                 classOut = sos.dumpclass(classPtr)
                 classProps = _str_to_dict(classOut)
                 _dbg_write(str(classProps))
                 if 'File' in  classProps:
                     strFile = classProps['File']
                     self.strModule = string.rsplit(string.rsplit(strFile, '.', 1)[0], '/', 1)[1] 
Example No. 24
    def __init__(self, name, displayname, version_set_state, client_id, originator, workspace = None):
        DtrBaseObject.__init__(self, name)

        self.name = name
        self.displayname = displayname
        self.integrations = []
        self.version_set = []
        self.content_set = []
        self.workspace_name = None
        self.workspace = workspace
        self.originator = originator
        
        if version_set_state == "open":
            self.version_set_state = self.VERSIONSET_OPEN
        elif version_set_state == "closed":
            self.version_set_state = self.VERSIONSET_CLOSED
        else:
            self.version_set_state = self.VERSIONSET_UNKNOWN
        
        if not (client_id is None or client_id == ''):
            split_client = string.rsplit(client_id, ":", 1)
            self.client_hostname = split_client[1]
            self.client_path = split_client[0]
        else:
            self.client_hostname = None
            self.client_path = None
Example No. 25
def create_http_request(flowheader, reqbuf):
    sfp = StringIO(reqbuf)
    method, url, httpversion = http.parse_init_http(sfp.readline())
    host, port, path = parse_url(url)
    headers = http.read_headers(sfp)

    if not host:
        if not headers.get("host"):
            host = flowheader.dstip
        else:
            host = headers.get("host")[0]
            if ":" in host:
                host = string.rsplit(host, ":", maxsplit=1)[0]

    if port == None:
        port = flowheader.dport

    # TODO: passing None as the second arg will produce and error if "expect" is in the headers
    content = http.read_http_body_request(sfp, None, headers, httpversion, None)

    # content = http.read_http_body(sfp, headers, True, None)
    return flow.Request(
        None,
        httpversion,
        host,
        port,
        "http",
        method,
        path,
        headers,
        content,
        flowheader.ts_request_start,
        flowheader.ts_request_finish,
    )
Example No. 26
	def MagnetSetHeater(self, State):
		
		HeaterBefore = self.MagnetReadHeater()
		if State == 1:
			Reply = self.Visa.ask("SET:DEV:GRPZ:PSU:SIG:SWHT:ON")	
		elif State == 0:
			Reply = self.Visa.ask("SET:DEV:GRPZ:PSU:SIG:SWHT:OFF")
		else:
			print "Error cannot set switch heater\n"

		Answer = string.rsplit(Reply,":",1)[1]
		if Answer == "VALID":
			Heater = 1
		elif Answer == "INVALID":
			Heater = 0
		else:
			Heater = -1

		HeaterAfter = self.MagnetReadHeater()	
		if HeaterAfter != HeaterBefore:
			print "Heater Switched! Waiting 4 min\n"
			time.sleep(240)
			print "Finished wait!\n"

		return Heater
Example No. 27
 def tryget_managed_frame_info(self):
     sos = SosInterpreter()
     ip2mdOut = sos.ip2md(self.strIp)
     ip2mdProps = _str_to_dict(ip2mdOut)
     _dbg_write(str(ip2mdProps))
     if 'Method Name' in ip2mdProps: 
         self.strRoutine = ip2mdProps['Method Name']
         if 'Class' in ip2mdProps:
             classPtr = ip2mdProps['Class']
             if classPtr is not None and classPtr != '':
                 classOut = sos.dumpclass(classPtr)
                 classProps = _str_to_dict(classOut)
                 _dbg_write(str(classProps))
                 if 'File' in  classProps:
                     strFile = classProps['File']
                     self.strModule = string.rsplit(string.rsplit(strFile, '.', 1)[0], '/', 1)[1] 
Example No. 28
 def get_symbol(self, strIp):
     strRoutine='UNKNOWN'
     strModule='UNKNOWN'
     ip2mdOut = self.ip2md(strIp)
     ip2mdProps = _str_to_dict(ip2mdOut)
     if 'Method Name' in ip2mdProps: 
         strRoutine = ip2mdProps['Method Name'].split('(')[0]
         if 'Class' in ip2mdProps:
             classPtr = ip2mdProps['Class']
             if classPtr is not None and classPtr != '':
                 classOut = self.dumpclass(classPtr)
                 classProps = _str_to_dict(classOut)
                 if 'File' in  classProps:
                     strFile = classProps['File']
                     strModule = string.rsplit(string.rsplit(strFile, '.', 1)[0], '/', 1)[1] 
     return strModule + '!' + strRoutine
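The nested rsplit on the last line above reduces a source-file path to a bare module-style name. A short sketch with an invented path (it assumes the path contains both a dot and a slash):

strFile = "/src/App/Helpers/Parser.cs"        # hypothetical path from the class properties
without_ext = strFile.rsplit('.', 1)[0]       # '/src/App/Helpers/Parser'
module_name = without_ext.rsplit('/', 1)[1]   # 'Parser'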
Example No. 29
    def _word_repl(self, word, groups):
        if self.in_dd:
            name = groups.get('word_name')
            current_page = self.formatter.page.page_name
            abs_name = wikiutil.AbsPageName(current_page, name)
            if abs_name == current_page:
                return self.__real_val(abs_name)
            else:
                # handle anchors
                try:
                    abs_name, anchor = rsplit(abs_name, "#", 1)
                except ValueError:
                    anchor = ""
                if self.cat_re.match(abs_name):
                    return self.__real_val(abs_name)

                else:
                    if not anchor:
                        wholename = abs_name
                    else:
                        wholename = "%s#%s" % (abs_name, anchor)

                    return self.__real_val(wholename)

        return apply(wikiParser._word_repl, (self, word, groups))
Example No. 30
    def updateFN(self):

        #ext = item[-3:]        

        pr = self.projects_CB.currentText()
        ep = self.episodes_CB.currentText()
        sq = self.sequence_CB.currentText()
        tk = self.task_CB.currentText()
        nn = self.nn_LE.text()
        self.ext = string.rsplit(cmds.file(q=True,sn=True),"/",1)[1].split(".")[-1]
        
        if tk != "data":
            self.taskRootPath = self.seqRootPath + str(tk) + "/"
            
            if nn == "":
                self.sceneName = str(sq)+"_"+str(tk) + "." + self.ext
            else:
                if re.search("s[0-9][0-9][0-9][0-9]",nn):
                    self.sceneName = str(sq)+"_"+str(nn)+ "_"+str(tk)+"." + self.ext
                else:
                    self.sceneName = str(sq)+"_"+str(tk)+"_"+str(nn) +"." + self.ext
            
            self.fn_LE.setText(self.sceneName)
        if str(tk) == "data":
            if nn == "":
                self.sceneName = str(sq) + "_" + "." + self.ext
            else:
                if re.search("s[0-9][0-9][0-9][0-9]",nn):
                    self.sceneName = str(sq)+"_"+str(nn)+ "." + self.ext
                else:
                    self.sceneName = str(sq)+"_"+str(nn) + "." + self.ext
            
            self.fn_LE.setText(self.sceneName)
Example No. 31
def event_callback(client, userdata, msg):
    global db
    #    print(str(datetime.now()) + " " + "event: "+msg.topic+"->"+msg.payload)
    logger1.info("event: " + msg.topic + "->" + msg.payload)
    topicArray = string.rsplit(msg.topic, '/')
    obj = topicArray[len(topicArray) - 2]
    # {"a": 0, "c": 3, "t": 1432600485, "d": 1004, "v": 3238}
    try:
        e_data = json.loads(msg.payload)
        # log only action events:
        if (e_data["a"]):
            try:
                db.events.insert_one({
                    "ts": datetime.datetime.utcnow(),
                    "acc": ACCOUNT,
                    "tt": e_data["t"],
                    "obj": int(obj, 10),
                    "dev": e_data["d"],
                    "ch": e_data["c"],
                    "val": e_data["v"],
                    "act": e_data["a"]
                })
            except Exception as ex:
                #                print("MongoDB error={}: {}".format(e_data,ex))
                logger1.error("MongoDB error={}: {}".format(e_data, ex))
    except Exception as ex:
        #        print("Cannot decode JSON object, payload={}: {}".format(msg.payload,ex))
        logger1.warn("Cannot decode JSON object, payload={}: {}".format(
            msg.payload, ex))
Example No. 32
 def shorten_name(self, src_name, prefix, splitchar='.'):
     # Nutzungsplanung.Nutzungsplanung.Grundnutzung_Zonenflaeche.Herkunft
     # -> enumXX_herkunft
     short_name = string.rsplit(src_name, splitchar, maxsplit=1)[-1]
     short_name = "%s%d_%s" % (prefix, self._name_seq, short_name)
     self._name_seq = self._name_seq + 1
     return short_name
Example No. 33
def importPlanner(module_or_file_name=None):
    if module_or_file_name is None:
        if len(argv) != 2:
            print "Usage: %s <planner-module>" % argv[0]
            exit(2)    
        module_or_file_name = argv[1].strip()
    
    if module_or_file_name.endswith(".py"):
        module_or_file_name = module_or_file_name[:-3]
    try:
        dirname, filename = string.rsplit(module_or_file_name, '/', 1)
        path.append(dirname)
    except ValueError:
        filename = module_or_file_name
    
    module = __import__(filename)
    try:
        if hasattr(module, 'controller'):
            if hasattr(module, 'graceful_exit'):
                return module.update, module.controller, module.graceful_exit
            return module.update, module.controller
        else:
            return module.update
    except AttributeError:
        raise AttributeError("The planner module must have an update() function.")
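The try/except around the rsplit above works because tuple unpacking fails when the module path has no directory part. A small sketch of both branches, with made-up module names:

# with a directory component, the one-split result unpacks into two names
dirname, filename = "planners/simple_planner".rsplit('/', 1)   # ('planners', 'simple_planner')
# a bare module name yields a single-element list, so unpacking raises
# ValueError and the except branch keeps the name as-is
try:
    dirname, filename = "simple_planner".rsplit('/', 1)
except ValueError:
    filename = "simple_planner"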
Example No. 34
    def _word_repl(self, word, groups):
        """Handle WikiNames."""
        bang_present = groups.get('word_bang')
        if bang_present:
            if self.cfg.bang_meta:
                return self.formatter.nowikiword("!%s" % word)
            else:
                self.formatter.text('!')

        name = groups.get('word_name')
        current_page = self.formatter.page.page_name
        abs_name = wikiutil.AbsPageName(current_page, name)
        if abs_name == current_page:
            self.currentitems.append(('wikilink', (abs_name, abs_name)))
            self.__add_meta(abs_name, groups)
            return u''
        else:
            # handle anchors
            try:
                abs_name, anchor = rsplit(abs_name, "#", 1)
            except ValueError:
                anchor = ""
            if self.cat_re.match(abs_name):
                self.currentitems.append(('category', (abs_name)))
                self.__add_meta(abs_name, groups)

            else:
                if not anchor:
                    wholename = abs_name
                else:
                    wholename = "%s#%s" % (abs_name, anchor)

                self.currentitems.append(('wikilink', (wholename, abs_name)))
                self.__add_meta(wholename, groups)
            return u''
Example No. 35
def apply_anchors(text):
    """Parses the given text, applying <a> tags to URLs and returning the result

    E.g.: If text is:
        "test https://labjack.com/support/. End."
    returns:
        "test <a href=\"https://labjack.com/support/\">https://labjack.com/support/</a>. End."

    URLs are not allowed to contain a trailing comma, period, or semi-colon.

    @param text: text to apply anchor tags to
    @type text: str
    """
    url_tuples = FIND_URLS.findall(text)
    for url_tuple in url_tuples:
        url = url_tuple[0]
        end_punc = FIND_ENDING_PUNCTUATION.search(url)
        if end_punc:
            url = string.rsplit(url, end_punc.group(1), 1)[0]

        # pos = text.find(url)
        # if pos == -1:
        #     raise ValueError('expected to find URL %s in text %s' % (url, text))
        parts = string.split(text, url, 1)
        text = parts[0] + (
            "<a target='_blank' href='%s'>"
            "%s"
            "</a>"
            "<img "
            "style='margin-right: -1;' "
            "src='https://ljsimpleregisterlookup.herokuapp.com/static/images/ui-icons-extlink.png' />"
        ) % (url, url) + parts[1]

    return text
Example No. 36
    def __init__(self,
                 name,
                 displayname,
                 version_set_state,
                 client_id,
                 originator,
                 workspace=None):
        DtrBaseObject.__init__(self, name)

        self.name = name
        self.displayname = displayname
        self.integrations = []
        self.version_set = []
        self.content_set = []
        self.workspace_name = None
        self.workspace = workspace
        self.originator = originator

        if version_set_state == "open":
            self.version_set_state = self.VERSIONSET_OPEN
        elif version_set_state == "closed":
            self.version_set_state = self.VERSIONSET_CLOSED
        else:
            self.version_set_state = self.VERSIONSET_UNKNOWN

        if not (client_id is None or client_id == ''):
            split_client = string.rsplit(client_id, ":", 1)
            self.client_hostname = split_client[1]
            self.client_path = split_client[0]
        else:
            self.client_hostname = None
            self.client_path = None
Example No. 37
    def extract_enums_asgml(self):
        """Extract Interlis Enumerations as GML"""
        enum_tables = self.extract_enums()
        #GML output
        gml = ElementTree.Element('FeatureCollection')
        gml.set('xmlns', 'http://ogr.maptools.org/')
        gml.set('xmlns:gml', 'http://www.opengis.net/gml')
        #<ogr:FeatureCollection
        #     xmlns:ogr="http://ogr.maptools.org/"
        #     xmlns:gml="http://www.opengis.net/gml">
        enumIdx = 0
        for name, defs in enum_tables.items():
            #enum name should not be longer than 63 chars, which is PG default name limit
            #Nutzungsplanung.Nutzungsplanung.Grundnutzung_Zonenflaeche.Herkunft.TYPE -> enumXX_herkunft
            enumTypeName = string.rsplit(name, '.', maxsplit=1)[-1]
            curEnumName = "enum%d_%s" % (enumIdx, enumTypeName)
            enumIdx = enumIdx + 1
            for enumdef in defs:
                #  <gml:featureMember>
                #    <ogr:Grundzonen__GrundZonenCode__ZonenArt>
                #      <ogr:value>Dorfkernzone</ogr:value><ogr:id>0</ogr:id>
                #    </ogr:Grundzonen__GrundZonenCode__ZonenArt>
                #  </gml:featureMember>
                featureMember = ElementTree.SubElement(gml, "gml:featureMember")
                feat = ElementTree.SubElement(featureMember, curEnumName)
                id = ElementTree.SubElement(feat, "id")
                id.text = str(enumdef['id'])
                enum = ElementTree.SubElement(feat, "enum")
                enum.text = enumdef['enum']
                enumtxt = ElementTree.SubElement(feat, "enumtxt")
                enumtxt.text = enumdef['enumtxt']

        return ElementTree.tostring(gml, 'utf-8')
Example No. 38
def string_to_date(string):
    """Convert a date string into a date object.

    :param string: the date string to parse
    :type string: str
    :returns: the parsed datetime object
    :rtype: datetime.datetime
    """
    # try date formats --mmdd, --mm-dd, yyyymmdd, yyyy-mm-dd and datetime
    # formats yyyymmddThhmmss, yyyy-mm-ddThh:mm:ss, yyyymmddThhmmssZ,
    # yyyy-mm-ddThh:mm:ssZ.
    for fmt in ("--%m%d", "--%m-%d", "%Y%m%d", "%Y-%m-%d", "%Y%m%dT%H%M%S",
                "%Y-%m-%dT%H:%M:%S", "%Y%m%dT%H%M%SZ", "%Y-%m-%dT%H:%M:%SZ"):
        try:
            return datetime.strptime(string, fmt)
        except ValueError:
            continue  # with the next format
    # try datetime formats yyyymmddThhmmsstz and yyyy-mm-ddThh:mm:sstz where tz
    # may look like -06:00.
    for fmt in ("%Y%m%dT%H%M%S%z", "%Y-%m-%dT%H:%M:%S%z"):
        try:
            return datetime.strptime(''.join(string.rsplit(":", 1)), fmt)
        except ValueError:
            continue  # with the next format
    raise ValueError
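The ''.join(string.rsplit(":", 1)) trick above removes only the last colon, turning a -06:00 style offset into the -0600 form that the %z-based formats expect (note that `string` here is the function's str argument, not the string module). A compact sketch:

s = "2014-06-01T10:20:30-06:00"
normalized = ''.join(s.rsplit(":", 1))   # '2014-06-01T10:20:30-0600'; the H:M:S colons are untouched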
Example No. 39
    def cache_dir(self, path, env='base', include_empty=False):
        '''
        Download all of the files in a subdir of the master
        '''
        ret = []
        path = self._check_proto(path)
        for fn_ in self.file_list(env):
            if fn_.startswith(path):
                local = self.cache_file('salt://{0}'.format(fn_), env)
                if not fn_.strip():
                    continue
                ret.append(local)

        if include_empty:
            # Break up the path into a list containing the bottom-level directory
            # (the one being recursively copied) and the directories preceding it
            separated = string.rsplit(path,'/',1)
            if len(separated) != 2:
                # No slashes in path. (This means all files in env will be copied)
                prefix = ''
            else:
                prefix = separated[0]
            for fn_ in self.file_list_emptydirs(env):
                if fn_.startswith(path):
                    dest = os.path.normpath(
                      os.sep.join([
                      self.opts['cachedir'],
                      'files',
                      env])) 
                    minion_dir = '%s/%s' % (dest,fn_)
                    if not os.path.isdir(minion_dir):
                        os.makedirs(minion_dir)
                    ret.append(minion_dir)
        return ret
Example No. 40
 def start_activity(self, cmp):
     """
     Run the specified activity on the device
     
     """
     fullCmpStr = string.rsplit(cmp, '.', 1)[0] + '/' + cmp
     return self.shell_command('am start -a android.intent.action.MAIN -n %s' % (fullCmpStr))
Example No. 41
def extractXADatasourceName(cliVector):
    dsName = regularExpressionSearch(
        "/subsystem=datasources/xa-data-source=(.*)/", cliVector)

    if (str(dsName).startswith("/subsystem=datasources")):
        return ""

    splitExpression = rsplit(dsName, "/", -1)
    newDSName = ""
    subExpressionCount = 1
    if (splitExpression):
        if (len(splitExpression) > 1):
            for subExpression in splitExpression:
                if (subExpressionCount == 1):
                    newDSName = newDSName + subExpression
                elif (subExpressionCount > 2):
                    newDSName = newDSName + "\/" + subExpression
                else:
                    newDSName = newDSName + "/" + subExpression

                subExpressionCount = subExpressionCount + 1
        else:
            newDSName = splitExpression[0]

    return newDSName
Example No. 42
def workoutMimeType(filename):
    "Determine the MIME type of a file from its file extension"
    fileextension = string.rsplit(filename, ".", 1)[-1]
    if (fileextension in extensionToMimeType):
        return extensionToMimeType[fileextension]
    else:
        return "application/octet-stream"
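One detail worth noting about the lookup above: rsplit(".", 1)[-1] returns the whole name when there is no dot, so dotless filenames simply miss the table and fall back to application/octet-stream. A quick sketch, assuming a hypothetical extensionToMimeType table:

extensionToMimeType = {"txt": "text/plain", "png": "image/png"}   # hypothetical table
ext = "report.txt".rsplit(".", 1)[-1]       # 'txt'    -> maps to 'text/plain'
noext = "README".rsplit(".", 1)[-1]         # 'README' -> not in the table, so octet-stream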
Example No. 43
def importPlanner(module_or_file_name=None):
    if module_or_file_name is None:
        if len(argv) != 2:
            print "Usage: %s <planner-module>" % argv[0]
            exit(2)
        module_or_file_name = argv[1].strip()

    if module_or_file_name.endswith(".py"):
        module_or_file_name = module_or_file_name[:-3]
    try:
        dirname, filename = string.rsplit(module_or_file_name, '/', 1)
        path.append(dirname)
    except ValueError:
        filename = module_or_file_name

    module = __import__(filename)
    try:
        if hasattr(module, 'controller'):
            if hasattr(module, 'graceful_exit'):
                return module.update, module.controller, module.graceful_exit
            return module.update, module.controller
        else:
            return module.update
    except AttributeError:
        raise AttributeError(
            "The planner module must have an update() function.")
Example No. 44
def stream_def_name(name):
    # format for monasca stream_definition
    response_name = name
    slist = string.rsplit(name, '_', 1)
    if len(slist) > 0:
        response_name = slist[0]
    return response_name
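The helper above just drops the part after the last underscore of a stream definition name; since rsplit always returns at least one element, the len(slist) > 0 guard can never be false. A tiny sketch with invented names:

base = "cpu_high_1".rsplit('_', 1)[0]   # 'cpu_high'
same = "cpu".rsplit('_', 1)[0]          # 'cpu' -- no underscore, so the name comes back unchanged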
Example No. 45
 def get_symbol(self, strIp):
     strRoutine='UNKNOWN'
     strModule='UNKNOWN'
     ip2mdOut = self.ip2md(strIp)
     ip2mdProps = _str_to_dict(ip2mdOut)
     if 'Method Name' in ip2mdProps: 
         strRoutine = ip2mdProps['Method Name'].split('(')[0]
         if 'Class' in ip2mdProps:
             classPtr = ip2mdProps['Class']
             if classPtr is not None and classPtr != '':
                 classOut = self.dumpclass(classPtr)
                 classProps = _str_to_dict(classOut)
                 if 'File' in  classProps:
                     strFile = classProps['File']
                     strModule = string.rsplit(string.rsplit(strFile, '.', 1)[0], '/', 1)[1] 
     return strModule + '!' + strRoutine
Example No. 46
 def add_to_hierarchy(self, name):
     if iu.ivy_compose_character in name:
         pref, suff = string.rsplit(name, iu.ivy_compose_character, 1)
         self.add_to_hierarchy(pref)
         self.hierarchy[pref].add(suff)
     else:
         self.hierarchy['this'].add(name)
Example No. 47
	def MagnetReadField(self):
		
		# Form the query string (Now only for GRPY)
		if self.Heater:
			Query = "READ:DEV:GRPY:PSU:SIG:FLD"
		else:
			# For some reason the command PFLD doesn't work
			Query = "READ:DEV:GRPY:PSU:SIG:PCUR"
		
		Reply = self.Visa.query(Query)
		# Find the useful part of the response
		Answer = string.rsplit(Reply,":",1)[1]
		# Some regex to get rid of the appended units
		Answer = re.split("[a-zA-Z]",Answer,1)[0]
		Answer = float(Answer)
		if self.Heater:
			self.SourceCurrent = Answer * self.AToB
			self.MagnetCurrent = self.SourceCurrent
		else:
			self.MagnetCurrent = Answer
			Answer = Answer / self.AToB
					
		self.Field = Answer
		
		return
Example No. 48
File: main.py  Project: Ealdor/pypbp
	def popup(self):
		self.completeName = tkFileDialog.askopenfilename(initialdir = "puzzles", filetypes = [("Bitmap", "*.csv"), ("Bitmap", "*.json")])
		if self.completeName != "" and self.completeName != ():
			self.startButton.config(state = 'normal')
			self.name.set("Puzzle: " + string.rsplit(self.completeName, "/")[-1])
			if len(self.name.get()) >= 60:
				self.name.set(self.name.get()[0:59] + "...")
Example No. 49
def make_max_day_DF():
	import gw_data_stats
	import os
	#ts = pd.read_csv(ts_path + circuit + '_min.csv', delimiter = ';', index_col = 0, parse_dates = True)
	#ts[ts > ts.std()*5. + ts.mean()] = np.nan
	flist = []
	DFlist = '/media/mountpoint/Users/Mitchell/Documents/Shared_Solar/Shared_Solar_database/csv_flat_files_minute/DFlist'
	#for (dirpath,dirnames,filenames) in os.walk('C:\Users\Mitchell\Documents\Shared_Solar\Shared_Solar_database'):
	for (dirpath,dirnames,filenames) in os.walk('/media/mountpoint/Users/Mitchell/Documents/Shared_Solar/Shared_Solar_database/csv_flat_files_minute'):
		print dirpath
		if dirpath == DFlist:
			for _file in filenames:
				flist.append(_file)

	max_day_DF = pd.DataFrame(index = range(0,24))
	for ix, fname in enumerate(flist):
		x = pd.read_csv(DFlist +'/' + fname, delimiter = ';', index_col = 0, parse_dates = True)
		x[x > x.std()*5. + x.mean()] = np.nan
		x = x.dropna()
		col_head = string.rsplit(fname,'_',1)[0]
		col = pd.DataFrame(gw_data_stats.make_maxday(x))
		col.columns = [col_head]
		max_day_DF = max_day_DF.join(pd.DataFrame(col))
		
		print col_head
	return max_day_DF
Example No. 50
    def _word_repl(self, word, groups):
        if self.in_dd:
            name = groups.get('word_name')
            current_page = self.formatter.page.page_name
            abs_name = AbsPageName(current_page, name)
            if abs_name == current_page:
                return self.__real_val(abs_name)
            else:
                # handle anchors
                try:
                    abs_name, anchor = rsplit(abs_name, "#", 1)
                except ValueError:
                    anchor = ""
                if self.cat_re.match(abs_name):
                    return self.__real_val(abs_name)

                else:
                    if not anchor:
                        wholename = abs_name
                    else:
                        wholename = "%s#%s" % (abs_name, anchor)

                    return self.__real_val(wholename)

        return apply(wikiParser._word_repl, (self, word, groups))
Example No. 51
def workoutMimeType(filename):
    "Determine the MIME type of a file from its file extension"
    fileextension = string.rsplit(filename, ".", 1)[-1]
    if (fileextension in extensionToMimeType):
        return extensionToMimeType[fileextension]
    else:
        return "application/octet-stream"
Example No. 52
def sdv_callback(client, userdata, msg):
    #print(msg.topic+" "+str(msg.payload))
    logger1.debug("sdv: " + msg.topic + "->" + msg.payload, level="i")
    topicArray = string.rsplit(msg.topic, '/')
    dev = topicArray[len(topicArray) - 3]
    ch = topicArray[len(topicArray) - 2]
    obj = topicArray[len(topicArray) - 5]
    if (int(dev) == getTlgDev()):
        logger1.debug("route to Telegram device: " + msg.payload, level="i")
        client.publish("/ssn/acc/" + str(ACCOUNT) + "/telegram/out",
                       payload=msg.payload,
                       qos=0,
                       retain=False)
    else:
        # make set dev value command:
        sdv = '{"ssn":{"v":1,"obj":' + obj + ',"cmd":"sdv", "data": {"adev":' + dev + ',"acmd":' + ch + ',"aval":' + msg.payload + '}}}"'
        #    logWrite("sdv= "+sdv, level="i")
        tmpMsg = ssnMsg(destObj=int(obj),
                        srcObj=0,
                        msgType=2,
                        msgID=None,
                        msgData=sdv)
        client.publish("/ssn/acc/" + str(ACCOUNT) + "/obj/" + obj +
                       "/commands",
                       tmpMsg.getSSNPDU(),
                       qos=0,
                       retain=False)
Example No. 53
    def MagnetSetHeater(self, State):

        HeaterBefore = self.MagnetReadHeater()
        if State == 1:
            Reply = self.Visa.ask("SET:DEV:GRPZ:PSU:SIG:SWHT:ON")
        elif State == 0:
            Reply = self.Visa.ask("SET:DEV:GRPZ:PSU:SIG:SWHT:OFF")
        else:
            print "Error cannot set switch heater\n"

        Answer = string.rsplit(Reply, ":", 1)[1]
        if Answer == "VALID":
            Heater = 1
        elif Answer == "INVALID":
            Heater = 0
        else:
            Heater = -1

        HeaterAfter = self.MagnetReadHeater()
        if HeaterAfter != HeaterBefore:
            print "Heater Switched! Waiting 2 min\n"
            time.sleep(120)
            print "Finished wait!\n"

        return Heater
Example No. 54
def xmlize_items(items, query):
    items_a = []

    for item in items:
        list = string.rsplit(item, "/", 1)
        name = list[-1]
        path = item if len(list) == 2 else ""

        complete = item
        if item.lower().startswith(query.lower()):
            i = item.find("/", len(query))
            if i != -1:
                complete = item[:(i + 1)]

        items_a.append("""
    <item uid="%(item)s" arg="%(item)s" autocomplete="%(complete)s">
        <title>%(name)s</title>
        <subtitle>%(path)s</subtitle>
    </item>
        """ % {
            'item': item,
            'name': name,
            'path': path,
            'complete': complete
        })

    return """
<?xml version="1.0"?>
<items>
    %s
</items>
    """ % '\n'.join(items_a)
Example No. 55
def move_files(filenames, backup_dir):
    for name in filenames:
        dir = rsplit(backup_dir + name.split(settings.MEDIA_ROOT)[1], '\\', 1)[0]
        if not os.path.exists(dir):
            os.makedirs(dir)
        shutil.copyfile(name, backup_dir + name.split(settings.MEDIA_ROOT)[1])
    remove_files(filenames)
Example No. 56
def open_filters(fdir, usecols=[0, 1]):
    ff = RootDir + fdir + '/'
    fil_ext = 'fil' if fdir == 'CFHTLS' else 'dat'
    flist = glob.glob(ff + '*.' + fil_ext)
    nfilters = len(flist)

    print '%ld filters found for %s' % (nfilters, fdir)
    if nfilters == 0:
        raise ValueError('no filters found!')

    filters = {}  #'flist':flist}
    filters_sorted = {}  #'flist':flist}
    fname = []
    meanw = []
    for i in range(nfilters):
        fdata = np.loadtxt(flist[i], usecols=usecols)
        filtername = string.rsplit(flist[i], '/', 1)[1].rsplit('.', 1)[0]
        fname.append(filtername)
        meanw.append(wp.quantile(fdata[:, 0], fdata[:, 1], 0.5))
        filters.update({filtername: fdata})
    filters.update({'filters': fname})
    filters.update({'MeanWavelength': meanw})

    wsort = np.argsort(filters['MeanWavelength'])
    fnamelist = np.asarray(filters['filters'])[wsort]

    filters_sorted.update({'filters': fnamelist})
    for i in range(nfilters):
        f_i = fnamelist[i]
        filters_sorted.update({f_i: filters[f_i]})

    filters_sorted.update(
        {'MeanWavelength': np.asarray(filters['MeanWavelength'])[wsort]})

    return filters_sorted
Example No. 57
    def get_dir(self, path, dest='', env='base', gzip=None):
        '''
        Get a directory recursively from the salt-master
        '''
        # TODO: We need to get rid of using the string lib in here
        ret = []
        # Strip trailing slash
        path = string.rstrip(self._check_proto(path), '/')
        # Break up the path into a list containing the bottom-level directory
        # (the one being recursively copied) and the directories preceding it
        separated = string.rsplit(path, '/', 1)
        if len(separated) != 2:
            # No slashes in path. (This means all files in env will be copied)
            prefix = ''
        else:
            prefix = separated[0]

        # Copy files from master
        for fn_ in self.file_list(env):
            if fn_.startswith(path):
                # Prevent files in "salt://foobar/" (or salt://foo.sh) from
                # matching a path of "salt://foo"
                try:
                    if fn_[len(path)] != '/':
                        continue
                except IndexError:
                    continue
                # Remove the leading directories from path to derive
                # the relative path on the minion.
                minion_relpath = string.lstrip(fn_[len(prefix):], '/')
                ret.append(
                    self.get_file(
                        'salt://{0}'.format(fn_),
                        '{0}/{1}'.format(dest, minion_relpath),
                        True, env, gzip
                    )
                )
        # Replicate empty dirs from master
        try:
            for fn_ in self.file_list_emptydirs(env):
                if fn_.startswith(path):
                    # Prevent an empty dir "salt://foobar/" from matching a path of
                    # "salt://foo"
                    try:
                        if fn_[len(path)] != '/':
                            continue
                    except IndexError:
                        continue
                    # Remove the leading directories from path to derive
                    # the relative path on the minion.
                    minion_relpath = string.lstrip(fn_[len(prefix):], '/')
                    minion_mkdir = '{0}/{1}'.format(dest, minion_relpath)
                    if not os.path.isdir(minion_mkdir):
                        os.makedirs(minion_mkdir)
                    ret.append(minion_mkdir)
        except TypeError:
            pass
        ret.sort()
        return ret
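The prefix computation above splits a salt:// path into the directories preceding the bottom-level directory being copied. A short sketch with a made-up path:

path = "base/files/webapp"                              # hypothetical fileserver path
separated = path.rsplit('/', 1)                         # ['base/files', 'webapp']
prefix = separated[0] if len(separated) == 2 else ''    # 'base/files'
flat = "webapp".rsplit('/', 1)                          # ['webapp'] -- no slash, so prefix stays ''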