Example #1
    def __init__(self, server, server_id):
        self.server_id = server_id
        self.host, self.port = server.split(':')
        try:
            stat = send_cmd(self.host, self.port, b'stat\n')

            sio = StringIO(stat)
            line = sio.readline()
            m = re.search('.*: (\d+\.\d+\.\d+)-.*', line)
            self.version = m.group(1)
            sio.readline()
            self.sessions = []
            for line in sio:
                if not line.strip():
                    break
                self.sessions.append(Session(line.strip(), server_id))
            for line in sio:
                attr, value = line.split(':')
                attr = attr.strip().replace(" ", "_").replace("/", "_").lower()
                setattr(self, attr, value.strip())

            self.min_latency, self.avg_latency, self.max_latency = self.latency_min_avg_max.split("/")

            self.unavailable = False
        except:
            self.unavailable = True
            self.mode = "Unavailable"
            self.sessions = []
            self.version = "Unknown"
            return
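
The snippet above relies on a send_cmd helper that is not shown. A minimal sketch of what such a helper might look like, assuming it opens a TCP socket, writes the four-letter command, and reads the reply until EOF (the real zktop implementation may differ):

import socket

def send_cmd(host, port, cmd):
    # Open a TCP connection, send the four-letter word, read until EOF.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, int(port)))
    chunks = []
    try:
        s.sendall(cmd)
        while True:
            chunk = s.recv(4096)
            if not chunk:
                break
            chunks.append(chunk)
    finally:
        s.close()
    return ''.join(chunks)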
Example #2
    def getOptionsBoxFileContentsInfo(prevChoices):
        if prevChoices.history or prevChoices.input:
            if prevChoices.history:
                inputFile = open(ExternalTrackManager.extractFnFromGalaxyTN(prevChoices.history.split(':')), 'r')
            else:
                inputFile = StringIO(prevChoices.input)
            
            for i in xrange(TabularToGtrackTool._getNumSkipLines(prevChoices)):
                inputFile.readline()
            
            table = []
            splitChar = TabularToGtrackTool._getSplitChar(prevChoices)
            numCols = None
            error = None
            for i,line in enumerate(inputFile):
                row = [x.strip() for x in line.strip().split(splitChar)]
                if numCols is None:
                    numCols = len(row)
                elif numCols != len(row):
                    numCols = max(numCols, len(row))
#                    error = 'Error: the number of columns varies over the rows of the tabular file.'
                    
                table.append(row)
                if i == TabularToGtrackTool.NUM_ROWS_IN_TABLE:
                    break
            
            numCols = max(len(row) for row in table) if len(table) > 0 else 0
            
            if error is None:
                if numCols > TabularToGtrackTool.NUM_COLUMN_FUNCTIONS:
                    error = 'Error: the tabular file has more columns than is allowed by the tool (%s > %s).' % (numCols, TabularToGtrackTool.NUM_COLUMN_FUNCTIONS)
                
            return ('__hidden__', FileContentsInfo(table=table, numCols=numCols, error=error))
Example #3
File: zktop.py Project: phunt/zktop
    def __init__(self, server, server_id):
        self.server_id = server_id
        if ':' in server:
            self.host, self.port = server.split(':')[0], int(server.split(':')[1])
        else: # fallback to default if user doesn't specify port number
            self.host, self.port = server, ZK_DEFAULT_PORT
        try:
            stat = send_cmd(self.host, self.port, b'stat\n')

            sio = StringIO(stat)
            line = sio.readline()
            m = re.search('.*: (\d+\.\d+\.\d+)-.*', line) # e.g. nodecount:0 zxid:0x0 sessions:0o att
            self.version = m.group(1) # raise Exception when stat response empty
            sio.readline()
            self.sessions = []
            for line in sio:
                if not line.strip():
                    break
                self.sessions.append(Session(line.strip(), server_id))
            for line in sio:
                attr, value = line.split(':')
                attr = attr.strip().replace(" ", "_").replace("/", "_").lower()
                setattr(self, attr, value.strip())

            self.min_latency, self.avg_latency, self.max_latency = self.latency_min_avg_max.split("/")

            self.unavailable = False
        except: # e.g., when server responds with '' (not reachable)
            self.unavailable = True
            self.mode = "Unavailable"
            self.sessions = []
            self.version = "Unknown"
            return
Example #4
    def get_dialect(self):

        if self.dialect:
            return

        try:
            if self.buffer:
                flat_file = StringIO(self.buffer)
            else:
                flat_file = open(self.path, mode = "rb")
            try:
                flat_file.seek(0)
                for line in range(self.skip_lines):
                    flat_file.readline()
                tell = flat_file.tell()

                sniffer = csv.Sniffer()
                self.dialect = sniffer.sniff(flat_file.read(20240))
                if self.dialect.delimiter not in [' ','\t','|',',',';',':']:
                    raise csv.Error
                flat_file.seek(tell)
                if not self.skip_lines:
                    self.has_header = sniffer.has_header(flat_file.read(20240))
            except csv.Error:
                self.dialect = csv.excel
                self.has_header = True
            if self.dialect.delimiter == ' ':
                self.dialect.delimiter = ','
            if self.buffer:
                flat_file.seek(0)
        finally:
            flat_file.close()
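
For reference, csv.Sniffer can be exercised directly on an in-memory sample, much as get_dialect does above; a minimal sketch:

import csv
from StringIO import StringIO

sample = StringIO("name;age;city\n1;2;3\n4;5;6\n")
dialect = csv.Sniffer().sniff(sample.read(1024))
sample.seek(0)
print dialect.delimiter                            # ';'
print csv.Sniffer().has_header(sample.read(1024))  # likely True for this sample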
Example #5
def inspect_dbs(output_dir='.', db_names=None, db_aliases=None, alias_prefix='SEC_', db_alias_lower=str.lower, verbosity=1):
    db_names = db_names or settings.INSPECT_DB_NAMES
    db_aliases = db_aliases or [alias_prefix + db_alias_lower(name) for name in db_names]
    for db_name, db_alias in zip(db_names, db_aliases):
        fn = os.path.join(os.path.realpath(output_dir), 'models_%s.py' % db_alias)
        if verbosity:
            sys.stderr.write('Writing model definitions to file %r for db_alias %r.\n' % (fn, db_alias))
        models_py_buffer = StringIO()
        call_command('inspectdb', database=db_alias, verbosity=0, traceback=False, interactive=False, stdout=models_py_buffer)
        models_py_buffer.seek(0)
        with open(fn, 'w') as fp:
            line = models_py_buffer.readline()
            while line and fp:
                if verbosity > 2:
                    sys.stderr.write('READ: %r\n' % line)
                seditted_lines = line
                for sed in inspect_dbs.seds:
                    if sed['regex'].match(line):
                        seditted_lines =  sed.get('before', '').format(**{'db_name': db_name, 'alias_prefix': alias_prefix}) or ''
                        seditted_lines += line if sed.get('sub', None) is None else sed['regex'].sub(sed['sub'], line)
                        seditted_lines += sed.get('after', '').format(**{'db_name': db_name, 'alias_prefix': alias_prefix}) or ''
                        if verbosity > 1:
                            print 'WAS: %r' % line
                            print ' IS: %r' % seditted_lines
                        break  # stop processing the regexes if one already matched this line
                if verbosity > 2:
                    sys.stderr.write('WRITING: %r\n' % seditted_lines)
                # TODO: Add a multi-line edit that deals with multiple primary_key=True fields
                #       * delete second and subsequent primary_key=True arguments within the same Model
                #       * add a unique_together constraint on all the primary_keys that were originally there
                fp.write(seditted_lines)
                line = models_py_buffer.readline()
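
The exact rules in inspect_dbs.seds are not shown, but the loop above implies each entry is a dict with a compiled 'regex' plus optional 'sub', 'before', and 'after' keys whose format strings may use {db_name} and {alias_prefix}. A hypothetical single-rule example:

import re

# Hypothetical rule: write Django's generated db_table line through unchanged,
# but inject a provenance comment before it.
inspect_dbs.seds = [
    {
        'regex': re.compile(r'\s*db_table\s*='),
        'before': '        # table from database {db_name} (prefix {alias_prefix})\n',
        'sub': None,   # None means the matched line is kept as-is
        'after': '',
    },
]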
Example #6
	def parseResponse (self,rawResponse):
		'''Parses a Raw response from a string and replaces the current object'''
		
		par=StringIO(rawResponse)

		header=par.readline()
		try:
			self.protocol,self.code,self.message=re.findall("(HTTP\S+) ([0-9]+)\s*(.*)",header)[0]
		except:
			self.protocol="Unknown"
			self.code="999"
			self.message="BUG: Parsing Error"

		self.code=int(self.code)

		self.__headers=[]
		line=par.readline().strip()
		while line:
			k,v=re.findall("^([^:]+):\s*(.*)\s*$",line)[0]
			line=par.readline().strip()
			self.addHeader(k,v)

		self.__content=par.read()

		self.delHeader("Transfer-Encoding")
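
A usage sketch, assuming the method lives on a response class (here called Response, an assumed name) that also defines the addHeader/delHeader helpers:

raw = ("HTTP/1.1 200 OK\r\n"
       "Content-Type: text/html\r\n"
       "\r\n"
       "<html></html>")
resp = Response()        # hypothetical container class defining parseResponse
resp.parseResponse(raw)
print resp.code          # 200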
Example #7
 def dump_csv(self, response, dialect=csv.excel):
     in_headers = [
         'breakdown',
         'indicator',
         'ref_area',
         'time_period',
         'unit_measure',
         'value']
     out_headers = [
         'indicator',
         'breakdown',
         'unit_measure',
         'time_period',
         'ref_area',
         'value']
     writer = csv.DictWriter(response, out_headers, dialect=dialect, restval='')
     writer.writeheader()
     data = StringIO(self.cube.dump(data_format="text/csv"))
     data.readline() #skip header
     reader = csv.DictReader(data, in_headers, restval='')
     for row in reader:
         encoded_row = {}
         for k,v in row.iteritems():
             encoded_row[k] = unicode(v).encode('utf-8')
         writer.writerow(encoded_row)
     return response
Example #8
def do_comparison(good_record, test_record):
    """Compare two records to see if they are the same.

    This compares the two GenBank records and raises an AssertionError
    if two lines do not match, showing the non-matching lines.
    """
    good_handle = StringIO(good_record)
    test_handle = StringIO(test_record)

    while True:
        good_line = good_handle.readline()
        test_line = test_handle.readline()

        if not(good_line) and not(test_line):
            break

        if not good_line:
            if test_line.strip():
                raise AssertionError("Extra info in Test: `%s`" % test_line)
        if not test_line:
            if good_line.strip():
                raise AssertionError("Extra info in Expected: `%s`"
                                     % good_line)

        assert test_line == good_line, \
               "Expected does not match Test.\nExpect:`%s`\nTest  :`%s`\n" % \
               (good_line, test_line)
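
A quick usage sketch with toy record strings:

good = "LOCUS       AB000001\nORIGIN\n"
do_comparison(good, good)      # identical input: returns quietly
try:
    do_comparison(good, good.replace("ORIGIN", "FEATURES"))
except AssertionError, err:
    print err                  # shows the first pair of non-matching lines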
Example #9
def main():
    url = 'http://www.federalreserve.gov/datadownload/Output.aspx'
    params = {
        'rel': 'Z1',
        'series': '0c26ea0130f873240b74c6c9a99325ac',
        'lastObs': '',
        'from': '12/31/1951',
        'to': '12/31/3000',
        'filetype': 'csv',
        'label': 'omit',
        'layout': 'seriescolumn',
    }
    csvstring = scraperwiki.scrape(url, params)
    print 'Downloaded {0} bytes from {1}?{2}'.format(len(csvstring), url, urllib.urlencode(params))
    csvfile = StringIO(csvstring)
    # Drop the first line because the column headers are on the second line
    csvfile.readline()


    rdr = csv.DictReader(csvfile)
    rows = list(rdr)
    print 'Processing {0} rows from CSV data'.format(len(rows))

    quarter_end_dates = {
        '1': {'month': 3, 'day': 31},
        '2': {'month': 6, 'day': 30},
        '3': {'month': 9, 'day': 30},
        '4': {'month': 12, 'day': 31}
    }
    latest_date = datetime.date(1900, 1, 1)
    for row in rows:
        # Sample row:
        # {'FL103164103.Q': '14276632.0', 'Time Period': '2011Q4', 'FL102090005.Q': '16355038.9'}
        #pprint(row)
        (year, quarter) = row['Time Period'].split('Q')
        quarter_kwargs = quarter_end_dates[quarter]
        row['Date'] = datetime.date(year=int(year), **quarter_kwargs)
        row['FL103164103'] = Decimal(row['FL103164103.Q'])
        row['FL102090005'] = Decimal(row['FL102090005.Q'])
        # The scraperwiki datastore can't handle periods in the keys
        if row['Date'] > latest_date:
            latest_date = row['Date']
        del row['FL103164103.Q']
        del row['FL102090005.Q']
        row['QRATIO'] = row['FL103164103'] / row['FL102090005']

    mean_qratio = sum((row['QRATIO'] for row in rows)) / len(rows)
    print "Mean: {0}".format(mean_qratio)
    print "Latest date: {0}".format(latest_date)

    moving_average_qratio = MovingAverage(windowsize=4)
    for row in rows:
        row['QRATIO_NORMALIZED'] = row['QRATIO'] / mean_qratio
        moving_average_qratio.push(row['QRATIO'])
        row['QRATIO_4QMAVG'] = moving_average_qratio.weighted()
        row['QRATIO_4QMAVG_NORMALIZED'] = row['QRATIO_4QMAVG'] / mean_qratio

    for row in rows:
        scraperwiki.sqlite.save(['Time Period'], row)
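
The MovingAverage helper used above is not shown; its name and methods come from the snippet, but the internals below are an assumed sketch (the real weighted() presumably weights recent quarters rather than taking a plain mean):

class MovingAverage(object):
    def __init__(self, windowsize=4):
        self.windowsize = windowsize
        self.values = []

    def push(self, value):
        # Keep only the most recent `windowsize` values.
        self.values.append(value)
        if len(self.values) > self.windowsize:
            self.values.pop(0)

    def weighted(self):
        # Sketch only: an unweighted mean over the current window.
        return sum(self.values) / len(self.values)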
Example #10
    def test_create_table(self):
        file_ = StringIO()
        rule = Rule(1, None, 2, 5, 0.3, "item")
        CsvGenerator.create_pair_csv([rule], file_)

        file_.seek(0)
        assert ('pid1,qid1,pid2,count,probability,context' ==
                file_.readline().strip())
        assert '1,,2,5,0.3,item' == file_.readline().strip()
Example #11
class ParseInfo(object):
    
    def __init__(self):
        self.request = StringIO()
        self.response = StringIO()
        self.request_header = {}
        self.response_header = {}
    
    def write(self, method, s):
        """写入请求数据"""
        if method == 'request':
            self.request.write(s)
        elif method == 'response':
            self.response.write(s)
    
    def parse(self):
        #request header
        self.request.seek(0)
        self.request_commend = self.request.readline()
        self.request_header = httplib.HTTPMessage(self.request)
        #request body
        self.request_body = ''.join(self.request.readlines())
        request_body_encode = chardet.detect(self.request_body).get('encoding')
        if request_body_encode:
            self.request_body = self.request_body.decode(request_body_encode, 'replace')
        
        #response header
        self.response.seek(0)
        self.response_status = self.response.readline()
        self.response_header = httplib.HTTPMessage(self.response)
        #response body
        chunked = self.response_header.get('Transfer-Encoding', '')
        if chunked == 'chunked':
            content = []
            chunk_size = int(self.response.readline()[:-2], 16)
            while chunk_size > 0:
                content.append(self.response.read(chunk_size))
                self.response.read(2)
                chunk_size = int(self.response.readline()[:-2], 16)
            self.response_body = ''.join(content)
        else:
            self.response_body = ''.join(self.response.readlines())
        try:
            if self.response_header.get('Content-Encoding') == 'gzip':
                self.response_body = gzip.GzipFile(fileobj=StringIO(self.response_body)).read()
        except Exception, e:
            pass
        response_body_encode = chardet.detect(self.response_body).get('encoding')
        if response_body_encode:
            if response_body_encode.lower() == 'gb2312':
                response_body_encode = 'gb18030'
            self.response_body = self.response_body.decode(response_body_encode, 'replace')
        self.raw_request = self.request.getvalue()
        self.raw_response = self.response.getvalue()
        del self.request
        del self.response
Example #12
 def get_row_names_from_infile(self):
     fh = StringIO(self.csvdata)
     next_line = fh.readline()
     while next_line != "":
         split_next_line = next_line.split(",")
         self.row_names = self.row_names + [split_next_line[0]]
         next_line = fh.readline()
     fh.close()
Example #13
def main(sock):
    #http parsing
    infeed = StringIO(sock.recv(0xFFFF))

    line = infeed.readline().strip() #get request
    try:
        method, url, version = line.split(" ")
    except ValueError: #No HTTP no DEAL
        return


    #Url handling
    lines = [line]
    headers = {}
    while line:
        try:
            header, content = line.split(":",1)
            headers[header] = content.strip()
        except ValueError: #discard ValueErrors of malformed headers
            pass
        
        line = infeed.readline().strip() #get request
        lines.append(line)

    if "Content-Length" in headers:
        try:
            lines.append(infeed.read(int(headers["Content-Length"])))
        except:
            return # funny stuff makes your connection dead eg: value errors    

    if "Sec-WebSocket-Key" in headers: # oh goody, a websocket
    	websocket.connection(method, url, version, headers)
    	return #Its a websockjet so we dotn do HTTP stuff


    # log_request(lines) #always log before giving an answer

    #bumping to specified servername
    if settings["bumptoservername"] == "1":
        if "Host" in headers:
            if headers["Host"] not in [settings['servername'], settings['ipv4dnsrecord'], settings['ipv6dnsrecord']] :
                outfeed = StringIO()
                print >>outfeed, "HTTP/1.1 302 FOUND"
                print >>outfeed, "Location: %s://%s%s" % (proto_name(sock), settings["servername"], url)
                print >>outfeed, ""

                sock.sendall(outfeed.getvalue())
                return

    #now delegate
    outfeed = StringIO()
    selector.metadata = {"socket": sock}
    selector.loadpages()
    selector._delegate(outfeed, method, url, version, headers, lines)
    sock.sendall(outfeed.getvalue())
Example #14
    def _parse_stat(self, data):
        """ Parse the output from the 'stat' 4letter word command """
        h = StringIO(data)

        result = {}
        
        version = h.readline()
        if version:
            result['zk_version'] = version[version.index(':')+1:].strip()

        # skip all lines until we find the empty one
        while h.readline().strip(): pass

        for line in h.readlines():
            m = re.match('Latency min/avg/max: (\d+)/(\d+)/(\d+)', line)
            if m is not None:
                result['zk_min_latency'] = int(m.group(1))
                result['zk_avg_latency'] = int(m.group(2))
                result['zk_max_latency'] = int(m.group(3))
                continue

            m = re.match('Received: (\d+)', line)
            if m is not None:
                result['zk_packets_received'] = int(m.group(1))
                continue

            m = re.match('Sent: (\d+)', line)
            if m is not None:
                result['zk_packets_sent'] = int(m.group(1))
                continue

            m = re.match('Outstanding: (\d+)', line)
            if m is not None:
                result['zk_outstanding_requests'] = int(m.group(1))
                continue

            m = re.match('Mode: (.*)', line)
            if m is not None:
                result['zk_server_state'] = m.group(1)
                continue

            m = re.match('Node count: (\d+)', line)
            if m is not None:
                result['zk_znode_count'] = int(m.group(1))
                continue

            m = re.match('Zxid: (0x[0-9a-fA-F]+)', line)
            if m is not None:
                result['zk_zxid']         = m.group(1)
                result['zk_zxid_counter'] = int(m.group(1), 16) & int('0xffffffff', 16) # lower 32 bits
                result['zk_zxid_epoch']   = int(m.group(1), 16) >>32 # high 32 bits
                continue

        return result 
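
Fed a typical 'stat' response, the parser yields a flat dict; a usage sketch, where server_monitor stands in for whatever object defines _parse_stat:

sample = ("Zookeeper version: 3.4.6-1569965, built on 02/20/2014 09:09 GMT\n"
          "Clients:\n"
          "\n"
          "Latency min/avg/max: 0/1/10\n"
          "Received: 100\n"
          "Sent: 99\n"
          "Outstanding: 0\n"
          "Zxid: 0x1a00000042\n"
          "Mode: follower\n"
          "Node count: 4\n")
stats = server_monitor._parse_stat(sample)
print stats['zk_server_state']   # 'follower'
print stats['zk_zxid_epoch']     # 26 (0x1a, the high 32 bits)
print stats['zk_zxid_counter']   # 66 (0x42, the low 32 bits)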
Example #15
    def test_write_csv(self):
        out = StringIO()
        CsvWriter.write_csv(test_data, out)
        out.seek(0)

        line = out.readline()
        self.assertThat(line.strip(), Equals("Q51,31,wikibase-entityid,Q5107"))

        line = out.readline()
        self.assertThat(line.strip(), Equals("Q51,373,string,Europe"))

        self.assertThat(out.read(), Equals(""))
Example #16
 def get_report(self, report_id):
     """MWS GetReport/2009-01-01 API call; quota=15 restore=60.00
     Returns the contents of a report."""
     report = self.mws.get_report(ReportId=report_id)
     data = []
     f = StringIO(report)
     f.readline()  # skip header row
     for row in reader(f, delimiter='\t'):
         #fields = dict((field, '') for field in row)
         #break
         zipped = zip(ORDER_REPORT_FIELDS, row)
         data.append(zipped)
     return data
Example #17
 def test_verbosity(self):
     stdout = StringIO()
     self.create_tenant('tenant', stdout=stdout, verbosity=3)
     tenant = Tenant.objects.get(name='tenant')
     stdout.seek(0)
     connection = connections[tenant._state.db]
     if connection.vendor == 'postgresql':
         self.assertIn(tenant.db_schema, stdout.readline())
     for model in TenantModelBase.references:
         self.assertIn(model._meta.object_name, stdout.readline())
         self.assertIn(model._meta.db_table, stdout.readline())
     self.assertIn('Installing indexes ...', stdout.readline())
     tenant.delete()
Example #18
    def __init__(self, orig_diff):
        self.orig_diff = orig_diff
        self.composed_lines = []
        self.top_matter = _echo('')

        # we want to maintain things like trailing newline or not, so
        # use readline instead of the line iterators
        diff_s = StringIO(orig_diff)
        line = diff_s.readline()

        while line != u'':
            self.composed_lines.append(_echo(line))
            line = diff_s.readline()
Example #19
    def guess_skip_lines(self, max=50, guess_lines=50, percent=0.6):

        if self.buffer:
            flat_file = StringIO(self.buffer)
        else:
            flat_file = open(self.path, mode = "rb")

        best_line = 0
        best_percent = 0

        for i in xrange(max):  # honour the max parameter instead of a hard-coded 50
            flat_file.seek(0)
            for line in range(i):
                flat_file.readline()
            tell = flat_file.tell()

            sniffer = csv.Sniffer()
            if self.dialect:
                dialect = self.dialect
            else:
                dialect = sniffer.sniff(flat_file.read(20240))
                if dialect.delimiter not in [' ','\t','|',',',';',':']:
                    dialect = csv.excel
                if dialect.delimiter == ' ':
                    dialect.delimiter = ','

            flat_file.seek(tell)
            csv_reader = UnicodeReader(flat_file, dialect, self.encoding)
            slice = itertools.islice(csv_reader, 0, guess_lines)
            good_lines, bad_lines = 0, 0
            first_line = slice.next()
            first_line_len = len([item for item in first_line if item])
            for line in slice:
                if first_line_len == len(line):
                    good_lines += 1
                else:
                    bad_lines += 1
            if bad_lines == 0 and good_lines > 5:
                self.skip_lines = i 
                self.guessed_skip_lines = True
                return
            ## when at end of file
            if bad_lines + good_lines == 0:
                break
            good_percent = float(good_lines) / (bad_lines + good_lines)  # float() avoids integer division
            if good_percent > percent and good_percent > best_percent:
                best_percent = good_percent
                best_line = i
        self.skip_lines = best_line
        self.guessed_skip_lines = True
Example #20
	def post(self):
		guserid = users.get_current_user()
		if not guserid:
			return

		merge = self.request.get('merge')
		viewonly = self.request.get('viewonly')
		overwrite = self.request.get('overwrite')
		htmlfile = self.request.get('file')
		useremail = guserid.email()
		htmlfile = StringIO(htmlfile)
		htmlfile.readline()
		cblist = ntuceiba_parser(htmlfile)

		enflag = NtuCeibaEvent()
		enflag.flagize()
		if self.request.get('addtitle') != "":
			enflag.title = True
		if self.request.get('addmember') != "":
			enflag.member = True
		if self.request.get('addmethod') != "":
			enflag.method = True
		if self.request.get('addpercent') != "":
			enflag.percent = True
		if self.request.get('adddue') != "":
			enflag.duedate = True
		if self.request.get('addlate') != "":
			enflag.late = True
		if self.request.get('addsub') != "":
			enflag.subdate = True
		if self.request.get('addcomment') != "":
			enflag.comment = True


		if merge != "":
			if overwrite != "":
				ntuceiba_merge(cblist, useremail, True)
			else:
				ntuceiba_merge(cblist, useremail, False)

		if viewonly != "":
			self.response.headers['Content-Type'] = 'text/xml; charset=UTF-8'
			self.response.out.write(ntuceiba_toxml(cblist))
		else:
			resrepo = ntuceiba_gaeds_update(cblist, useremail, enflag, None)
			self.response.headers['Content-Type'] = 'text/plain'
			self.response.out.write(
				str(resrepo[0]) + " " + 
				str(resrepo[1]) + " " +
				str(resrepo[2]))
Example #21
def _get_table(string):
    """Get climate-index table from file or url."""
    try:
        f = open(string)
    except:
        f = StringIO(urllib2.urlopen(string).read())  # str as file
    y1, y2 = [int(y) for y in f.readline().split()]   # read first line
    nrows = y2 - y1 + 1                           
    table = np.zeros((nrows, 13), 'f4')               # time + 12 months
    for l in xrange(nrows):                           # read main data
        table[l] = [float(n) for n in f.readline().split()]
    missing = float(f.readline())                     # read missing val
    table[table==missing] = np.nan
    return table
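
A usage sketch with a two-year table written to a local file (the layout is inferred from the reads above: a year-range line, one 13-column row per year, then the missing-value sentinel):

import numpy as np

text = ("1990 1991\n"
        "1990 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0 1.1 1.2\n"
        "1991 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0 1.1 -99.9 1.3\n"
        "-99.9\n")
with open('index.txt', 'w') as fh:
    fh.write(text)
table = _get_table('index.txt')
print table.shape              # (2, 13): year column plus 12 months
print np.isnan(table[1, 11])   # True: the -99.9 entry became NaN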
Example #22
    def _parse_stat(self, data):
        """ Parse the output from the 'stat' 4letter word command """
        h = StringIO(data)

        result = {}

        version = h.readline()
        if version:
            result["zk_version"] = version[version.index(":") + 1 :].strip()

        # skip all lines until we find the empty one
        while h.readline().strip():
            pass

        for line in h.readlines():
            m = re.match("Latency min/avg/max: (\d+)/(\d+)/(\d+)", line)
            if m is not None:
                result["zk_min_latency"] = int(m.group(1))
                result["zk_avg_latency"] = int(m.group(2))
                result["zk_max_latency"] = int(m.group(3))
                continue

            m = re.match("Received: (\d+)", line)
            if m is not None:
                result["zk_packets_received"] = int(m.group(1))
                continue

            m = re.match("Sent: (\d+)", line)
            if m is not None:
                result["zk_packets_sent"] = int(m.group(1))
                continue

            m = re.match("Outstanding: (\d+)", line)
            if m is not None:
                result["zk_outstanding_requests"] = int(m.group(1))
                continue

            m = re.match("Mode: (.*)", line)
            if m is not None:
                result["zk_server_state"] = m.group(1)
                continue

            m = re.match("Node count: (\d+)", line)
            if m is not None:
                result["zk_znode_count"] = int(m.group(1))
                continue

        return result
Example #23
    def assert_file_content(self, name, expected_file_content):
        if expected_file_content == "":
            self.assert_file_empty(name)
            return  # otherwise the line-count assertion below fails for empty files

        count_of_new_lines = expected_file_content.count("\n")

        if count_of_new_lines == 0:
            expected_lines = 1
        else:
            expected_lines = count_of_new_lines

        expected_content = StringIO(expected_file_content)
        actual_line_number = 0

        full_path = self.full_path(name)
        with open(full_path) as file:
            for actual_line in file:
                actual_line_number += 1
                actual_line_showing_escaped_new_line = actual_line.replace("\n", "\\n")

                expected_line = expected_content.readline()
                expected_line_showing_escaped_new_line = expected_line.replace("\n", "\\n")

                message = 'line {0} is not as expected.\n   expected: "{1}"\n    but got: "{2}"'.format(
                    actual_line_number, expected_line_showing_escaped_new_line, actual_line_showing_escaped_new_line)
                self.assertEquals(expected_line, actual_line, message)

        self.assertEqual(expected_lines, actual_line_number)
Example #24
def main(file_path):
    with ZipFile(file_path) as z:
        file_name = (os.path.basename(file_path).rsplit('.', 1)[0])+'.txt'
        f = StringIO(z.read(file_name))
        header = json.loads(f.readline())
        i = 0
        for line in f.readlines():
            hash, coinbase, merkle_branch = json.loads(line.replace('\n', ''))
            if not validate_origin(hash, coinbase, merkle_branch):
                return {
                    'status': 'ERR',
                    'err': 'wrong origin (missing /slush/ in coinbase or wrong merkle root)',
                    'hashrate': None,
                }
            if not validate(hash, header['difficulty']):
                return {
                    'status': 'ERR',
                    'err': 'too low difficulty',
                    'hashrate': None,
                }
            i += 1
    return {
        'status': 'OK',
        'err': None,
        'hashrate': hashrate_from_proof(i, header['difficulty'])
    }
Example #25
  def test_seek_set(self):
    for compression_type in [CompressionTypes.BZIP2, CompressionTypes.GZIP]:
      file_name = self._create_compressed_file(compression_type, self.content)
      with open(file_name, 'rb') as f:
        compressed_fd = CompressedFile(f, compression_type,
                                       read_size=self.read_block_size)
        reference_fd = StringIO(self.content)

        # Note: content (readline) check must come before position (tell) check
        # because cStringIO's tell() reports out of bound positions (if we seek
        # beyond the file) up until a real read occurs.
        # _CompressedFile.tell() always stays within the bounds of the
        # uncompressed content.
        for seek_position in (-1, 0, 1,
                              len(self.content)-1, len(self.content),
                              len(self.content) + 1):
          compressed_fd.seek(seek_position, os.SEEK_SET)
          reference_fd.seek(seek_position, os.SEEK_SET)

          uncompressed_line = compressed_fd.readline()
          reference_line = reference_fd.readline()
          self.assertEqual(uncompressed_line, reference_line)

          uncompressed_position = compressed_fd.tell()
          reference_position = reference_fd.tell()
          self.assertEqual(uncompressed_position, reference_position)
Example #26
	def __init__(self,cache_size):
		self.authenticate()
		#bit sloppy, but shouldn't be able to lead to a significant attack
		self.cache_size = cache_size
		try:
			f,meta = self.client.get_file_and_metadata("/config")	
			hack = StringIO(f.read())
			hack.seek(0)
			size = int(hack.readline())
			sector_size = int(hack.readline())
			super(DropboxDiskDriver,self).__init__(size,sector_size,cache_size)
			f.close()
		except rest.ErrorResponse:
			print "config doesnt exist on server"
			super(DropboxDiskDriver,self).__init__(0,1,cache_size)
			pass
Example #27
File: util.py Project: hosle/tapas
class HttpRequest(BaseHTTPRequestHandler):
    def __init__(self, request_text):
        self.path = ''
        self.rfile = StringIO(request_text)
        self.raw_requestline = self.rfile.readline()
        self.error_code = self.error_message = None
        self.parse_request()
        self.query = {}
        self.method = request_text[:4].strip().lower()
        if '?' in self.path:
            self.path, _ = self.path.split('?', 1)  # maxsplit=1 so extra '?' stay in the query
            for k,v in parse_qs(_).iteritems():
                if isinstance(v, list):
                    self.query[k] = v[0]
                else:
                    self.query[k] = v

    def send_error(self, code, message):
        self.error_code = code
        self.error_message = message

    def __repr__(self):
        return '<HttpRequest method=%s path=%s query=%s>' %(self.method, self.path, self.query)

    def toDict(self):
        return dict(method=self.method, path=self.path, query=self.query)
Example #28
class HTTPRequest(BaseHTTPRequestHandler):

    def __init__(self, request_text):
        self.rfile = StringIO(request_text)
        self.raw_requestline = self.rfile.readline()
        self.error_code = self.error_message = None
        self.parse_request()
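
A usage sketch: parse_request() populates command, path, request_version and headers from the raw text:

request_text = ("GET /index.html HTTP/1.1\r\n"
                "Host: example.com\r\n"
                "\r\n")
request = HTTPRequest(request_text)
print request.command              # 'GET'
print request.path                 # '/index.html'
print request.headers.get('Host')  # 'example.com'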
Example #29
def fetch_swift_accounts(accset, par, stordevs):

    ssh_opts="-o ConnectTimeout=1 -o StrictHostKeyChecking=no"
    # XXX Silly to have just path and explicit python, make it 755 or something
    ssh_cmd="python %s" % par.cfg["collpath"]

    for dev in stordevs:
        ssh_remote = "%s %s" % (ssh_cmd, dev[1])
        pargs = "ssh %s %s %s" % (ssh_opts, dev[0], ssh_remote)
        # XXX go to shell=False later, needs tokenizing arguments
        p = subprocess.Popen(pargs, stdout=subprocess.PIPE, shell=True)
        # The errors actually go to terminal, so no need to capture them here.
        # We capture all of the out pipe because we do not know how to plug
        # into Popen.communicate(). Then, we make a file-like object from
        # the pipe contents string and use readline() on it. Yee-haaw.
        out = p.communicate()[0]
        sfp = StringIO(out)
        while 1:
            line = sfp.readline()
            if len(line)==0:
                break
            accstr = line.rstrip("\r\n")
            # Check basic syntax, just in case
            if len(accstr.split('/')) != 3:
                continue
            a = accset.get(accstr, JointAccount(accstr))
            a.in_swift = True
            accset[accstr] = a
        # XXX Check exit code
        excode = p.wait()
Example #30
    def Write(self, value):
        """Write the given value to the file
        @param value: (Unicode) String of text to write to disk
        @note: exceptions are allowed to be raised for the writing

        """
        # Check if a magic comment was added or changed
        tbuff = StringIO(value)
        enc = CheckMagicComment([ tbuff.readline() for x in range(2) ])
        tbuff.close()
        del tbuff

        # Update encoding if necessary
        if enc is not None:
            self.encoding = enc

        # Open and write the file
        if self.DoOpen('wb'):
            Log("[ed_txt][info] Opened %s, writing as %s" % (self.path, self.encoding))
            writer = codecs.getwriter(self.encoding)(self._handle)
            if self.HasBom():
                Log("[ed_txt][info] Adding BOM back to text")
                value = self.bom + value
            writer.write(value)
            writer.close()
            Log("[ed_txt][info] %s was written successfully" % self.path)
        else:
            raise WriteError, self.last_err
Example #31
class SgeKeyValueParser(object):
    """
    Parser for SGE commands returning lines with key-value pairs.
    It takes into account multi-line key-value pairs.
    It works as an iterator returning (key, value) tuples or as a dictionary.
    It allows to filter for keys.
    """

    KEY_VALUE_RE = re.compile(r"^([^ ]+) +(.+)$")

    def __init__(self, stream, filter_keys=None, key_suffix=None):
        """
        :param stream: a string or a file-like object implementing readline()
        :param filter_keys: an iterable with the list of keys of interest.
        :param key_suffix: a key suffix to remove when parsing
        """

        # check whether it is a string or a file-like object
        if isinstance(stream, basestring):
            self.stream = StringIO(stream)
        else:
            self.stream = stream

        self.filter_keys = set(filter_keys) if filter_keys is not None else None
        self.key_suffix = key_suffix

    def next(self):
        """
        Return the next key-value pair.
        :return: (key, value)
        """

        key, value = None, None
        while key is None:
            line = self.stream.readline()
            if len(line) == 0:
                raise StopIteration

            line = line.rstrip(" \n")

            # check for multi-line options
            while len(line) > 0 and line[-1] == "\\":
                line = line[:-1] + self.stream.readline().rstrip(" \n").lstrip(" ")

            m = self.KEY_VALUE_RE.match(line)
            if m is not None:
                key, value = m.groups()
                if self.key_suffix is not None and key.endswith(self.key_suffix):
                    key = key[:-len(self.key_suffix)]
                if self.filter_keys is not None and key not in self.filter_keys:
                    key = None # skip this pair

        return key, value

    def __iter__(self):
        return self

    def as_dict(self):
        """
        Parses the key-value pairs and return them as a dictionary.
        :return: a dictionary containing key-value pairs parsed from a SGE command.
        """

        d = dict()
        for key, value in self:
            d[key] = value

        return d
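
A usage sketch with a fabricated SGE-style dump, showing both the backslash continuation handling and key filtering:

sample = ("qname        all.q\n"
          "hostlist     node01 node02 \\\n"
          "             node03\n")
for key, value in SgeKeyValueParser(sample, filter_keys=["hostlist"]):
    print key, value   # hostlist node01 node02 node03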
Example #32
def test_filecache():
    import dxtbx.filecache
    import libtbx.load_env
    from StringIO import StringIO  # this is not cStringIO on purpose!

    dxtbx_dir = libtbx.env.dist_path('dxtbx')
    image = os.path.join(dxtbx_dir, 'tests', 'phi_scan_001.cbf')

    with open(image, 'rb') as fh:
        correct_data = fh.read()

    # Create a caching object
    cache = dxtbx.filecache.lazy_file_cache(open(image, 'rb'))

    # read 100 bytes
    sh = StringIO(correct_data)
    with cache.open() as fh:
        actual = fh.read(100)
        expected = sh.read(100)
        assert (actual == expected)
        actual = fh.read(0)
        expected = sh.read(0)
        assert (actual == expected)
        actual = fh.read(5000)
        expected = sh.read(5000)
        assert (actual == expected)

    # readlines
    sh = StringIO(correct_data)
    with cache.open() as fh:
        actual = fh.readlines()
        expected = sh.readlines()
        assert (actual == expected)

    # 5x readline
    sh = StringIO(correct_data)
    with cache.open() as fh:
        actual = [fh.readline() for n in range(5)]
        expected = [sh.readline() for n in range(5)]
        assert (actual == expected)

    # Get a new cache object
    cache.close()
    cache = dxtbx.filecache.lazy_file_cache(open(image, 'rb'))
    cache._page_size = 5
    fh = dxtbx.filecache.pseudo_file(cache)

    # readline stress test
    sh = StringIO(correct_data)
    with cache.open() as fh:
        actual = fh.readline()
        expected = sh.readline()
        assert (actual == expected)

        actual = fh.read(68)
        expected = sh.read(68)
        assert (actual == expected)

        actual = fh.readline()
        expected = sh.readline()
        assert (actual == expected)

        actual = fh.read(1)
        expected = sh.read(1)
        assert (actual == expected)

    # Get a new cache object
    cache.close()
    cache = dxtbx.filecache.lazy_file_cache(open(image, 'rb'))

    sh = StringIO(correct_data)
    fh = dxtbx.filecache.pseudo_file(cache)
    import random

    random_a, random_b = random.randint(0, 10000), random.randint(0, 150000)
    print "Running test for parameters %d %d" % (random_a, random_b)

    actual   = (fh.readline(), \
                fh.read(random_a), \
                fh.readline(), \
                fh.read(random_b))
    expected = (sh.readline(), \
                sh.read(random_a), \
                sh.readline(), \
                sh.read(random_b))
    assert (actual == expected)
Example #33
out_s.write(pickled_data)

o = SimpleObject("dismatch")
pickled_data = pickle.dumps(o)
digest = make_digest("Ohter Data")
header = "%s %s" % (digest, len(pickled_data))
print "\nWRITING:", header
out_s.write(header + '\n')
out_s.write(pickled_data)

out_s.flush()

# reader side
in_s = StringIO(out_s.getvalue())
while True:
    first_line = in_s.readline()
    if not first_line:
        break
    incoming_digest, incoming_length = first_line.split(' ')
    print '\nRead:', incoming_digest, incoming_length.strip()

    incoming_length = int(incoming_length)
    incoming_pickled_data = in_s.read(incoming_length)
    actual_digest = make_digest(incoming_pickled_data)
    print "Autual:", actual_digest

    if actual_digest != incoming_digest:
        print "WARNING: Data corruption"
    else:
        obj = pickle.loads(incoming_pickled_data)
        print "OK:", obj
Example #34
    def _load_crustal_model(self):

        if self._directory is not None:
            path_keys = os.path.join(self._directory, Crust2.fn_keys)
            f = open(path_keys, 'r')
        else:
            from pyrocko.crust2x2_data import decode, type2_key, type2, \
                elevation

            f = StringIO(decode(type2_key))

        # skip header
        for i in range(5):
            f.readline()

        profiles = {}
        while True:
            line = f.readline()
            if not line:
                break
            ident, name = line.split(None, 1)
            line = f.readline()
            vp = _sa2arr(line.split()) * 1000.
            line = f.readline()
            vs = _sa2arr(line.split()) * 1000.
            line = f.readline()
            rho = _sa2arr(line.split()) * 1000.
            line = f.readline()
            toks = line.split()
            thickness = _sa2arr(toks[:-2]) * 1000.

            assert ident not in profiles

            profiles[ident] = Crust2Profile(ident.strip(), name.strip(), vp,
                                            vs, rho, thickness, 0.0)

        f.close()

        self._raw_profiles = profiles
        self.profile_keys = sorted(profiles.keys())

        if self._directory is not None:
            path_map = os.path.join(self._directory, Crust2.fn_map)
            f = open(path_map, 'r')
        else:
            f = StringIO(decode(type2))

        f.readline()  # header

        amap = {}
        for ila, line in enumerate(f):
            keys = line.split()[1:]
            for ilo, key in enumerate(keys):
                amap[ila, ilo] = copy.deepcopy(profiles[key])

        f.close()

        if self._directory is not None:
            path_elevation = os.path.join(self._directory, Crust2.fn_elevation)
            f = open(path_elevation, 'r')

        else:
            f = StringIO(decode(elevation))

        f.readline()
        for ila, line in enumerate(f):
            for ilo, s in enumerate(line.split()[1:]):
                p = amap[ila, ilo]
                p.set_elevation(float(s))
                if p.elevation() < 0.:
                    p.set_layer_thickness(LWATER, -p.elevation())

        f.close()

        self._typemap = amap
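
The _sa2arr helper converts whitespace-split string tokens into a numeric array; a hypothetical sketch:

import numpy as np

def _sa2arr(sa):
    # Hypothetical: sequence of numeric strings -> float array.
    return np.array([float(x) for x in sa])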
Example #35
class seek_wrapper:
    """Adds a seek method to a file object.

    This is only designed for seeking on readonly file-like objects.

    Wrapped file-like object must have a read method.  The readline method is
    only supported if that method is present on the wrapped object.  The
    readlines method is always supported.  xreadlines and iteration are
    supported only for Python 2.2 and above.

    Public attribute: wrapped (the wrapped file object).

    WARNING: All other attributes of the wrapped object (ie. those that are not
    one of wrapped, read, readline, readlines, xreadlines, __iter__ and next)
    are passed through unaltered, which may or may not make sense for your
    particular file object.

    """

    # General strategy is to check that the cache is full enough, then
    # delegate everything to the cache (self.__cache, which is a
    # StringIO.StringIO instance).  Seems to be some cStringIO.StringIO
    # problem on 1.5.2 -- I get a StringO object, with no readlines method.

    # Invariant: the end of the cache is always at the same place as the
    # end of the wrapped file:
    # self.wrapped.tell() == len(self._cache.getvalue())

    def __init__(self, wrapped):
        self.wrapped = wrapped
        self.__have_readline = hasattr(self.wrapped, "readline")
        self.__cache = StringIO()

    def __getattr__(self, name):
        return getattr(self.wrapped, name)

    def seek(self, offset, whence=0):
        # make sure we have read all data up to the point we are seeking to
        pos = self.__cache.tell()
        if whence == 0:  # absolute
            to_read = offset - pos
        elif whence == 1:  # relative to current position
            to_read = offset
        elif whence == 2:  # relative to end of *wrapped* file
            # since we don't know yet where the end of that file is, we must
            # read everything
            to_read = None
        if to_read >= 0 or to_read is None:
            if to_read is None:
                self.__cache.write(self.wrapped.read())
            else:
                self.__cache.write(self.wrapped.read(to_read))
            self.__cache.seek(pos)

        return self.__cache.seek(offset, whence)

    def read(self, size=-1):
        pos = self.__cache.tell()

        self.__cache.seek(pos)

        end = len(self.__cache.getvalue())
        available = end - pos

        # enough data already cached?
        if size <= available and size != -1:
            return self.__cache.read(size)

        # no, so read sufficient data from wrapped file and cache it
        to_read = size - available
        assert to_read > 0 or size == -1
        self.__cache.seek(0, 2)
        if size == -1:
            self.__cache.write(self.wrapped.read())
        else:
            self.__cache.write(self.wrapped.read(to_read))
        self.__cache.seek(pos)

        return self.__cache.read(size)

    def readline(self, size=-1):
        if not self.__have_readline:
            raise NotImplementedError("no readline method on wrapped object")

        # line we're about to read might not be complete in the cache, so
        # read another line first
        pos = self.__cache.tell()
        self.__cache.seek(0, 2)
        self.__cache.write(self.wrapped.readline())
        self.__cache.seek(pos)

        data = self.__cache.readline()
        if size != -1:
            r = data[:size]
            self.__cache.seek(pos + size)
        else:
            r = data
        return r

    def readlines(self, sizehint=-1):
        pos = self.__cache.tell()
        self.__cache.seek(0, 2)
        self.__cache.write(self.wrapped.read())
        self.__cache.seek(pos)
        try:
            return self.__cache.readlines(sizehint)
        except TypeError:  # 1.5.2 hack
            return self.__cache.readlines()

    def __iter__(self):
        return self

    def next(self):
        line = self.readline()
        if line == "": raise StopIteration
        return line

    xreadlines = __iter__

    def __repr__(self):
        return ("<%s at %s whose wrapped object = %s>" %
                (self.__class__.__name__, ` id(self) `, ` self.wrapped `))

    def close(self):
        self.read = None
        self.readline = None
        self.readlines = None
        self.seek = None
        if self.wrapped: self.wrapped.close()
        self.wrapped = None
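
A usage sketch wrapping a non-seekable HTTP response so a caller can rewind it (needs network access):

import urllib2

response = seek_wrapper(urllib2.urlopen("http://example.com/"))
first_line = response.readline()   # transparently cached as it is read
response.seek(0)                   # rewind via the cache
assert response.readline() == first_line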
Example #36
class HTTPHandler(BaseHTTPRequestHandler):
    def __init__(self,
                 request_string,
                 client_address,
                 server_version=None,
                 sys_version=None):
        """
        Encapsulates http request parsing and facilitates generation of proper (and improper) http responses.

        :param request_string: raw HTTP request to be parsed.
        :param client_address: tuple containing the client's ip and source port.
        :param server_version: set server version to be used in response header (Optional).
        :param sys_version: set sys version to be used in response header (Optional).
        """
        #Parent class expects file objects
        self.rfile = StringIO(request_string)
        self.wfile = StringIO()
        self.rfile.seek(0)

        self.client_address = client_address

        self.requestline = ''
        self.request_version = 'HTTP/1.0'
        self.path = ''
        self.command = ''
        self.query = ''
        self.raw_requestline = ''
        self.close_connection = None
        self.request_body = ''
        self.http_host = ''

        #parse the request
        self.handle_one_request()

        #If not defined default values will be provided by parent.
        if server_version:
            self.server_version = server_version
        if sys_version:
            self.sys_version = sys_version

        #The following instance variables ensure consistent naming.
        url = urlparse.urlparse(self.path)
        #path + parameters + query string + fragment (ex: /mad.php;woot?a=c#beer)
        self.request_url = self.path
        #the entire http request
        self.request_raw = request_string
        #parsed query dictionary. See http://docs.python.org/2/library/urlparse.html for the format.
        self.request_query = urlparse.parse_qs(url.query, True)
        #parameters (no, this is NOT the query string!)
        self.request_params = url.params
        #the clean path. (ex: /info.php)
        self.request_path = url.path
        #GET, POST, DELETE, TRACE, etc.
        self.request_verb = self.command
        if hasattr(self, 'headers'):
            self.request_headers = self.headers
            #http host from request
            self.http_host = self.headers.get('Host')
        else:
            self.request_headers = BaseHTTPRequestHandler.MessageClass

    def handle_one_request(self):
        """
        Handles and parses the request.
        """
        self.raw_requestline = self.rfile.readline(65537)
        if len(self.raw_requestline) > 65536:
            self.send_error(414)
            return
        if not self.raw_requestline:
            self.close_connection = 1
            return
        # parse_request (duh); parsing errors will result in a proper http response (see self.get_response())
        if not self.parse_request():
            # An error code has been sent, just exit
            return
        # In the original implementation this method would have called the 'do_' + self.command method
        if self.command not in ('PUT', 'GET', 'POST', 'HEAD', 'TRACE',
                                'OPTIONS'):
            self.send_error(501, "Unsupported method (%r)" % self.command)
            return

        # At this point we have parsed the headers which means that
        # the rest of the request is the body
        self.request_body = self.rfile.read()

    def set_response(self,
                     body,
                     http_code=200,
                     headers=(('Content-type', 'text/html'), )):
        """
        Sets body, response code and headers. Mapping between http_code and error text is handled
        by the parent class.

        :param body: the response body.
        :param http_code: http code to be used in response (default=200).
        :param headers: tuple of (header, value) pairs for the response header (default= (('Content-type', 'text/html'),))
        """
        self.send_response(http_code)
        for header in headers:
            self.send_header(header[0], header[1])
        self.end_headers()
        self.wfile.write(body)

    def set_raw_response(self, content):
        """
        Provides a convenient way to fully control the entire http response. This comes in handy when writing attack
        modules, which often break protocol standards.
        """
        self.wfile = StringIO(content)

    def send_error(self, code, message=None):
        """
        Generates a proper http error response. This method is guaranteed to raise a HTTPError exception after the
        response has been generated.

        :param code: http error code to return.
        :param message: error message in plain text, if not provided a text match will be lookup using the error code. (Optional).
        :raise: HTTPError
        """
        BaseHTTPRequestHandler.send_error(self, code, message)
        #raise error so that we can make sure this request is not passed to attack handlers
        raise HTTPError(self.get_response())

    def get_response(self):
        """
        Returns the entire http response.
        """
        return self.wfile.getvalue()

    def get_response_header(self):
        """
        Returns the http response header.
        """
        if "\r\n\r\n" in self.wfile.getvalue():
            return self.wfile.getvalue().split('\r\n\r\n', 1)[0]
        else:
            return self.wfile.getvalue()

    def get_response_body(self):
        """
        Returns the http response body.
        """
        if '\r\n\r\n' in self.wfile.getvalue():
            return self.wfile.getvalue().split('\r\n\r\n', 1)[1]
        else:
            return self.wfile.getvalue()

    def log_message(self, log_format, *args):
        pass

    def version_string(self):
        """
        Return the server software version string.
        This will be included in the http response
        """
        return self.server_version + ' ' + self.sys_version
Example #37
def parse_output(obj):
    """
    Param:
    -------
    obj: str or object with `readline` method
         the object to be parsed
    
    Return:
    -------
    The node lists
    The edges

    >>> from codecs import open
    >>> t1 = parse_output(open("data/test_parse_tree.txt", "r", "utf8"))
    >>> len(t1)
    1
    >>> t2 = parse_output(open("data/test_parse_tree.txt", "r", "utf8").read())
    >>> len(t2)
    1
    >>> assert t1[0].nodes == t2[0].nodes
    >>> assert t1[0].edges == t2[0].edges
    >>> print t1[0].nodes
    [ROOT-0, Schneider(NNP)-1, Electric(NNP)-2, Introduces(VBZ)-3, Strategic(NNP)-4, Operation(NNP)-5, Services(NNPS)-6, Offerings(NNPS])-7]
    >>> print t1[0].edges
    [(ROOT-0, Introduces(VBZ)-3, root), (Electric(NNP)-2, Schneider(NNP)-1, nn), (Introduces(VBZ)-3, Electric(NNP)-2, nsubj), (Offerings(NNPS])-7, Strategic(NNP)-4, nn), (Offerings(NNPS])-7, Operation(NNP)-5, nn), (Offerings(NNPS])-7, Services(NNPS)-6, nn), (Introduces(VBZ)-3, Offerings(NNPS])-7, dobj)]
    >>> t3 = parse_output(open("data/test_parse_tree_multi_sent_case.txt", "r", "utf8").read())
    >>> len(t3) 
    2
    """
    SENT_PREFIX = "Sentence #"
    if isinstance(obj, basestring):
        obj = StringIO(obj)
    else:
        assert hasattr(obj, 'readline'), "obj should have `readline` method "
        assert hasattr(obj, 'readlines'), "obj should have `readlines` method "

    results = []

    sent_id = 1

    # skip the first line as it's useless
    l = obj.readline()
    assert l.startswith(SENT_PREFIX)
    while True:
        sentence = obj.readline().strip()

        if len(sentence) == 0:  #end of story
            break

        nodes = parse_token_pos_line(obj.readline(), prepend_root=True)
        edges = []
        for l in obj:
            if len(l.strip()) == 0:  # skip non-sense lines
                continue
            if l.startswith(SENT_PREFIX):
                break
            edges.append(parse_edge_line(l.strip(), nodes))

        results.append(DepParseResult(sent_id, sentence, nodes, edges))
        sent_id += 1

    return results
Example #38
 def __getURL(self, url, timeout=5):
     """
     this is a simplified replacement for urllib2 which doesn't support setting a timeout.
     by default, if timeout is not specified, it waits 5 seconds
     """
     r = re.compile(r"http://([^:/]+)(:(\d+))?(/.*)").match(url)
     if r is None:
         self.logger.log(Logger.ERROR, "Cannot open "+url+". Incorrectly formed URL.")
         return None
     host = r.group(1)
     if r.group(3) is None:
         port = 80  # no port is given, pick the default 80 for HTTP
     else:
         port = int(r.group(3))
     if r.group(4) is None:
         path = ""  # no path is given, let the server decide
     else:
         path = r.group(4).encode()
     sock = None
     err = None
     try:
         for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
             af, socktype, proto, dummycanonname, sa = res
             try:
                 sock = socket.socket(af, socktype, proto)
             except socket.error as msg:
                 sock = None
                 err = msg
                 continue
             try:
                 if hasattr(sock, 'settimeout'):
                     self.logger.log(Logger.DEBUG, "Setting socket timeout with settimeout.")
                     sock.settimeout(timeout)
                 else:
                     self.logger.log(Logger.DEBUG, "Setting socket timeout with setsockopt.")
                     sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDTIMEO, struct.pack("ii", timeout, 0))
                     sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, struct.pack("ii", timeout, 0))
                 sock.connect(sa)
             except socket.error as msg:
                 sock.close()
                 sock = None
                 err = msg
                 continue
             break
     except socket.error as msg:
         sock = None
         err = msg
     if sock is None:
         self.logger.log(Logger.ERROR, "Cannot open "+url)
         self.logger.log(Logger.ERROR, "SocketError: "+str(err))
         return None
     try:
         sock.send(b"GET "+path+b" HTTP/1.0\n\n")
         data = ""
         done = False
         while not done:
             moreData = sock.recv(4096)
             data += moreData.decode("utf-8")
             done = len(moreData) == 0
         sock.close()
         fd = StringIO(data)
         httpStatus = 0
         while True:
             line = fd.readline().strip()
             if line == "":
                 break  # stop at end of file or at the first empty line (end of the http headers)
             r = re.compile(r"HTTP/\d\.\d (\d+)").match(line)
             if r is not None:
                 httpStatus = int(r.group(1))
         if httpStatus == 200:
             # success: return the file object, positioned at the start of the body
             return fd
         self.logger.log(Logger.ERROR, "Cannot open "+url)
         if httpStatus == 401:
             self.logger.log(Logger.ERROR, 'HTTPError: not authorized ['+str(httpStatus)+']')
         elif httpStatus == 404:
             self.logger.log(Logger.ERROR, 'HTTPError: not found ['+str(httpStatus)+']')
         elif httpStatus == 503:
             self.logger.log(Logger.ERROR, 'HTTPError: service unavailable ['+str(httpStatus)+']')
         else:
             self.logger.log(Logger.ERROR, 'HTTPError: unknown error ['+str(httpStatus)+']')
         return None
     except socket.error as msg:
         self.logger.log(Logger.ERROR, "Cannot open "+url)
         self.logger.log(Logger.ERROR, "SocketError: "+str(msg))
         sock.close()
         return None
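
The header-reading loop above is easy to exercise without a socket. A minimal sketch, assuming a hypothetical canned HTTP/1.0 response, that runs the same status-line regex over a StringIO:

import re
from io import StringIO

# Hypothetical canned response; only here to exercise the header loop above.
response = ("HTTP/1.0 200 OK\n"
            "Content-Type: text/plain\n"
            "\n"
            "hello world\n")

fd = StringIO(response)
httpStatus = 0
while True:
    line = fd.readline().strip()
    if line == "":
        break  # end of headers
    m = re.match(r"HTTP/\d\.\d (\d+)", line)
    if m is not None:
        httpStatus = int(m.group(1))

print(httpStatus)  # 200
print(fd.read())   # the body: 'hello world\n'
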
Example #39
0
def handle_data():
    request = app.current_request
    d = parse_qs(app.current_request.raw_body.decode())
    # data to csv

    try:
        my_dict = {k: v[0] for k, v in d.iteritems()}
    except AttributeError:
        my_dict = {k: v[0] for k, v in d.items()}

    my_dict['content'] = my_dict.pop('raw_text')
    f = StringIO()
    w = csv.DictWriter(f, my_dict.keys())
    w.writeheader()
    w.writerow(my_dict)

    content_type = 'text/csv'
    headers = {'Content-Type': content_type, 'Accept': 'Accept'}

    res = sagemaker.invoke_endpoint(EndpointName='CHALICE_PROJECT',
                                    Body=f.getvalue(),
                                    ContentType='text/csv',
                                    Accept='Accept')
    # res.json to dict
    # format dict to pretty

    result = res['Body']

    f = StringIO()
    f.write(result.read().decode().replace("\\r\\n",
                                           "<br>").replace("\\n", "<br>"))
    f.seek(0)

    result_html = """
<!DOCTYPE html>
<html>
<head>
        <title>Model Prediction</title>
<style>
body{{
        background-color: white;
        text-align: center;
        color: #545b64;
        font-size: 16px;

}}
</style>
</head>
<body>
        <center>{0}</center>
        <table align=center><tr valign=top align=left><td width=28%>{1}</td><td width=28%>{2}</td><td width=28%>{3}</td></tr></table>

</body>
</html>


    """.format(f.readline(), f.readline(), f.readline(), f.readline())

    return Response(body=result_html,
                    status_code=200,
                    headers={'Content-Type': 'text/html'})
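
The dict-to-CSV step is the reusable part of this handler. A minimal sketch of just that step, with hypothetical form fields and no Chalice or SageMaker dependency:

import csv
from io import StringIO

my_dict = {'content': 'some raw text', 'label': 'demo'}  # hypothetical fields

f = StringIO()
w = csv.DictWriter(f, my_dict.keys())
w.writeheader()
w.writerow(my_dict)

print(f.getvalue())
# content,label
# some raw text,demo
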
Example #40
0
import os
import sys
import commands

from StringIO import StringIO
from buildbot.scripts import runner

MASTER = sys.argv[1]

CHANGESET_ID = os.environ["HG_NODE"]

# TODO: consider doing 'import mercurial.hg' and extract this information
# using the native python
out = commands.getoutput(
    "hg log -r %s --template '{author}\n{files}\n{desc}'" % CHANGESET_ID)

s = StringIO(out)
user = s.readline().strip()
# NOTE: this fails when filenames contain spaces. I cannot find a way to get
# hg to use some other filename separator.
files = s.readline().strip().split()
comments = "".join(s.readlines())

change = {
    'master': MASTER,
    # note: this is more likely to be a full email address, which would make
    # the left-hand "Changes" column kind of wide. The buildmaster should
    # probably be improved to display an abbreviation of the username.
    'username': user,
    'revision': CHANGESET_ID,
    'comments': comments,
    'files': files,
}
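
The three reads above split the template output by line. A minimal sketch with a hypothetical sample of what the `hg log --template` call returns:

from io import StringIO

# Hypothetical template output: author line, files line, then the description.
out = "alice@example.com\nsrc/a.py src/b.py\nFix the widget\nacross two lines"

s = StringIO(out)
user = s.readline().strip()           # 'alice@example.com'
files = s.readline().strip().split()  # ['src/a.py', 'src/b.py']
comments = "".join(s.readlines())     # everything left, newlines intact

print(user, files)
print(comments)
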
Example #41
0
class HTTPRepeater(BaseHTTPRequestHandler):
    def __init__(self, url, request_text, proxy=None):  # just require the url after all
        self.rfile = StringIO(request_text)
        self.raw_requestline = self.rfile.readline()
        self.parse_request()

        ###parameters that needed in repeater request
        self.proxy = proxy
        self.method = self.command
        #self.data = self.rfile.read(int(self.headers['content-length']))  # fixed length
        self.data = self.rfile.read()  # variable length, read to the end; suits a request body modified in transit
        self.header = self.headers.dict

        self.header["content-length"] = str(len(
            self.data))  # update content-length after the body has been modified
        if "cookie" in self.header.keys():
            cookie_string = self.header["cookie"]
            cookie = SimpleCookie(cookie_string)
            self.cookie = {i.key: i.value for i in cookie.values()}
            #self.cookie = Cookie.Cookie()
            #self.cookie.load(cookie_string)
        else:
            self.cookie = None

        #####do some check on url and request raw
        if url:
            print urlparse.urlparse(url)
            TextPath = "{0}?{1}".format(
                urlparse.urlparse(url).path,
                urlparse.urlparse(url).query)
            if TextPath == self.path:
                self.url = url
            else:
                print "Error! url is different from the request text"

        elif "origin" in self.headers.keys():
            self.url = self.headers["origin"]
        elif "referer" in self.header.keys() and "host" in self.header.keys():
            if urlparse.urlparse(
                    self.headers["referer"]).netloc == self.headers["host"]:
                scheme = urlparse.urlparse(self.headers["origin"]).scheme
                self.url = "{0}{1}{2}".format(scheme, self.headers["host"],
                                              self.path)
        else:
            print("please specify the url")

    def show(self):
        print("Method: {0}".format(self.method))
        print("Header: {0}".format(self.header))
        print("Cookie: {0}".format(self.cookie))
        print("Data: {0}".format(self.data))

    def repeat(self):  #do request
        DoMethod = getattr(requests.Session(), self.method.lower())

        if self.url.startswith("https://") and self.proxy:
            verify = False
        else:
            verify = True
        response = DoMethod(self.url,
                            headers=self.header,
                            cookies=self.cookie,
                            data=self.data,
                            proxies=self.proxy,
                            verify=verify)
        return response
Example #42
0
 def readline(self, length=None):
     if self.closed:
         return ''
     return StringIO.readline(self, length)
# assumed to be the rest of the help message for either an option or a group.
regx_helpmsg = re.compile(r'^\s+(?P<msg>.+)')
# Note: this will match both optmeta and optmsg lines, so need to test for
# for those before this.

# Lines that do not start with whitespace will be considered to be the start of
# a new option group. This is mutually exclusive of all the previous regxs,
# since they all require whitespace at the start of a line.
regx_newgroup = re.compile(r'^(?P<msg>\S.+)')

# now format the string buffer into a rst table
fp.seek(0)
# we want to skip everything up to the "optional arguments:"
skip = True
while skip:
    line = fp.readline()
    m = regx_newgroup.match(line)
    if m is not None:
        skip = m.group('msg') != 'optional arguments:'

# advance past the 'optional arguments:' and the 'help' line
line = fp.readline()
line = fp.readline()

# now read through the rest of the lines, converting options into a list of
# tuples with order (option, meta data, help message), grouped by option groups
# add a header row
header = Row(wrap_option=False)
header.option = 'Name'
header.metavar = 'Syntax'
header.helpmsg = 'Description'
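
The skip loop above keys on the literal heading 'optional arguments:'. A self-contained sketch of the same pattern, feeding real argparse help text through a StringIO; note the sentinel is an assumption, since Python 3.10+ renames the heading to 'options:':

import argparse
import re
from io import StringIO

parser = argparse.ArgumentParser(prog='demo')
parser.add_argument('--count', metavar='N', help='number of repetitions')

fp = StringIO(parser.format_help())
regx_newgroup = re.compile(r'^(?P<msg>\S.+)')

# skip everything up to the options group heading
skip = True
while skip:
    line = fp.readline()
    if not line:
        break  # heading not found; avoid looping forever
    m = regx_newgroup.match(line)
    if m is not None:
        skip = m.group('msg') not in ('optional arguments:', 'options:')

print(fp.read())  # the remaining option descriptions
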
Example #44
0
class TestFuzzLoggerCsv(unittest.TestCase):
    def setUp(self):
        self.virtual_file = StringIO()
        self.logger = fuzz_logger_csv.FuzzLoggerCsv(file_handle=self.virtual_file)
        self.some_test_case_id = "some test case"
        self.some_test_case_name = "some test case name"
        self.some_test_case_index = 3
        self.some_test_step_msg = "Test!!!"
        self.some_log_check_msg = "logging"
        self.some_log_info_msg = "information"
        self.some_log_fail_msg = "broken"
        self.some_log_pass_msg = "it works so far!"
        self.some_log_error_msg = "D:"
        self.some_recv_data = six.binary_type(b'A B C')
        self.some_send_data = six.binary_type(b'123')

    def test_open_test_case(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
        Then: open_test_case logs as expected.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))

    def test_open_test_step(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
         and: Calling open_test_step with some description.
        Then: open_test_case logs as expected.
         and: open_test_step logs as expected.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.open_test_step(self.some_test_step_msg)

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("open step,,," + self.some_test_step_msg + "\r\n"))

    def test_log_check(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
         and: Calling log_check with some description.
        Then: open_test_case logs as expected.
         and: log_check logs as expected.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.log_check(self.some_log_check_msg)

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("check,,," + self.some_log_check_msg + "\r\n"))

    def test_log_error(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
         and: Calling log_error with some description.
        Then: open_test_case logs as expected.
         and: log_error logs as expected.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.log_error(self.some_log_error_msg)

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("error,,," + self.some_log_error_msg + "\r\n"))

    def test_log_recv(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
         and: Calling log_recv with some data.
        Then: open_test_case logs as expected.
         and: log_recv logs as expected.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.log_recv(self.some_recv_data)

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "recv," + str(len(self.some_recv_data)) + "," + fuzz_logger_csv.DEFAULT_HEX_TO_STR(
                                self.some_recv_data) + "," + self.some_recv_data.decode() + "\r\n"))

    def test_log_send(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
         and: Calling log_send with some data.
        Then: open_test_case logs as expected.
         and: log_send logs as expected.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.log_send(self.some_send_data)

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "send," + str(len(self.some_send_data)) + "," + fuzz_logger_csv.DEFAULT_HEX_TO_STR(
                                self.some_send_data) + "," + self.some_send_data.decode() + "\r\n"))

    def test_log_info(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
         and: Calling log_info with some description.
        Then: open_test_case logs as expected.
         and: log_info logs as expected.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.log_info(self.some_log_info_msg)

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("info,,," + self.some_log_info_msg + "\r\n"))

    def test_log_fail(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
         and: Calling log_fail with some description.
        Then: open_test_case logs as expected.
         and: log_fail logs as expected.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.log_fail(self.some_log_fail_msg)

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("fail,,," + self.some_log_fail_msg + "\r\n"))

    def test_log_pass(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
         and: Calling log_pass with some description.
        Then: open_test_case logs as expected.
         and: log_pass logs as expected.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.log_pass(self.some_log_pass_msg)

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("pass,,," + self.some_log_pass_msg + "\r\n"))

    def test_open_test_case_empty(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with an empty string.
        Then: open_test_case logs with a zero-length test case id.
        """
        # When
        self.logger.open_test_case('', name=self.some_test_case_name, index=self.some_test_case_index)

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("open test case,,,Test case \r\n"))

    def test_open_test_step_empty(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
         and: Calling open_test_step with an empty string.
        Then: open_test_step logs with a zero-length description.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.open_test_step('')

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("open step,,,\r\n"))

    def test_log_check_empty(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
         and: Calling log_check with an empty string.
        Then: log_check logs with a zero-length description.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.log_check('')

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("check,,,\r\n"))

    def test_log_error_empty(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
         and: Calling log_error with an empty string.
        Then: log_error logs with a zero-length description.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.log_error('')

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("error,,,\r\n"))

    def test_log_recv_empty(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
         and: Calling log_recv with an empty buffer.
        Then: log_recv logs with zero-length data.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.log_recv(six.binary_type(b''))

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "recv,0," + fuzz_logger_csv.DEFAULT_HEX_TO_STR(bytes(b'')) + ",\r\n"))

    def test_log_send_empty(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
         and: Calling log_send with an empty buffer.
        Then: log_send logs with zero-length data.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.log_send(six.binary_type(b''))

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "send,0," + fuzz_logger_csv.DEFAULT_HEX_TO_STR(bytes(b'')) + ",\r\n"))

    def test_log_info_empty(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
         and: Calling log_info with an empty string.
        Then: log_info logs with a zero-length description.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.log_info('')

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("info,,,\r\n"))

    def test_log_fail_empty(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
         and: Calling log_fail with no argument.
        Then: log_fail logs with a zero-length description.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.log_fail('')

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("fail,,,\r\n"))

    def test_log_pass_empty(self):
        """
        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
         and: Calling log_pass with no argument.
        Then: log_pass logs with a zero-length description.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.log_pass('')

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("pass,,,\r\n"))

    def test_several(self):
        """
        Verify that log functions work consistently in series.

        Given: FuzzLoggerCsv with a virtual file handle.
        When: Calling open_test_case with some test_case_id.
         and: Calling open_test_step with some description.
         and: Calling log_recv with some data.
         and: Calling log_send with some data.
         and: Calling log_info with some description.
         and: Calling log_check with some description.
         and: Calling log_fail with some description.
         and: Calling log_pass with some description.
         and: Calling log_error with some description.
        Then: All methods log as expected.
        """
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.open_test_step(self.some_test_step_msg)
        self.logger.log_recv(self.some_recv_data)
        self.logger.log_send(self.some_send_data)
        self.logger.log_info(self.some_log_info_msg)
        self.logger.log_check(self.some_log_check_msg)
        self.logger.log_fail(self.some_log_fail_msg)
        self.logger.log_pass(self.some_log_pass_msg)
        self.logger.log_error(self.some_log_error_msg)

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("open step,,," + self.some_test_step_msg + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "recv," + str(len(self.some_recv_data)) + "," + fuzz_logger_csv.DEFAULT_HEX_TO_STR(
                                self.some_recv_data) + "," + self.some_recv_data.decode() + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "send," + str(len(self.some_send_data)) + "," + fuzz_logger_csv.DEFAULT_HEX_TO_STR(
                                self.some_send_data) + "," + self.some_send_data.decode() + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("info,,," + self.some_log_info_msg + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("check,,," + self.some_log_check_msg + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("fail,,," + self.some_log_fail_msg + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("pass,,," + self.some_log_pass_msg + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape("error,,," + self.some_log_error_msg + "\r\n"))

    def test_hex_to_str_function(self):
        """
        Verify that the UUT uses the custom hex_to_str function, if provided.

        Given: FuzzLoggerCsv with a virtual file handle and custom hex_to_str
               function.
        When: Calling open_test_case with some test_case_id.
         and: Calling log_recv with some data.
        Then: open_test_case logs as expected.
         and: log_recv logs as expected, using the custom hex_to_str function.
        """

        # Given
        def hex_to_str(hex_data):
            return hex_data.decode()

        self.logger = fuzz_logger_csv.FuzzLoggerCsv(file_handle=self.virtual_file,
                                                    bytes_to_str=hex_to_str)
        # When
        self.logger.open_test_case(self.some_test_case_id,
                                   name=self.some_test_case_name,
                                   index=self.some_test_case_index)
        self.logger.log_recv(self.some_recv_data)

        # Then
        self.virtual_file.seek(0)
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "open test case,,,Test case " + self.some_test_case_id + "\r\n"))
        six.assertRegex(self, self.virtual_file.readline(),
                        LOGGER_PREAMBLE + re.escape(
                            "recv," + str(len(self.some_recv_data)) + "," + hex_to_str(
                                self.some_recv_data) + "," + self.some_recv_data.decode() + "\r\n"))
Example #45
0
    sys.exit(1)



# TODO: consider doing 'import mercurial.hg' and extract this information
# using the native python
out = commands.getoutput("hg -v log -r %s" % revision)

print("hg -v log:\n%s" % out)

# TODO: or maybe use --template instead of trying hard to parse everything
#out = commands.getoutput("hg --template SOMETHING log -r %s" % CHANGESET_ID)

s = StringIO(out)
while True:
    line = s.readline()
    if not line:
        break
    if line.startswith("changeset:"):
        revision = line[line.find(":")+1:].strip()

# Write out a file that contains the output from "hg log"
filename = "change-%s" % revision[0:revision.find(":")]
changefile = open("/var/log/%s" % filename, "w")
changefile.write(out)
changefile.close()

# Copy the change file out to buildbot master host machine
scpcall = commands.getoutput("scp /var/log/%s %s/%s" % (filename, DESTINATION, filename))

# Remove the file as it is no longer needed
Example #46
0
class ProxyConnection(ConnectionHandler):
    def __init__(self, *args, **kwargs):
        super(ProxyConnection, self).__init__(*args, **kwargs)
        self._buffer = StringIO()
        self._schema = None

        self._local_socket = None
        self._remote_socket = None

        self.on_data = self.on_local_data
        self.on_close = self.on_local_closed

    def on_data(self, data):
        self.on_local_data(data)

    def on_connect(self, sock, address):
        self._local_socket = sock
        # logger.debug("New proxy connection established with %s" % str(self.address))

    @silent_fail(log=True)
    def on_local_data(self, data):
        if len(data) == 0:
            return

        if self._schema is not None:
            if hasattr(self._schema, 'on_local_data'):
                return self._schema.on_local_data(data)
        else:
            self._buffer.write(data)
            schema = self._check_for_schema()
            if schema is not None:
                self._schema = schema(self, self._buffer)

    @silent_fail(log=True)
    def on_remote_data(self, data):
        if len(data) == 0:
            return

        if hasattr(self._schema, 'on_remote_data'):
            return self._schema.on_remote_data(data)

    @silent_fail(log=True)
    def on_local_closed(self):
        if self._remote_socket is None:
            return

        try:
            self._remote_socket.close()
        except socket.error:
            pass

    @silent_fail(log=True)
    def on_remote_closed(self):
        try:
            self._local_socket.close()
        except socket.error:
            pass

    def start_remote(self, sock):
        self._remote_socket = sock

        def remote_reader():
            try:
                while True:
                    buff = sock.recv(1024)
                    gevent.sleep(0)
                    if not buff:
                        self.on_remote_closed()
                        break

                    self.on_remote_data(buff)
            except Exception as e:
                logger.error(e)

        gevent.spawn(remote_reader)

    def send_remote(self, data):
        if len(data) == 0:
            return
        self._remote_socket.send(data)

    def send_local(self, data):
        if len(data) == 0:
            return
        self._local_socket.send(data)

    def close_local(self):
        self._local_socket.close()

    def close_remote(self):
        self._remote_socket.close()

    def _check_for_schema(self):
        buff = self._buffer.getvalue()
        if '\n' in buff:
            self._buffer.seek(0)
            firstline = self._buffer.readline()
            match = re.match("(?:GET|POST|PUT|DELETE|HEAD) (.+) \w+",
                             firstline)
            if match is not None:
                return HttpSchema
            match = re.match("(?:CONNECT) (.+) \w*", firstline)
            if match is not None:
                return SSLSchema
        return None
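
The schema sniffing in _check_for_schema only ever looks at the first buffered line. A minimal standalone sketch of the same dispatch, with hypothetical request lines and string results standing in for the HttpSchema/SSLSchema classes:

import re
from io import StringIO

def sniff_schema(buff):
    """Return 'http', 'ssl', or None based on the first buffered line."""
    if '\n' not in buff:
        return None  # not enough data yet
    firstline = StringIO(buff).readline()
    if re.match(r"(?:GET|POST|PUT|DELETE|HEAD) (.+) \w+", firstline):
        return 'http'
    if re.match(r"(?:CONNECT) (.+) \w*", firstline):
        return 'ssl'
    return None

print(sniff_schema("GET /index.html HTTP/1.1\r\nHost: x\r\n"))  # http
print(sniff_schema("CONNECT example.com:443 HTTP/1.1\r\n"))     # ssl
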
Example #47
0
class FakePSU(object):
    """
    Fake PSU serial object for testing
    """
    def __init__(self, port, baud):
        self.port = port
        self.baud = baud
        self.serbuf = StringIO()
        self.outbuf = StringIO()
        self.in_waiting = 0
        self.cmd_lookup = {
            1: self.setTE,
            2: self.setOEP,
            3: self.setOEN,
            4: self.setVP,
            5: self.setAP,
            6: self.setVN,
            7: self.setAN,
            8: self.getSettings,
            9: self.setACH,
            10: self.setACL
        }

        self.vals = [0] * 11

    def inWaiting(self):
        return self.in_waiting

    def setTE(self, val):
        self.vals[4] = val
        self.writeOut('ACK\n')

    def setOEP(self, val):
        self.vals[5] = val
        self.writeOut('ACK\n')

    def setOEN(self, val):
        self.vals[6] = val
        self.writeOut('ACK\n')

    def setVP(self, val):
        self.vals[0] = val
        self.writeOut('ACK\n')

    def setVN(self, val):
        self.vals[1] = val
        self.writeOut('ACK\n')

    def setAP(self, val):
        self.vals[2] = val
        self.writeOut('ACK\n')

    def setAN(self, val):
        self.vals[3] = val
        self.writeOut('ACK\n')

    def setACH(self, chan):
        self.vals[6 + chan] = 1
        self.writeOut('ACK\n')

    def setACL(self, chan):
        self.vals[6 + chan] = 0
        self.writeOut('ACK\n')

    def getSettings(self, val):
        self.writeOut("V:%s,%s A:%s,%s S:0,0,0 R:%s,%s,%s,%s,%s,%s,%s\n" %
                      tuple(self.vals))

    def runCmd(self, cmd):
        if ',' in cmd:
            c, p = cmd.split(',', 1)
            c, p = int(c), int(p)
            self.cmd_lookup[c](p)

    def write(self, string):
        self.serbuf.write(string)
        if '\n' in string:
            # Execute a command
            b = self.serbuf.getvalue().split('\n', 1)
            q = b[0]
            self.serbuf = StringIO(b[-1])
            self.serbuf.seek(self.serbuf.len)
            self.runCmd(q)

    def writeOut(self, string):
        cur = self.outbuf.tell()
        self.outbuf.seek(self.outbuf.len)
        self.outbuf.write(string)
        self.outbuf.seek(cur)

        self.in_waiting = self.outbuf.len - cur

    def readline(self):
        bread = self.outbuf.readline()
        self.in_waiting -= len(bread)
        return bread

    def read(self, n=-1):
        bread = self.outbuf.read(n=n)
        self.in_waiting -= len(bread)
        return bread
Example #48
0
 def readline(self, *args, **kwargs):
     result = StringIO.readline(self, *args, **kwargs)
     self._stdout.write(result)
     return result
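
This fragment relies on Python 2's old-style StringIO class, where StringIO.readline(self, ...) works as an unbound-method call. A sketch of the same tee-on-read idea for Python 3, using a hypothetical io.StringIO subclass:

import io
import sys

class EchoStringIO(io.StringIO):
    """StringIO that echoes every line it hands out to a side stream."""
    def __init__(self, text, stdout=sys.stdout):
        super().__init__(text)
        self._stdout = stdout

    def readline(self, *args, **kwargs):
        result = super().readline(*args, **kwargs)
        self._stdout.write(result)  # tee the line before returning it
        return result

buf = EchoStringIO("one\ntwo\n")
assert buf.readline() == "one\n"  # the line is also written to stdout
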
Example #49
0
    def _parse_stat(self, data):
        """ Parse the output from the 'stat' 4letter word command """
        h = StringIO(data)

        result = {}

        version = h.readline()
        if version:
            result['zk_version'] = version[version.index(':') + 1:].strip()

        # skip all lines until we find the empty one
        while h.readline().strip():
            pass

        for line in h.readlines():
            m = re.match('Latency min/avg/max: (\d+)/(\d+)/(\d+)', line)
            if m is not None:
                result['zk_min_latency'] = int(m.group(1))
                result['zk_avg_latency'] = int(m.group(2))
                result['zk_max_latency'] = int(m.group(3))
                continue

            m = re.match('Received: (\d+)', line)
            if m is not None:
                result['zk_packets_received'] = int(m.group(1))
                continue

            m = re.match('Sent: (\d+)', line)
            if m is not None:
                result['zk_packets_sent'] = int(m.group(1))
                continue

            m = re.match('Alive connections: (\d+)', line)
            if m is not None:
                result['zk_num_alive_connections'] = int(m.group(1))
                continue

            m = re.match('Outstanding: (\d+)', line)
            if m is not None:
                result['zk_outstanding_requests'] = int(m.group(1))
                continue

            m = re.match('Mode: (.*)', line)
            if m is not None:
                result['zk_server_state'] = m.group(1)
                continue

            m = re.match('Node count: (\d+)', line)
            if m is not None:
                result['zk_znode_count'] = int(m.group(1))
                continue

            m = re.match('Watch count: (\d+)', line)
            if m is not None:
                result['zk_watch_count'] = int(m.group(1))
                continue

            m = re.match('Ephemerals count: (\d+)', line)
            if m is not None:
                result['zk_ephemerals_count'] = int(m.group(1))
                continue

            m = re.match('Approximate data size: (\d+)', line)
            if m is not None:
                result['zk_approximate_data_size'] = int(m.group(1))
                continue

            m = re.match('Open file descriptor count: (\d+)', line)
            if m is not None:
                result['zk_open_file_descriptor_count'] = int(m.group(1))
                continue

            m = re.match('Max file descriptor count: (\d+)', line)
            if m is not None:
                result['zk_max_file_descriptor_count'] = int(m.group(1))
                continue

            m = re.match('Zxid: (0x[0-9a-fA-F]+)', line)
            if m is not None:
                result['zk_zxid'] = m.group(1)
                result['zk_zxid_counter'] = int(m.group(1), 16) & int(
                    '0xffffffff', 16)  # lower 32 bits
                result['zk_zxid_epoch'] = int(m.group(1),
                                              16) >> 32  # high 32 bits
                continue

        return result
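
The parser above can be exercised without a live ZooKeeper. A minimal sketch of its core reading pattern (version line, skip to the blank line, then key/value lines), run over a hypothetical 'stat' response:

import re
from io import StringIO

# Hypothetical 'stat' output, shaped the way the parser expects.
sample = ("Zookeeper version: 3.4.13-deadbeef, built on 06/29/2018\n"
          "Clients:\n"
          " /127.0.0.1:50388[1](queued=0,recved=1,sent=0)\n"
          "\n"
          "Latency min/avg/max: 0/1/10\n"
          "Mode: standalone\n")

h = StringIO(sample)
result = {}
version = h.readline()
if version:
    result['zk_version'] = version[version.index(':') + 1:].strip()
while h.readline().strip():  # skip the client list, up to the empty line
    pass
for line in h.readlines():
    m = re.match(r'Latency min/avg/max: (\d+)/(\d+)/(\d+)', line)
    if m is not None:
        result['zk_min_latency'] = int(m.group(1))
        result['zk_avg_latency'] = int(m.group(2))
        result['zk_max_latency'] = int(m.group(3))

print(result)
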
Example #50
0
    def _parse_stat(self, data):
        """ Parse the output from the 'stat' 4letter word command """
        global ZK_METRICS, ZK_LAST_METRICS

        h = StringIO(data)

        result = {}

        version = h.readline()
        if version:
            result['zk_version'] = version[version.index(':') + 1:].strip()

        # skip all lines until we find the empty one
        while h.readline().strip():
            pass

        for line in h.readlines():
            m = re.match('Latency min/avg/max: (\d+)/(\d+)/(\d+)', line)
            if m is not None:
                result['zk_min_latency'] = int(m.group(1))
                result['zk_avg_latency'] = int(m.group(2))
                result['zk_max_latency'] = int(m.group(3))
                continue

            m = re.match('Received: (\d+)', line)
            if m is not None:
                cur_packets = int(m.group(1))
                packet_delta = cur_packets - ZK_LAST_METRICS['data'].get(
                    'zk_packets_received_total', cur_packets)
                time_delta = ZK_METRICS['time'] - ZK_LAST_METRICS['time']
                try:
                    result['zk_packets_received_total'] = cur_packets
                    result['zk_packets_received'] = packet_delta / float(
                        time_delta)
                except ZeroDivisionError:
                    result['zk_packets_received'] = 0
                continue

            m = re.match('Sent: (\d+)', line)
            if m is not None:
                cur_packets = int(m.group(1))
                packet_delta = cur_packets - ZK_LAST_METRICS['data'].get(
                    'zk_packets_sent_total', cur_packets)
                time_delta = ZK_METRICS['time'] - ZK_LAST_METRICS['time']
                try:
                    result['zk_packets_sent_total'] = cur_packets
                    result['zk_packets_sent'] = packet_delta / float(
                        time_delta)
                except ZeroDivisionError:
                    result['zk_packets_sent'] = 0
                continue

            m = re.match('Outstanding: (\d+)', line)
            if m is not None:
                result['zk_outstanding_requests'] = int(m.group(1))
                continue

            m = re.match('Mode: (.*)', line)
            if m is not None:
                result['zk_server_state'] = m.group(1)
                continue

            m = re.match('Node count: (\d+)', line)
            if m is not None:
                result['zk_znode_count'] = int(m.group(1))
                continue

        return result
Example #51
0
class HTTPRequest(BaseHTTPRequestHandler):
    def __init__(self, request_text):
        self.rfile = StringIO(request_text)
        self.raw_requestline = self.rfile.readline()
        self.error_code = self.error_message = None
        self.parse_request()
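
This is the classic trick for parsing a raw request string without a socket, written for Python 2 where string headers suffice. A sketch of the same idea on Python 3, where BaseHTTPRequestHandler expects bytes, so BytesIO replaces StringIO:

from http.server import BaseHTTPRequestHandler
from io import BytesIO

class HTTPRequestPy3(BaseHTTPRequestHandler):
    def __init__(self, request_bytes):
        self.rfile = BytesIO(request_bytes)
        self.raw_requestline = self.rfile.readline()
        self.error_code = self.error_message = None
        self.parse_request()

req = HTTPRequestPy3(b"GET /index.html HTTP/1.1\r\nHost: example.com\r\n\r\n")
print(req.command, req.path)  # GET /index.html
print(req.headers['Host'])    # example.com
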
Example #52
0
class PeekFile:
    def __init__(self, fileobj, filename=None, encoding='auto'):
        self.fileobj = fileobj
        self._init_decodeobj(encoding)
        self.name = filename or fileobj.name

    def _init_decodeobj(self, encoding):
        self._encoding = None
        self._decode_start = 0
        self._decode_errs = 0
        if encoding == 'raw':
            self.decodeobj = self.fileobj
        else:
            if encoding == 'auto':
                magic = self.fileobj.read(4)
                # utf32 are tested first, since utf-32le BOM starts the same as utf-16le's.
                if magic in ('\x00\x00\xfe\xff', '\xff\xfe\x00\x00'):
                    self._encoding = 'UTF-32'
                elif magic[:2] in ('\xfe\xff', '\xff\xfe'):
                    self._encoding = 'UTF-16'
                elif magic.startswith('\xef\xbb\xbf'):
                    self._encoding = 'UTF-8'
                    self._decode_start = 3
            if not self._encoding:
                self._encoding = osutil.getencoding(encoding)
        self._reset_decodeobj()

    def _reset_decodeobj(self):
        self.fileobj.seek(self._decode_start)
        if self._encoding:
            self.decodeobj = codecs.getreader(self._encoding)(
                self.fileobj, errors='markbadbytes')
            if not strutil.codec_supports_readline(self._encoding):
                # print 'codec %s doesn't support readline, hacking it.' % self._encoding
                try:
                    self.decodeobj = StringIO(self.decodeobj.read())
                except UnicodeError:
                    self.decodeobj = StringIO(u'')
                    self._decode_errs = 1
        self._prevlineend = None

    def _readline(self, *args):
        line = self.decodeobj.readline(*args)
        # work around corrupted files that have crcrlf line endings.  (With StreamReaders in python versions >= 2.4, you no longer get it all as one line.)
        if self._prevlineend == '\r' and line == '\r\n':
            self._prevlineend = None
            return self._readline(*args)
        self._prevlineend = line[-1:]
        if self._encoding:
            badbytecount = line.count(_badbytesmarker)
            if badbytecount:
                raise UnicodeError('%r codec: %i decode errors' %
                                   (self._encoding, badbytecount))
        return line

    def peek(self, *args):
        self.fileobj.seek(0)
        return self.fileobj.read(*args)

    def peekdecoded(self, *args):
        self._reset_decodeobj()
        try:
            return self.decodeobj.read(*args)
        except UnicodeError:
            self._decode_errs = 1
            return u''

    def peekline(self, *args):
        self._reset_decodeobj()
        try:
            return self._readline(*args)
        except UnicodeError:
            self._decode_errs = 1
            return u''

    def peeknextline(self, *args):
        try:
            return self._readline(*args)
        except UnicodeError:
            self._decode_errs = 1
            return u''

    def _done_peeking(self, raw):
        if raw:
            fileobj = self.fileobj
            fileobj.seek(0)
            del self.decodeobj
        else:
            self._reset_decodeobj()
            fileobj = self.decodeobj
            del self.fileobj
        self.peeknextline = None
        self.peekdecoded = None
        self.peekline = None
        self.peek = None
        self.readline = self._readline
        self.read = fileobj.read
        self.seek = fileobj.seek

    def seek(self, *args):
        self._done_peeking(raw=1)
        return self.seek(*args)

    def readline(self, *args):
        self._done_peeking(raw=0)
        return self._readline(*args)

    def read(self, *args):
        self._done_peeking(raw=1)
        return self.read(*args)
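
The BOM sniffing in _init_decodeobj reads four bytes and compares prefixes; the str literals above are Python 2 byte strings. A standalone sketch of the same detection with explicit bytes literals:

import io

def sniff_encoding(fileobj, default='utf-8'):
    """Guess a binary stream's text encoding from its BOM, if any."""
    magic = fileobj.read(4)
    fileobj.seek(0)
    # utf-32 is tested first, since the utf-32le BOM starts like utf-16le's.
    if magic in (b'\x00\x00\xfe\xff', b'\xff\xfe\x00\x00'):
        return 'UTF-32'
    if magic[:2] in (b'\xfe\xff', b'\xff\xfe'):
        return 'UTF-16'
    if magic.startswith(b'\xef\xbb\xbf'):
        return 'UTF-8'
    return default

print(sniff_encoding(io.BytesIO(b'\xef\xbb\xbfhello')))  # UTF-8
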
Example #53
0
def read_SES3D(file_or_file_object, *args, **kwargs):
    """
    Turns a SES3D file into a obspy.core.Stream object.

    SES3D files do not contain a starttime and thus the first sample will
    always begin at 1970-01-01T00:00:00.

    The data will be a floating point array of the ground velocity in meters
    per second.

    Furthermore every trace will have a trace.stats.ses3d dictionary which
    contains the following six keys:
        * receiver_latitude
        * receiver_longitude
        * receiver_depth_in_m
        * source_latitude
        * source_longitude
        * source_depth_in_m

    The network, station, and location attributes of the trace will be empty,
    and the channel will be set to either 'X' (south component), 'Y' (east
    component), or 'Z' (vertical component).
    """
    # Make sure that it is a file like object.
    if not hasattr(file_or_file_object, "read"):
        with open(file_or_file_object, "rb") as open_file:
            file_or_file_object = StringIO(open_file.read())

    # Read the header.
    component = file_or_file_object.readline().split()[0].lower()
    npts = int(file_or_file_object.readline().split()[-1])
    delta = float(file_or_file_object.readline().split()[-1])
    # Skip receiver location line.
    file_or_file_object.readline()
    rec_loc = file_or_file_object.readline().split()
    rec_x, rec_y, rec_z = map(float, [rec_loc[1], rec_loc[3], rec_loc[5]])
    # Skip the source location line.
    file_or_file_object.readline()
    src_loc = file_or_file_object.readline().split()
    src_x, src_y, src_z = map(float, [src_loc[1], src_loc[3], src_loc[5]])

    # Read the data.
    data = np.array(map(float, file_or_file_object.readlines()),
                    dtype="float32")

    # Setup Obspy Stream/Trace structure.
    tr = Trace(data=data)
    tr.stats.delta = delta
    # Map the channel attributes.
    tr.stats.channel = {"theta": "X", "phi": "Y", "r": "Z"}[component]
    tr.stats.ses3d = AttribDict()
    tr.stats.ses3d.receiver_latitude = rotations.colat2lat(rec_x)
    tr.stats.ses3d.receiver_longitude = rec_y
    tr.stats.ses3d.receiver_depth_in_m = rec_z
    tr.stats.ses3d.source_latitude = rotations.colat2lat(src_x)
    tr.stats.ses3d.source_longitude = src_y
    tr.stats.ses3d.source_depth_in_m = src_z
    # Small check.
    if npts != tr.stats.npts:
        msg = "The sample count specified in the header does not match " + \
            "the actual data count."
        warnings.warn(msg)
    return Stream(traces=[tr])
Example #54
0
    class Builder(object):
        def __init__(self):
            self._buffer = StringIO()
            self._state = 'request'
            self._pos = 0
            self.content_length = 0
            self.http_request = HttpRequest()

        def is_ready(self):
            return self._state == 'done'

        def write(self, data):
            self._buffer.seek(0, os.SEEK_END)
            self._buffer.write(data)
            self._parse()

        def _parse(self):
            while self._pos != self._buffer.len:
                self._buffer.seek(self._pos)

                if self._state == 'body':
                    self._buffer.seek(self._pos)
                    # FIXME: Efficiency
                    data = self._buffer.read()
                    self.content_length -= len(data)
                    self.http_request.body += data
                    self._pos = self._buffer.pos
                    self.http_request.raw = self._buffer.getvalue()

                    if self.content_length <= 0:
                        self._state = 'done'

                    return self.http_request

                line = self._buffer.readline()
                if not line.endswith('\n'):
                    return

                self._pos = self._buffer.pos

                if self._state == 'request':
                    match = re.match("(GET|POST|PUT|DELETE|HEAD) (.+) \w+",
                                     line)
                    if match is None:
                        raise ValueError("Invalid request line: %s" % line)
                    self.http_request.method = match.group(1)
                    self.http_request.path = match.group(2)
                    self._state = 'headers'
                elif self._state == 'headers':
                    match = re.match("^\s*$", line)
                    if match is not None:
                        if self.content_length:
                            self._state = 'body'
                            continue
                        else:
                            self._state = 'done'
                            self.http_request.raw = self._buffer.getvalue()
                            return self.http_request

                    match = re.match("(.+): (.+)", line)
                    if match is None:
                        raise ValueError("Invalid header: %s" % line)
                    self.http_request.headers[match.group(1)] = match.group(2)
                    if match.group(1).lower() == 'content-length':
                        self.content_length = int(match.group(2))
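
The Builder depends on cStringIO's Python 2-only .pos and .len attributes. A minimal sketch of the same resumable readline pattern with io.StringIO, where tell() and seeking to the end stand in for them (the feed function and sample chunks are illustrative):

import io

buf = io.StringIO()
pos = 0  # how far the parser has consumed so far

def feed(data):
    """Append data, then consume any complete lines past `pos`."""
    global pos
    buf.seek(0, io.SEEK_END)
    buf.write(data)
    buf.seek(pos)
    while True:
        line = buf.readline()
        if not line.endswith('\n'):
            return  # incomplete line: wait for more data
        pos = buf.tell()
        print("parsed:", line.rstrip())

feed("GET / HT")      # nothing complete yet
feed("TP/1.1\r\nHo")  # the request line is now complete
feed("st: x\r\n")     # the header line is now complete
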
Example #55
0
    def importer(self, simuler=True):

        #
        # CONFIGURATION VARIABLES
        #

        frequence_commit = 100  # commit every n records

        model = self.type_import  # fetch the model to import, as given in the import type field
        model_obj = self.env[model]

        if model == 'product.template':
            nom_objet = 'article'  # label used in info/error messages
            champ_primaire = 'default_code'  # field used to detect whether the record already exists (then update) or not (then create)
            champ_reference = 'default_code'  # field holding the reference (e.g. product or customer reference), to which the prefix is prepended
        elif model == 'res.partner':
            nom_objet = 'partenaire'  # label used in info/error messages
            champ_primaire = 'ref'  # field used to detect whether the record already exists (then update) or not (then create)
            champ_reference = 'ref'  # field holding the reference (e.g. product or customer reference), to which the prefix is prepended
            # next two fields: fetch the ids of the receivable and payable account types, used to create customer and supplier accounts (usually 411 and 401).
            res_model, data_account_type_receivable_id = self.env[
                'ir.model.data'].get_object_reference(
                    'account', 'data_account_type_receivable')
            res_model, data_account_type_payable_id = self.env[
                'ir.model.data'].get_object_reference(
                    'account', 'data_account_type_payable')
        elif model == 'of.service':
            nom_objet = 'service OpenFire'  # label used in info/error messages
            champ_primaire = 'id'  # field used to detect whether the record already exists (then update) or not (then create)
            champ_reference = ''  # field holding the reference (e.g. product or customer reference), to which the prefix is prepended
        elif model == 'res.partner.bank':
            nom_objet = 'Comptes en banque partenaire'  # label used in info/error messages
            champ_primaire = 'acc_number'  # field used to detect whether the record already exists (then update) or not (then create)
            champ_reference = ''  # field holding the reference (e.g. product or customer reference), to which the prefix is prepended
        elif model == 'crm.lead':
            nom_objet = u'partenaire/opportunité'  # label used in info/error messages
            champ_primaire = 'of_ref'  # field used to detect whether the record already exists (then update) or not (then create)
            champ_reference = 'of_ref'  # field holding the reference (e.g. product or customer reference), to which the prefix is prepended

        # Initialize variables
        champs_odoo = self.get_champs_odoo(
            model
        )  # fetch the list of the model's fields (from ir.model.fields)
        date_debut = time.strftime('%Y-%m-%d %H:%M:%S')

        if simuler:
            sortie_succes = sortie_avertissement = sortie_erreur = u"SIMULATION - Rien n'a été créé/modifié.\n"
        else:
            sortie_succes = sortie_avertissement = sortie_erreur = u""

        nb_total = 0
        nb_ajout = 0
        nb_maj = 0
        nb_echoue = 0
        erreur = 0

        #
        # READ THE IMPORT FILE
        #

        # Read the import file with python's csv library
        fichier = base64.decodestring(self.file)
        dialect = csv.Sniffer().sniff(
            fichier
        )  # automatically guess the parameters: delimiter character, line-ending style, ...
        fichier = StringIO(fichier)

        # Use the separator given in the form when set.
        # Otherwise take the one guessed by the csv library; if that is blank, default to ';'.

        if self.separateur and self.separateur.strip(' '):
            dialect.delimiter = str(
                self.separateur.strip(' ').replace('\\t', '\t'))
        else:
            if dialect.delimiter and dialect.delimiter.strip(' '):
                self.separateur = dialect.delimiter.replace('\t', '\\t')
            else:
                self.separateur = dialect.delimiter = ';'

        #
        # ANALYSE THE FIELDS OF THE IMPORT FILE
        #

        # Read the file's first line (the field list) to check whether some fields
        # are present more than once
        ligne = fichier.readline().strip().decode('utf8', 'ignore').split(
            dialect.delimiter)  # Fields of the import file's first line

        # Check that the primary field is present in the import file (when a primary field is defined)
        if champ_primaire and champ_primaire not in ligne:
            erreur = 1
            sortie_erreur += u"Le champ référence qui permet d'identifier un %s (%s) n'est pas dans le fichier d'import.\n" % (
                nom_objet, champ_primaire)

        # Check whether some fields of the import file are present more than once,
        # and detect the relation field (id, external id, name)
        doublons = {}
        for champ_fichier in ligne:
            # Strip the text delimiters (usually quotes) from both ends of the string.
            champ_fichier = champ_fichier.strip(dialect.quotechar)

            # Get the relation field if one is given (in the field name, after a /).
            # With no '/', the slice start falls back to len() and yields ''.
            champ_relation = champ_fichier[champ_fichier.rfind('/') + 1
                                           or len(champ_fichier):].strip()

            if champ_relation:  # If defined, remove it from the field name.
                champ_fichier = champ_fichier[
                    0:champ_fichier.rfind('/') if champ_fichier.
                    rfind('/') != -1 else len(champ_fichier)].strip()

            if champ_fichier in doublons:
                doublons[champ_fichier] = doublons[champ_fichier] + 1
            else:
                doublons[champ_fichier] = 1

            # Check it is a field of the model (otherwise warn that the column is ignored on import)
            if champ_fichier not in champs_odoo:
                sortie_avertissement += u"Info : colonne \"%s\" dans le fichier d'import non reconnue. Ignorée lors de l'import.\n" % champ_fichier
            else:
                # Check that the relation field (if given) is valid.
                if champ_relation and champs_odoo[champ_fichier][
                        'type'] == 'many2one' and not champs_odoo[
                            champ_fichier]['relation_champ']:
                    if not self.env['ir.model.fields'].search([
                            '&', ('model', '=', model),
                        ('name', '=', champ_relation)
                    ]):
                        sortie_erreur += u"Le champ relation \"%s\" (après le /) de la colonne \"%s\" n'existe pas.\n" % (
                            champ_relation, champ_fichier)
                        erreur = 1
                    else:
                        champs_odoo[champ_fichier][
                            'relation_champ'] = champ_relation
                elif champ_relation:
                    sortie_erreur += u"Un champ relation (après le /) dans la colonne \"%s\" n'est pas possible pour ce champ.\n" % champ_fichier
                    erreur = 1

        for champ_fichier in doublons:
            # Report an error if the field is a known importable field and appears more than once
            if champ_fichier in champs_odoo and doublons[champ_fichier] > 1:
                sortie_erreur += u"La colonne \"%s\" dans le fichier d'import existe en %s exemplaires.\n" % (
                    champ_fichier, doublons[champ_fichier])
                erreur = 1

        if erreur:  # Stop on error
            self.write({
                'nb_total': nb_total,
                'nb_ajout': nb_ajout,
                'nb_maj': nb_maj,
                'nb_echoue': nb_echoue,
                'sortie_succes': sortie_succes,
                'sortie_avertissement': sortie_avertissement,
                'sortie_erreur': sortie_erreur
            })
            return

        # Add a separator (underscore) between the prefix and the reference if not already present.
        prefixe = self.prefixe and self.prefixe.encode("utf-8") or ''
        if prefixe and prefixe[-1:] != '_':
            prefixe = prefixe + '_'

        fichier.seek(0, 0)  # Rewind to the start of the file
        # Returns one dict per row, keyed by field name
        fichier = csv.DictReader(fichier, dialect=dialect)

        doublons = {}  # Used to detect records duplicated in the import file
        i = 1  # Line number

        #
        # IMPORT RECORD BY RECORD
        #

        # Walk through the file record by record
        for ligne in fichier:
            ligne = {
                key.decode('utf8', 'ignore'): value.decode('utf8', 'ignore')
                for key, value in ligne.iteritems()
            }
            i = i + 1
            nb_total = nb_total + 1
            erreur = 0

            # Prepend the prefix to the value of the record's reference field (if any).
            if champ_reference and champ_reference in ligne:
                ligne[champ_reference] = prefixe + ligne[champ_reference]

            #
            # LOOP OVER ALL FIELDS OF THE RECORD
            #

            # Collects the field values to import (passed to create or write)
            valeurs = {}

            # Loop over all the fields of the line
            for champ_fichier in ligne:

                # For product imports, the price field is the purchase cost that goes
                # into the supplier price. Ignore it here: it is picked up together
                # with the seller_ids (supplier) field, see below.
                if model == 'product.template' and champ_fichier == 'price':
                    continue

                # If the field name contains a relation field, i.e. ends with /field_name, remove it.
                champ_fichier_sansrel = champ_fichier[
                    0:champ_fichier.rfind('/') if champ_fichier.
                    rfind('/') != -1 else len(champ_fichier)].strip()

                # Only handle import-file fields that are fields of the model
                # (unknown fields of the import file are ignored).
                if champ_fichier_sansrel in champs_odoo:
                    # Field value: strip leading and trailing spaces (the line was decoded to utf8 above).
                    ligne[champ_fichier] = ligne[champ_fichier].strip()

                    # "#vide" is the file's marker for "explicitly empty": normalise its case.
                    if ligne[champ_fichier].strip().lower() == "#vide":
                        ligne[champ_fichier] = "#vide"

                    #
                    # CHECK THE INTEGRITY OF FIELD VALUES
                    # FOR CRITERIA THAT DO NOT DEPEND ON THE FIELD TYPE
                    #

                    # If the field is required, check that it is filled in
                    if champs_odoo[champ_fichier_sansrel]['requis'] and (
                            ligne[champ_fichier] == ""
                            or ligne[champ_fichier] == "#vide"):
                        sortie_erreur += u"Ligne %s : champ %s (%s) vide alors que requis. %s non importé.\n" % (
                            i,
                            champs_odoo[champ_fichier_sansrel]['description'],
                            champ_fichier, nom_objet.capitalize())
                        erreur = 1

                    # If the relation field is an id, check the value is an integer
                    if champs_odoo[champ_fichier_sansrel][
                            'relation_champ'] == 'id':
                        try:
                            int(ligne[champ_fichier])
                        except ValueError:
                            sortie_erreur += u"Ligne %s : champ %s (%s) n'est pas un id (nombre entier) alors que le champ relation (après le /) est un id. %s non importé.\n" % (
                                i, champs_odoo[champ_fichier_sansrel]
                                ['description'], champ_fichier,
                                nom_objet.capitalize())
                            erreur = 1
                            continue

                    #
                    # FORMAT AND CHECK THE INTEGRITY OF FIELD VALUES
                    # FOR CRITERIA THAT DEPEND ON THE FIELD TYPE
                    #

                    # If it is a float
                    if champs_odoo[champ_fichier_sansrel]['type'] == 'float':
                        ligne[champ_fichier] = ligne[champ_fichier].replace(
                            ',', '.')
                        try:
                            float(ligne[champ_fichier])
                            valeurs[champ_fichier_sansrel] = ligne[
                                champ_fichier]
                        except ValueError:
                            sortie_erreur += u"Ligne %s : champ %s (%s) n'est pas un nombre. %s non importé.\n" % (
                                i, champs_odoo[champ_fichier_sansrel]
                                ['description'], champ_fichier_sansrel,
                                nom_objet.capitalize())
                            erreur = 1

                    # If it is a selection field
                    elif champs_odoo[champ_fichier_sansrel][
                            'type'] == 'selection':
                        # Check that the value is one of the allowed selection keys.
                        if ligne[champ_fichier] not in dict(
                                self.env[model]._fields[champ_fichier].
                                selection):
                            sortie_erreur += u"Ligne %s : champ %s (%s) valeur \"%s\" non autorisée. %s non importé.\n" % (
                                i, champs_odoo[champ_fichier_sansrel]
                                ['description'], champ_fichier,
                                ligne[champ_fichier], nom_objet.capitalize())
                            erreur = 1
                        else:
                            valeurs[champ_fichier_sansrel] = ligne[
                                champ_fichier]

                    # If it is a boolean
                    elif champs_odoo[champ_fichier_sansrel][
                            'type'] == 'boolean':
                        if ligne[champ_fichier].upper() in ('1', "TRUE",
                                                            "VRAI"):
                            ligne[champ_fichier] = True
                        elif ligne[champ_fichier].upper() in ('0', "FALSE",
                                                              "FAUX"):
                            ligne[champ_fichier] = False
                        else:
                            sortie_erreur += u"Ligne %s : champ %s (%s) valeur \"%s\" non autorisée (admis 0, 1, True, False, vrai, faux). %s non importé.\n" % (
                                i, champs_odoo[champ_fichier_sansrel]
                                ['description'], champ_fichier,
                                ligne[champ_fichier], nom_objet.capitalize())
                            erreur = 1

                    # If it is a many2one
                    elif champs_odoo[champ_fichier_sansrel][
                            'type'] == 'many2one':
                        if ligne[champ_fichier] == "#vide" and not champs_odoo[
                                champ_fichier_sansrel]['requis']:
                            # If the field is not required and marked "#vide", store an empty value.
                            valeurs[champ_fichier_sansrel] = ""
                        elif ligne[champ_fichier] != "":
                            # For partner imports, if this is the customer or supplier account field,
                            # check that it points to an existing account
                            if model == 'res.partner' and champ_fichier == 'property_account_receivable_id':
                                res_ids = self.env[
                                    champs_odoo[champ_fichier_sansrel]
                                    ['relation']].with_context(
                                        active_test=False).search([
                                            '&',
                                            ('code', '=',
                                             ligne[champ_fichier]),
                                            ('internal_type', '=',
                                             'receivable')
                                        ])
                            elif model == 'res.partner' and champ_fichier == 'property_account_payable_id':
                                res_ids = self.env[
                                    champs_odoo[champ_fichier_sansrel]
                                    ['relation']].with_context(
                                        active_test=False).search([
                                            '&',
                                            ('code', '=',
                                             ligne[champ_fichier]),
                                            ('internal_type', '=', 'payable')
                                        ])
                            else:
                                if ligne[champ_fichier] == "#vide":
                                    res_ids = ""
                                else:
                                    res_ids = self.env[
                                        champs_odoo[champ_fichier_sansrel]
                                        ['relation']].with_context(
                                            active_test=False
                                        ).search([
                                            (champs_odoo[champ_fichier_sansrel]
                                             ['relation_champ'] or 'name', '=',
                                             ligne[champ_fichier])
                                        ])

                            if len(res_ids) == 1:
                                valeurs[champ_fichier_sansrel] = res_ids.id
                            elif len(res_ids) > 1:
                                sortie_erreur += u"Ligne %s : champ %s (%s) valeur \"%s\" a plusieurs correspondances. %s non importé.\n", (
                                    i, champs_odoo[champ_fichier_sansrel]
                                    ['description'],
                                    champ_fichier, ligne[champ_fichier],
                                    nom_objet.capitalize())
                                erreur = 1
                            else:
                                # For partner imports with an account field (customer or supplier), create the account.
                                if model == 'res.partner' and champ_fichier == 'property_account_receivable_id' and 'name' in ligne:
                                    if not simuler:
                                        valeurs[champ_fichier_sansrel] = self.env[
                                            champs_odoo[champ_fichier_sansrel]
                                            ['relation']].create({
                                                'name':
                                                ligne['name'],
                                                'code':
                                                ligne[champ_fichier],
                                                'reconcile':
                                                True,
                                                'user_type_id':
                                                data_account_type_receivable_id
                                            })
                                elif model == 'res.partner' and champ_fichier == 'property_account_payable_id' and 'name' in ligne:
                                    if not simuler:
                                        valeurs[
                                            champ_fichier_sansrel] = self.env[
                                                champs_odoo[
                                                    champ_fichier_sansrel]
                                                ['relation']].create({
                                                    'name':
                                                    ligne['name'],
                                                    'code':
                                                    ligne[champ_fichier],
                                                    'reconcile':
                                                    True,
                                                    'user_type_id':
                                                    data_account_type_payable_id
                                                })
                                elif ligne[champ_fichier] == "#vide":
                                    valeurs[champ_fichier_sansrel] = ''
                                else:
                                    sortie_erreur += u"Ligne %s : champ %s (%s) valeur \"%s\" n'a pas de correspondance. %s non importé.\n" % (
                                        i, champs_odoo[champ_fichier_sansrel]
                                        ['description'], champ_fichier,
                                        ligne[champ_fichier],
                                        nom_objet.capitalize())
                                    erreur = 1

                    # If it is a one2many
                    elif champs_odoo[champ_fichier_sansrel][
                            'type'] == 'one2many':
                        # Supplier case for products: an intermediate object holds one record per
                        # product. Create the supplier there, filling in the purchase price
                        if model == 'product.template' and champ_fichier == 'seller_ids':
                            res_ids = self.env['res.partner'].search([
                                '&', ('name', '=', ligne[champ_fichier]),
                                ('supplier', '=', True)
                            ])
                            if len(res_ids) == 1:
                                if 'price' in ligne:
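                                    # Odoo x2many commands: (5, ) clears the existing
                                    # supplier lines, (0, 0, vals) creates a new one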
                                    valeurs[champ_fichier_sansrel] = [
                                        (5, ),
                                        (0, 0, {
                                            'name':
                                            res_ids.id,
                                            'price':
                                            ligne['price'].replace(',', '.')
                                        })
                                    ]
                                else:
                                    valeurs[champ_fichier_sansrel] = [
                                        (5, ), (0, 0, {
                                            'name': res_ids.id
                                        })
                                    ]
                            elif len(res_ids) > 1:
                                sortie_erreur += u"Ligne %s : champ %s (%s) valeur \"%s\" a plusieurs correspondances. %s non importé.\n" % (
                                    i, champs_odoo[champ_fichier_sansrel]
                                    ['description'], champ_fichier,
                                    ligne[champ_fichier].strip(),
                                    nom_objet.capitalize())
                                erreur = 1
                            else:
                                sortie_erreur += u"Ligne %s : champ %s (%s) valeur \"%s\" n'a pas de correspondance. %s non importé.\n" % (
                                    i, champs_odoo[champ_fichier_sansrel]
                                    ['description'], champ_fichier,
                                    ligne[champ_fichier].strip(),
                                    nom_objet.capitalize())
                                erreur = 1

                    # If it is a many2many
                    elif champs_odoo[champ_fichier_sansrel][
                            'type'] == 'many2many':
                        # Behaves like tags: several can be imported, separated by commas.
                        # E.g.: tag1, tag2, tag3
                        tag_ids = []
                        # If the import field holds data
                        if ligne[champ_fichier] and ligne[
                                champ_fichier] != "#vide":
                            # Split the tags on the commas
                            ligne[champ_fichier] = ligne[champ_fichier].split(',')
                            # Loop over the tags to import
                            for tag in ligne[champ_fichier]:
                                # Check whether the tag exists.
                                res_ids = self.env[
                                    champs_odoo[champ_fichier_sansrel]
                                    ['relation']].with_context(
                                        active_test=False).search([
                                            (champs_odoo[champ_fichier_sansrel]
                                             ['relation_champ']
                                             or 'name', '=', tag.strip())
                                        ])
                                if len(res_ids) == 1:
                                    tag_ids.append(res_ids.id)
                                elif len(res_ids) > 1:
                                    sortie_erreur += u"Ligne %s : champ %s (%s) valeur \"%s\" a plusieurs correspondances. %s non importé.\n" % (
                                        i, champs_odoo[champ_fichier_sansrel]
                                        ['description'], champ_fichier,
                                        tag.strip(), nom_objet.capitalize())
                                    erreur = 1
                                else:
                                    sortie_erreur += u"Ligne %s : champ %s (%s) valeur \"%s\" n'a pas de correspondance. %s non importé.\n" % (
                                        i, champs_odoo[champ_fichier_sansrel]
                                        ['description'], champ_fichier,
                                        tag.strip(), nom_objet.capitalize())
                                    erreur = 1
                        if not erreur:
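                            # Odoo link commands: (5, ) removes all links,
                            # (6, 0, ids) replaces them with exactly these ids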
                            if ligne[champ_fichier] == "#vide":
                                valeurs[champ_fichier_sansrel] = [(5, )]
                            elif ligne[champ_fichier]:
                                valeurs[champ_fichier_sansrel] = [(6, 0,
                                                                   tag_ids)]

                    # For every other field type (char, text, date, ...)
                    # just take the value, with no special handling
                    else:
                        if ligne[champ_fichier] == "#vide":
                            valeurs[champ_fichier_sansrel] = ''
                        elif ligne[champ_fichier] != '':
                            valeurs[champ_fichier_sansrel] = ligne[
                                champ_fichier]

            if erreur:  # Do not save if an error occurred.
                nb_echoue = nb_echoue + 1
                continue

            # Check whether this record was already imported (reference present more
            # than once in the import file). If so, skip it.
            if champ_primaire:
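                # doublons maps each primary-field value to [count, "line numbers"]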
                if ligne[champ_primaire] in doublons:
                    doublons[ligne[champ_primaire]][0] = doublons[
                        ligne[champ_primaire]][0] + 1
                    doublons[ligne[champ_primaire]][1] = doublons[
                        ligne[champ_primaire]][1] + ", " + str(i)
                    nb_echoue = nb_echoue + 1
                    continue
                else:
                    doublons[ligne[champ_primaire]] = [1, str(i)]

                # Check whether the record already exists in the database
                res_objet_ids = model_obj.with_context(
                    active_test=False).search([(champ_primaire, '=',
                                                ligne[champ_primaire])])
            else:
                res_objet_ids = ""  # With no primary field defined, only creations are done.

            libelle_ref = u"réf. " + ligne[
                champ_reference] if champ_reference else ligne[
                    champ_primaire] if champ_primaire else ligne[
                        'name'] if 'name' in ligne else ''

            if not res_objet_ids:
                # The record does not exist in the database: import it (creation).
                # But on creation, check that all required Odoo fields have been filled in.
                for cle in champs_odoo:
                    if champs_odoo[cle]['requis'] and cle not in valeurs:
                        sortie_erreur += u"Ligne %s : champ %s (%s) obligatoire mais non présent dans le fichier d'import. %s non importé.\n" % (
                            i, champs_odoo[cle]['description'], cle,
                            nom_objet.capitalize())
                        erreur = 1

                if erreur:  # Do not save if an error occurred.
                    nb_echoue = nb_echoue + 1
                    continue

                try:
                    if not simuler:
                        model_obj.create(valeurs)
                    nb_ajout = nb_ajout + 1
                    sortie_succes += u"Création %s %s (ligne %s)\n" % (
                        nom_objet, libelle_ref, i)
                except Exception, exp:
                    sortie_erreur += u"Ligne %s : échec création %s %s - Erreur : %s\n" % (
                        i, nom_objet, libelle_ref, exp)
                    nb_echoue = nb_echoue + 1

            elif len(res_objet_ids) == 1:
                # Exactly one record in the database has this reference: update it.
                try:
                    if not simuler:
                        res_objet_ids.write(valeurs)
                    nb_maj = nb_maj + 1
                    sortie_succes += u"MAJ %s %s (ligne %s)\n" % (
                        nom_objet, libelle_ref, i)
                except Exception, exp:
                    sortie_erreur += u"Ligne %s : échec mise à jour %s %s - Erreur : %s\n" % (
                        i, nom_objet, libelle_ref, exp)
                    nb_echoue = nb_echoue + 1
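The importer above leans on the sniff-then-rewind pattern: csv.Sniffer guesses the dialect from the raw text, then a DictReader parses that same text with the guessed dialect. A minimal sketch of just that pattern; the sample data and field names are made up.

import csv
from StringIO import StringIO

raw = "ref;name\nP1_001;Widget\nP1_002;Gadget\n"   # fabricated sample
dialect = csv.Sniffer().sniff(raw)                 # guess delimiter, quoting, ...
reader = csv.DictReader(StringIO(raw), dialect=dialect)
for row in reader:
    print row['ref'], row['name']                  # one dict per data line, keyed by header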
Example #56
0
class HttpConnection(object):
    def __init__(self, sock):
        self._buffer = StringIO()
        self._socket = sock
        self.url = None
        self.headers = {}

    @silent_fail(log=True)
    def on_data(self, data):
        try:
            self._buffer.write(data)
            all_data = self._buffer.getvalue()
            if '\r\n\r\n' in all_data:
                if self._parse_request():
                    self._send_upstream_request()
        except:
            self._socket.close()
            raise

    def on_close(self):
        pass

    def send(self, data):
        self._socket.send(data)

    def _parse_request(self):
        self._buffer.seek(0)
        request_line = self._buffer.readline(
        )  # e.g. GET /v?v='http://www.nbc.com' HTTP/1.1

        match = re.match("(GET|POST|PUT|DELETE|HEAD) (.+) .+", request_line)
        if match is None:
            raise ValueError("Invalid HTTP request %s" % request_line)

        self.method = match.group(1)  # e.g. GET

        whole_url = match.group(2)  # e.g. /?v='http://www.nbc.com'

        parsed_whole_url = urlparse.urlparse(whole_url)
        params = urlparse.parse_qs(parsed_whole_url.query)

        url_param = params.get('v', None)  # e.g. http://www.nbc.com
        if url_param is None or len(url_param) == 0:
            logging.warning("Skipping %s" % whole_url.strip())
            return
            # raise ValueError("No request url received in HTTP request")
        self.url = url_param[0]

        for line in self._buffer.readlines():
            match = re.match("(.+):\\s*(.+)", line.strip())
            if match is None:
                continue
            self.headers[match.group(1)] = match.group(2)

        return True

    def _send_upstream_request(self):
        def on_response(response):
            raw_response = response.get_raw()
            raw_response = re.sub(
                '((?:src|href|ng-src|ng-href)\s*=\s*)[\'"]((?:(?:http|https):/)?.*/.*)[\'"]',
                r'\1"http://127.0.0.1:9000/?v=\2"', raw_response)
            self.send(raw_response)
            self._socket.close()

        logging.info("%s %s" % (self.method, self.url))
        request(self.url,
                method=self.method,
                headers=self.headers,
                callback=on_response)
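The request-line handling in _parse_request boils down to one regex plus urlparse. A standalone sketch of that step, using the same regex but an invented request line:

import re
import urlparse

request_line = "GET /?v=http://www.example.com/page HTTP/1.1"   # fabricated
match = re.match("(GET|POST|PUT|DELETE|HEAD) (.+) .+", request_line)
params = urlparse.parse_qs(urlparse.urlparse(match.group(2)).query)
print match.group(1), params.get('v')   # GET ['http://www.example.com/page']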
Example #57
0
class FileWrapper:
    def __init__(self, fp, pre='', post=''):
        self.fp = fp
        self.pre = StringIO(pre)
        self.post = StringIO(post)
        self.closed = False
        self.mode = "r"

    def read(self, bytes=sys.maxint):
        bytes = int(bytes)
        if self.closed: raise ValueError("I/O operation on closed file")

        preBytes = self.pre.read(bytes)
        if len(preBytes) < bytes:
            fpBytes = self.fp.read(bytes - len(preBytes))
        else:
            fpBytes = ''

        if len(preBytes) + len(fpBytes) < bytes:
            postBytes = self.post.read(bytes - (len(preBytes) + len(fpBytes)))
        else:
            postBytes = ''

        return preBytes + fpBytes + postBytes

    def readline(self):
        if self.closed: raise ValueError("I/O operation on closed file")

        output = self.pre.readline()
        if len(output) == 0 or output[-1] != "\n":
            output += self.fp.readline()
        if len(output) == 0 or output[-1] != "\n":
            output += self.post.readline()

        return output

    def readlines(self):
        raise NotImplementedError()

    def __iter__(self):

        line = self.readline()
        while line != '':
            yield line
            line = self.readline()

    def seek(self):
        raise NotImplementedError()

    def write(self):
        raise NotImplementedError()

    def writelines(self):
        raise NotImplementedError()

    def tell(self):
        return self.pre.tell() + self.fp.tell() + self.post.tell()

    def close(self):
        self.closed = True
        self.fp.close()
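FileWrapper reads the pre buffer, then the wrapped file, then the post buffer, which makes it easy to splice a header and footer around an existing stream. A usage sketch, with a StringIO standing in for a real file:

from StringIO import StringIO

wrapped = FileWrapper(StringIO("body\n"), pre="header\n", post="footer\n")
for line in wrapped:
    print line,   # prints header, body, footer: one line each, in that order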
Example #58
0
class Input(object):
    def __init__(self, read, chunked_input=False):
        self.reader = iter(read())
        self.current_chunk = None

    def read_rest(self, length, line=False):
        if self.current_chunk:
            data = self.current_chunk.readline(length) if line \
                else self.current_chunk.read(length)
            if not data:
                self.current_chunk = None
            else:
                return data
        return ''

    def read(self, length=None):
        buffer = []
        data = self.read_rest(length)
        if data:
            if length is not None:  # length may be None, meaning "read everything"
                length -= len(data)
            buffer.append(data)

        if length is None:
            # drain every remaining chunk from the reader
            self.current_chunk = None
            for data in self.reader:
                buffer.append(data)
        else:
            rest = None
            while True:
                v = self.reader.next()
                if not v:
                    break
                length -= len(v)
                if length <= 0:
                    if length < 0:
                        # length went negative: keep only the bytes still needed
                        # and stash the overshoot for the next read
                        rest = v[length:]
                        v = v[:length]
                    buffer.append(v)
                    break
                buffer.append(v)

            if rest:
                self.current_chunk = StringIO(rest)

        return ''.join(buffer)

    def readline(self, length=None):
        data = self.read_rest(length, True)
        if data:
            return data

        v = self.reader.next()
        if not v:
            return v

        self.current_chunk = StringIO(v)
        return self.current_chunk.readline(length)

    def readlines(self, hint=None):
        return list(self)

    def __iter__(self):
        return self

    def next(self):
        line = self.readline()

        if not line:
            raise StopIteration
        return line
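Input pulls data lazily from a chunk iterator and carries any overshoot into current_chunk for the next call. A small sketch of that behaviour (with the read() fixes above and an invented chunk generator):

def chunks():          # fabricated WSGI-style body chunks
    yield "first line\nsecond"
    yield " line\n"

inp = Input(chunks)
print repr(inp.read(5))      # 'first'   - overshoot is stashed in current_chunk
print repr(inp.readline())   # ' line\n' - served from the stashed chunk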
Example #59
0
class SplitterBase(object):
    class BlockClassesIncompatible(Exception):
        pass

    def __init__(self,
                 source,
                 block_classes,
                 include_tags=False,
                 filler_blocks=False,
                 filler_block_class=FillerBlock):
        try:
            source.readline
            self.source = source
        except AttributeError:
            self.source = StringIO()
            self.source.write(source)
            self.source.seek(0)
        self.current_string = ''
        self.block_classes = block_classes
        self.create_slug_to_bc()
        self.finished_blocks = []
        self.current_block = None
        self.filler_blocks = filler_blocks
        self.filler_block_class = filler_block_class
        self.filler_block = None
        self.start_pattern = re.compile(self.get_start_str(),
                                        re.UNICODE | re.DOTALL)
        self.include_tags = include_tags

    def create_slug_to_bc(self):
        self.slug_to_bc = {}
        self.max_num_lines = 1
        for bc in self.block_classes:
            no_lines = bc.start_pattern.count('\n') + 1
            if hasattr(bc, 'stop_pattern'):
                no_lines = max(no_lines, bc.stop_pattern.count('\n') + 1)
            self.max_num_lines = max(no_lines, self.max_num_lines)
            if bc.slug in self.slug_to_bc:
                raise self.BlockClassesIncompatible()
            self.slug_to_bc[bc.slug] = bc

    def match_to_bc(self, match):
        gd = match.groupdict()
        for key, value in gd.items():
            if key[:5] == 'SLUG_' and value:
                return self.slug_to_bc[key[5:]]
        raise ValueError('match does not contain a slug')

    def top_up_current_string(self):
        if self.current_string == '':
            num_lines = 0
        else:
            # Don't count line breaks at end of the string
            num_lines = self.current_string[:-1].count('\n') + 1
        if self.max_num_lines > num_lines:
            new_lines = [self.current_string]
            for i in range(num_lines, self.max_num_lines):
                new_line = self.source.readline()
                if new_line:
                    new_lines.append(new_line)
            self.current_string = ''.join(new_lines)

    def process_groupdict(self, groupdict):
        before = groupdict['before']
        tag = groupdict['tag']
        remainder = groupdict['remainder']
        true_tag = groupdict.get('true_tag', tag)
        pre_tag = groupdict.get('pre_tag', '')
        post_tag = groupdict.get('post_tag', '')
        if tag != pre_tag + true_tag + post_tag:
            raise SplittingError()
        return {
            'before': before + pre_tag,
            'tag': true_tag,
            'remainder': post_tag + remainder,
        }

    def get_start_str(self):
        options = [
            '(?P<%s>%s)' % ('SLUG_' + bc.slug, bc.start_pattern)
            for bc in self.block_classes
        ]
        options = '|'.join(options)
        start_string = '^(?P<before>.*?)(?P<tag>%s)(?P<remainder>.*)$' % options
        return start_string

    def chop_off_first_line(self):
        # Chop the first line off current_string.
        # Probably not the most efficient approach.
        pattern = re.compile('(?P<chopped>.*?)\n(?P<remainder>.*)',
                             re.UNICODE | re.DOTALL)
        match = pattern.search(self.current_string)
        if match:
            self.current_string = match.group('remainder')
            return match.group('chopped') + '\n'
        else:
            line = self.current_string
            self.current_string = ''
            return line

    def stop_pattern(self):
        return re.compile(
            '^(?P<before>.*?)(?P<tag>%s)(?P<remainder>.*)$' %
            self.current_block.stop_pattern, re.UNICODE | re.DOTALL)
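get_start_str() builds one alternation with a named group per block class, so a single search finds whichever block starts first, and match_to_bc recovers the class from the SLUG_ group name. A rough sketch of that alternation with an invented block class (only slug and start_pattern are assumed):

import re

class CommentBlock(object):   # fabricated block class
    slug = 'comment'
    start_pattern = r'/\*'

options = '|'.join('(?P<%s>%s)' % ('SLUG_' + bc.slug, bc.start_pattern)
                   for bc in [CommentBlock])
pattern = re.compile('^(?P<before>.*?)(?P<tag>%s)(?P<remainder>.*)$' % options,
                     re.UNICODE | re.DOTALL)
m = pattern.search('int x; /* a comment */')
print m.group('before'), m.group('tag')   # int x;  /*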
Example #60
0
def ImportModel(filename, use_kdtree=True, callback=None, **kwargs):
    global vertices, edges, kdtree
    vertices = 0
    edges = 0
    kdtree = None

    normal_conflict_warning_seen = False

    if hasattr(filename, "read"):
        # make sure that the input stream can seek and has ".len"
        f = StringIO(filename.read())
        # useful for later error messages
        filename = "input stream"
    else:
        try:
            url_file = pycam.Utils.URIHandler(filename).open()
            # urllib.urlopen objects do not support "seek" - so we need to read
            # the whole file at once. This is ugly - anyone with a better idea?
            f = StringIO(url_file.read())
            # TODO: the above ".read" may be incomplete - this is ugly
            # see http://patrakov.blogspot.com/2011/03/case-of-non-raised-exception.html
            # and http://stackoverflow.com/questions/1824069/
            url_file.close()
        except IOError as err_msg:
            log.error("STLImporter: Failed to read file (%s): %s", filename,
                      err_msg)
            return None
    # Read the first two lines of (potentially non-binary) input - they should
    # contain "solid" and "facet".
    header_lines = []
    while len(header_lines) < 2:
        line = f.readline(200)
        if len(line) == 0:
            # empty line (not even a line-feed) -> EOF
            log.error("STLImporter: No valid lines found in '%s'", filename)
            return None
        # ignore comment lines
        # note: partial comments (starting within a line) are not handled
        if not line.startswith(";"):
            header_lines.append(line)
    header = "".join(header_lines)
    # read bytes 80 to 83 - they contain the "numfacets" value in binary format
    f.seek(80)
    numfacets = unpack("<I", f.read(4))[0]
    binary = False
    log.debug("STL import info: %s / %s / %s / %s", f.len, numfacets,
              header.find("solid"), header.find("facet"))

    if f.len == (84 + 50 * numfacets):
        binary = True
    elif header.find("solid") >= 0 and header.find("facet") >= 0:
        binary = False
        f.seek(0)
    else:
        log.error("STLImporter: STL binary/ascii detection failed")
        return None

    if use_kdtree:
        kdtree = PointKdtree([], 3, 1, epsilon)
    model = Model(use_kdtree)

    t = None
    p1 = None
    p2 = None
    p3 = None

    if binary:
        for i in range(1, numfacets + 1):
            if callback and callback():
                log.warn("STLImporter: load model operation cancelled")
                return None
            a1 = unpack("<f", f.read(4))[0]
            a2 = unpack("<f", f.read(4))[0]
            a3 = unpack("<f", f.read(4))[0]

            n = (float(a1), float(a2), float(a3), 'v')

            v11 = unpack("<f", f.read(4))[0]
            v12 = unpack("<f", f.read(4))[0]
            v13 = unpack("<f", f.read(4))[0]

            p1 = UniqueVertex(float(v11), float(v12), float(v13))

            v21 = unpack("<f", f.read(4))[0]
            v22 = unpack("<f", f.read(4))[0]
            v23 = unpack("<f", f.read(4))[0]

            p2 = UniqueVertex(float(v21), float(v22), float(v23))

            v31 = unpack("<f", f.read(4))[0]
            v32 = unpack("<f", f.read(4))[0]
            v33 = unpack("<f", f.read(4))[0]

            p3 = UniqueVertex(float(v31), float(v32), float(v33))

            # not used (additional attributes)
            f.read(2)

            dotcross = pdot(n, pcross(psub(p2, p1), psub(p3, p1)))
            if a1 == a2 == a3 == 0:
                dotcross = pcross(psub(p2, p1), psub(p3, p1))[2]
                n = None

            if dotcross > 0:
                # Triangle expects the vertices in clockwise order
                t = Triangle(p1, p3, p2)
            elif dotcross < 0:
                if not normal_conflict_warning_seen:
                    log.warn(
                        "Inconsistent normal/vertices found in facet definition %d of '%s'. "
                        "Please validate the STL file!", i, filename)
                    normal_conflict_warning_seen = True
                t = Triangle(p1, p2, p3)
            else:
                # the three points are in a line, or two points are identical;
                # usually this is caused by points that are too close together.
                # Check the tolerance value in pycam/Geometry/PointKdtree.py
                log.warn(
                    "Skipping invalid triangle: %s / %s / %s (maybe the resolution of the "
                    "model is too high?)", p1, p2, p3)
                continue
            if n:
                t.normal = n

            model.append(t)
    else:
        solid = re.compile(r"\s*solid\s+(\w+)\s+.*")
        endsolid = re.compile(r"\s*endsolid\s*")
        facet = re.compile(r"\s*facet\s*")
        normal = re.compile(
            r"\s*facet\s+normal" +
            r"\s+(?P<x>[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?)" +
            r"\s+(?P<y>[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?)" +
            r"\s+(?P<z>[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?)\s+")
        endfacet = re.compile(r"\s*endfacet\s+")
        loop = re.compile(r"\s*outer\s+loop\s+")
        endloop = re.compile(r"\s*endloop\s+")
        vertex = re.compile(
            r"\s*vertex" +
            r"\s+(?P<x>[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?)" +
            r"\s+(?P<y>[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?)" +
            r"\s+(?P<z>[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?)\s+")

        current_line = 0

        for line in f:
            if callback and callback():
                log.warn("STLImporter: load model operation cancelled")
                return None
            current_line += 1
            m = solid.match(line)
            if m:
                model.name = m.group(1)
                continue

            m = facet.match(line)
            if m:
                m = normal.match(line)
                if m:
                    n = (float(m.group('x')), float(m.group('y')),
                         float(m.group('z')), 'v')
                else:
                    n = None
                continue
            m = loop.match(line)
            if m:
                continue
            m = vertex.match(line)
            if m:
                p = UniqueVertex(float(m.group('x')), float(m.group('y')),
                                 float(m.group('z')))
                if p1 is None:
                    p1 = p
                elif p2 is None:
                    p2 = p
                elif p3 is None:
                    p3 = p
                else:
                    log.error(
                        "STLImporter: more than 3 points in facet (line %d)",
                        current_line)
                continue
            m = endloop.match(line)
            if m:
                continue
            m = endfacet.match(line)
            if m:
                if None in (p1, p2, p3):
                    log.warn(
                        "Invalid facet definition in line %d of '%s'. Please validate the "
                        "STL file!", current_line, filename)
                    n, p1, p2, p3 = None, None, None, None
                    continue
                if not n:
                    n = pnormalized(pcross(psub(p2, p1), psub(p3, p1)))

                # validate the normal
                # The three vertices of a triangle in an STL file are supposed
                # to be in counter-clockwise order. This should match the
                # direction of the normal.
                if n is None:
                    # invalid triangle (zero-length vector)
                    dotcross = 0
                else:
                    # make sure the points are in ClockWise order
                    dotcross = pdot(n, pcross(psub(p2, p1), psub(p3, p1)))
                if dotcross > 0:
                    # Triangle expects the vertices in clockwise order
                    t = Triangle(p1, p3, p2, n)
                elif dotcross < 0:
                    if not normal_conflict_warning_seen:
                        log.warn(
                            "Inconsistent normal/vertices found in line %d of '%s'. Please "
                            "validate the STL file!", current_line, filename)
                        normal_conflict_warning_seen = True
                    t = Triangle(p1, p2, p3, n)
                else:
                    # The three points are in a line - or two points are
                    # identical. Usually this is caused by points, that are too
                    # close together. Check the tolerance value in
                    # pycam/Geometry/PointKdtree.py.
                    log.warn(
                        "Skipping invalid triangle: %s / %s / %s (maybe the resolution of "
                        "the model is too high?)", p1, p2, p3)
                    n, p1, p2, p3 = (None, None, None, None)
                    continue
                n, p1, p2, p3 = (None, None, None, None)
                model.append(t)
                continue
            m = endsolid.match(line)
            if m:
                continue

    log.info("Imported STL model: %d vertices, %d edges, %d triangles",
             vertices, edges, len(model.triangles()))
    vertices = 0
    edges = 0
    kdtree = None

    if not model:
        # no valid items added to the model
        return None
    else:
        return model
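The binary/ascii detection above hinges on the fixed binary-STL layout: an 80-byte header, a 4-byte little-endian facet count, then 50 bytes per facet. A minimal sketch of that size check, using a fabricated one-facet buffer:

from struct import pack, unpack

data = 'x' * 80 + pack('<I', 1) + '\x00' * 50   # fabricated binary STL
numfacets = unpack('<I', data[80:84])[0]
print len(data) == 84 + 50 * numfacets          # True -> treated as binary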