Example #1
class TWebsocketClient(TTransportBase):
    """Tornado transport implementation using a websocket"""
    def __init__(self, url):
        self._url = url
        self._isopen = False
        self._wbuf = StringIO()
        self._rbuf = StringIO()
    
    def open(self):
        self._ws = websocket.WebSocket()
        self._ws.connect(self._url)
        self._isopen = True

    def close(self):
        self._isopen = False
        self._ws.close()

    def isOpen(self):
        return self._isopen

    def write(self, buf):
        self._wbuf.write(buf)

    def flush(self):
        self._ws.send(self._wbuf.getvalue())
        self._wbuf = StringIO()

    def read(self, sz):
        ret = self._rbuf.read(sz)
        if len(ret) < sz:
            self._rbuf = StringIO(self._ws.recv())
            ret += self.read(sz - len(ret))
        return ret
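A minimal usage sketch, assuming the websocket-client package is installed and a Thrift service is reachable at a placeholder URL; Calculator stands in for any generated Thrift client:

# Hedged usage sketch; the URL and the Calculator service are placeholders.
from thrift.protocol import TBinaryProtocol

transport = TWebsocketClient('ws://localhost:9090/thrift')
transport.open()
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = Calculator.Client(protocol)  # hypothetical generated client
print(client.add(1, 2))
transport.close()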
Example #2
 def browse(self, max_lines=None, headers=None):
   """Try reading specified number of lines from the CSV object.
   Args:
     max_lines: max number of lines to read. If None, the whole file is read
     headers: a list of strings as column names. If None, it will use "col0, col1..."
   Returns:
     A pandas DataFrame with the schema inferred from the data.
   Raises:
     Exception: if the CSV object cannot be read, there are not enough lines,
       or the number of headers does not match the number of columns.
   """
   if self.path.startswith('gs://'):
     lines = Csv._read_gcs_lines(self.path, max_lines)
   else:
     lines = Csv._read_local_lines(self.path, max_lines)
   if len(lines) == 0:
     return pd.DataFrame(columns=headers)
   columns_size = len(next(csv.reader([lines[0]], delimiter=self._delimiter)))
   if headers is None:
     headers = ['col' + newstr(e) for e in range(columns_size)]
   if len(headers) != columns_size:
      raise Exception('Number of columns in CSV does not match number of headers')
   buf = StringIO()
   for line in lines:
     buf.write(line)
     buf.write('\n')
   buf.seek(0)
   df = pd.read_csv(buf, names=headers, delimiter=self._delimiter)
   for key, col in df.iteritems():
     if self._is_probably_categorical(col):
       df[key] = df[key].astype('category')
   return df
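A hypothetical call, assuming Csv wraps a local or gs:// path as the branches above suggest:

csv_obj = Csv('gs://my-bucket/data.csv')  # hypothetical constructor and path
df = csv_obj.browse(max_lines=100)        # headers default to col0, col1, ...
print(df.dtypes)                          # categorical columns show as 'category'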
Example #3
 def __init__(self, config):
     stream = StringIO()
     stream.write("Current platform settings:\n")
     configfile.pretty_print(config.get_all_platforms(), stream)
     self.description = stream.getvalue()
     stream.close()
     self.config = config
Example #4
	def test():
		alltargets = StringIO()
		allpredictions = StringIO()
		for protein in testingProteins:
			alltargets.write(protein.types)
			i = 0
			for inputs,targets in zip(protein.inputss,protein.targetss):
				i = i + 1
				if i < 3:
					continue
				highestOutput = perceptron_h.test(inputs)
				bestPrediction = 'H'
				e = perceptron_e.test(inputs)
				if e > highestOutput:
					highestOutput = e
					bestPrediction = 'E'
				u = perceptron_u.test(inputs)
				if u > highestOutput:
					highestOutput = u
					bestPrediction = '_'
				allpredictions.write(bestPrediction)
		ch,ce,cu = calculatecoef(alltargets.getvalue(),allpredictions.getvalue())
		yH.append(ch.performance())
		yE.append(ce.performance())
		yU.append(cu.performance())
		print('')
Example #5
class RawHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
	def setup(self):
		self.socket = self.request
		self.socket.handler = self
		self.rfile = StringIO()
		self.rfilePos = 0
		self.wfile = SingleSocketWriter(self.socket)

	def handle(self):
		pass

	def finish(self):
		pass

	def data_came_in(self, socket, data):
		self.rfile.write(data)
		self.rfile.seek(self.rfilePos)
		if self.rfile.read().find('\r\n\r\n') >= 0:
			self.rfile.seek(self.rfilePos)
			self.done = False
			self.handle_one_request()
			if self.done:
				socket.close()
				return
			self.rfilePos = self.rfile.tell()
		# TODO: Check close_connection (a la BaseHTTPRequestHandler.handle())

	def connection_flushed(self, unused_socket):
		self.wfile.flushed()
Example #6
    def upload_from_string(self, data, content_type='text/plain'):
        """Upload contents of this key from the provided string.

        .. note::

           The effect of uploading to an existing key depends on the
           "versioning" and "lifecycle" policies defined on the key's
           bucket.  In the absence of those policies, upload will
           overwrite any existing contents.

           See the `object versioning
           <https://cloud.google.com/storage/docs/object-versioning>`_ and
           `lifecycle <https://cloud.google.com/storage/docs/lifecycle>`_
           API documents for details.

        :type data: string
        :param data: The data to store in this key.

        :rtype: :class:`Key`
        :returns: The updated Key object.
        """
        string_buffer = StringIO()
        string_buffer.write(data)
        self.upload_from_file(file_obj=string_buffer, rewind=True,
                              size=string_buffer.len,
                              content_type=content_type)
        return self
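A hedged usage sketch; the bucket handle and key name are placeholders from the same era of the gcloud storage API:

key = bucket.new_key('greeting.txt')     # hypothetical key on an existing bucket
key.upload_from_string('hello world\n')  # overwrites, absent versioning/lifecycle policies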
Example #7
    def get(self,filename):
        name,sep,ext = filename.rpartition(".")
        if not sep:
            img_name = ext
        else:
            img_name = name
        try:
            img_file = self.fs.get_version(filename=img_name)
            img = img_file.read()
        except gridfs.errors.NoFile:
            raise tornado.web.HTTPError(404, 'image not found')
    
        resize = self.get_argument('_re', None)
        if resize:
            width, resep, height = resize.rpartition("x")
            output = StringIO()
            output.write(img)
            output.seek(0)
            im = Image.open(output)
            format = im.format
#            size = im.size
#            logging.info("format is %s ,size is %s" %(format,size))
            im = im.resize((int(width),int(height)), Image.ANTIALIAS)
            tmp = StringIO()
            im.save(tmp, format)
            img = tmp.getvalue()
            tmp.close()
            output.close()

        self.add_header('Content-Type',img_file.content_type)
        self.write(img)
        self.finish()
Example #8
 def encode(self, text):
     l = len(text)
     output = StringIO()
     val = self.k - (l % self.k)
     for _ in xrange(val):
         output.write('%02x' % val)
     return text + binascii.unhexlify(output.getvalue())
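This is PKCS#7-style padding: each pad byte stores the pad length, so the inverse needs only the last byte. A minimal sketch of a matching decode, assuming the same block size self.k:

 def decode(self, text):
     # PKCS#7-style unpad: the last byte encodes how many pad bytes were added.
     pad = ord(text[-1:])
     if not 1 <= pad <= self.k:
         raise ValueError('invalid padding')
     return text[:-pad]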
Example #9
    def _send_command(self, command, host, port, timeout):
        sock = socket.socket()
        sock.settimeout(timeout)
        buf = StringIO()
        chunk_size = 1024
        # try-finally and try-except to stay compatible with python 2.4
        try:
            try:
                # Connect to the zk client port and send the stat command
                sock.connect((host, port))
                sock.sendall(command)

                # Read the response into a StringIO buffer
                chunk = sock.recv(chunk_size)
                buf.write(chunk)
                num_reads = 1
                max_reads = 10000
                while chunk:
                    if num_reads > max_reads:
                        # Safeguard against an infinite loop
                        raise Exception("Read %s bytes before exceeding max reads of %s. "
                                        % (buf.tell(), max_reads))
                    chunk = sock.recv(chunk_size)
                    buf.write(chunk)
                    num_reads += 1
            except (socket.timeout, socket.error):
                raise ZKConnectionFailure()
        finally:
            sock.close()
        return buf
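For illustration, a hedged call using ZooKeeper's four-letter 'ruok' health check (host, port, and timeout are placeholders; 2181 is ZooKeeper's default client port):

        # Hypothetical invocation from elsewhere in the check.
        buf = self._send_command('ruok', 'localhost', 2181, timeout=5)
        print(buf.getvalue())  # a healthy server answers 'imok'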
Example #10
 def download(self, subtitles):
     login = Login_Itasa()
     for subtitle in subtitles:
         content_type = ''
         attempts = 0
         while 'application/zip' not in content_type:
             url = ITASA_SUBTITLE_DOWNLOAD.format(subtitle['id'], login.authcode, ITASA_KEY)
             file = HTTP.Request(url, cacheTime=0)
             content_type = file.headers['content-type']
             if 'text/xml' in content_type:
                 Log.Debug('[ {} ] Authcode not valid. Trying to retrieve it..'.format(PLUGIN_NAME))
                 login.do_authcode()
             if 'text/html' in content_type and 'utenti registrati' in file.content:
                 Log.Debug('[ {} ] Not logged. Trying to log in'.format(PLUGIN_NAME))
                 login.do_login()
             if 'text/html' in content_type and 'limite di download' in file.content:
                 Log.Debug('[ {} ] You have reached the download limit for this subtitle'.format(PLUGIN_NAME))
                 break
             if attempts > 5:
                 break
             attempts += 1
         filebuffer = StringIO()
         filebuffer.write(file.content)  # write the response body, not the request object
         filebuffer.flush()
         Log.Debug('[ {} ] Subtitle {} (id: {}) downloaded!'.format(PLUGIN_NAME, subtitle['name'], subtitle['id']))
         for sub_content in self.unzip(filebuffer):
             sub_hash = hashlib.md5(sub_content).hexdigest()
             subtitle['subs'].append((sub_hash, sub_content))
     return subtitles
Example #11
    def __patch_jenkins_config( self ):
        """
        A context manager that retrieves the Jenkins configuration XML, deserializes it into an
        XML ElementTree, yields the XML tree, then serializes the tree and saves it back to
        Jenkins.
        """
        config_file = StringIO( )
        if run( 'test -f ~/config.xml', quiet=True ).succeeded:
            fresh_instance = False
            get( remote_path='~/config.xml', local_path=config_file )
        else:
            # Get the in-memory config as the on-disk one may be absent on a fresh instance.
            # Luckily, a fresh instance won't have any configured security.
            fresh_instance = True
            config_url = 'http://localhost:8080/computer/(master)/config.xml'
            with hide( 'output' ):
                config_file.write( run( 'curl "%s"' % config_url ) )
        config_file.seek( 0 )
        config = ElementTree.parse( config_file )

        yield config

        config_file.truncate( 0 )
        config.write( config_file, encoding='utf-8', xml_declaration=True )
        if fresh_instance:
            self.__service_jenkins( 'stop' )
        try:
            put( local_path=config_file, remote_path='~/config.xml' )
        finally:
            if fresh_instance:
                self.__service_jenkins( 'start' )
            else:
                log.warn( 'Visit the Jenkins web UI and click Manage Jenkins - Reload '
                          'Configuration from Disk' )
Example #12
 def preprocess_save(self, item):
     """Function called before ``item`` is saved."""
     if len(item.unsaved_properties):
         self.update_xml_tree(item)
         stream = StringIO()
         stream.write(etree.tostring(item.xml_tree))
         item[self.stream_property] = stream
Example #13
    def __init__(self, fh):
        super(OfxPreprocessedFile,self).__init__(fh)

        if self.fh is None:
            return

        ofx_string = self.fh.read()

        # find all closing tags as hints
        closing_tags = [ t.upper() for t in re.findall(r'(?i)</([a-z0-9_\.]+)>', ofx_string) ]

        # close all tags that don't have closing tags and
        # leave all other data intact
        last_open_tag = None
        tokens        = re.split(r'(?i)(</?[a-z0-9_\.]+>)', ofx_string)
        new_fh        = StringIO()
        for idx,token in enumerate(tokens):
            is_closing_tag = token.startswith('</')
            is_processing_tag = token.startswith('<?')
            is_cdata = token.startswith('<!')
            is_tag = token.startswith('<') and not is_cdata
            is_open_tag = is_tag and not is_closing_tag and not is_processing_tag
            if is_tag:
                if last_open_tag is not None:
                    new_fh.write("</%s>" % last_open_tag)
                    last_open_tag = None
            if is_open_tag:
                tag_name = re.findall(r'(?i)<([a-z0-9_\.]+)>', token)[0]
                if tag_name.upper() not in closing_tags:
                    last_open_tag = tag_name
            new_fh.write(token)
        new_fh.seek(0)
        self.fh = new_fh
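For intuition, a standalone sketch of the same tag-closing pass, distilled from the constructor above and run on a hypothetical SGML-style fragment:

import re

ofx = '<STATUS><CODE>0<SEVERITY>INFO</STATUS>'
closing = [t.upper() for t in re.findall(r'(?i)</([a-z0-9_\.]+)>', ofx)]
out, last_open = [], None
for tok in re.split(r'(?i)(</?[a-z0-9_\.]+>)', ofx):
    is_tag = tok.startswith('<') and not tok.startswith('<!')
    if is_tag and last_open is not None:
        out.append('</%s>' % last_open)   # close the dangling tag first
        last_open = None
    if is_tag and not tok.startswith(('</', '<?')):
        name = re.findall(r'(?i)<([a-z0-9_\.]+)>', tok)[0]
        if name.upper() not in closing:
            last_open = name              # no closer anywhere in the document
    out.append(tok)
print(''.join(out))
# <STATUS><CODE>0</CODE><SEVERITY>INFO</SEVERITY></STATUS>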
Example #14
File: bed.py Project: lukauskas/dgw
def read_bed(bed_file):
    """
    Parses the bed file specified into `pd.DataFrame`
    :param bed_file:
    :return:
    :rtype: `pd.DataFrame`
    """
    f = open(bed_file, 'r')
    try:
        s = StringIO()
        # Filter out all lines that do not start with "chr" as BED files are allowed to contain some junk
        for line in f:
            if line.startswith('chr'):
                s.write(line)
        s.seek(0)
        regions = pd.read_csv(s, sep="\t", header=None)
    finally:
        f.close()
        s.close()
    regions.columns = BED_COLUMNS[:len(regions.columns)]

    if 'name' in regions.columns:
        if len(regions.name) != len(regions.name.drop_duplicates()):
            raise Exception('Input BED file {0!r} contains duplicate values in name column. '
                            'Please ensure the names of the regions are unique'.format(bed_file))
        regions = regions.set_index('name')

    return Regions(regions)
Example #15
def check_cpaste(code, should_fail=False):
    """Execute code via 'cpaste' and ensure it was executed, unless
    should_fail is set.
    """
    _ip.user_ns['code_ran'] = False

    src = StringIO()
    if not hasattr(src, 'encoding'):
        # IPython expects stdin to have an encoding attribute
        src.encoding = None
    src.write('\n')
    src.write(code)
    src.write('\n--\n')
    src.seek(0)

    stdin_save = sys.stdin
    sys.stdin = src
    
    try:
        context = tt.AssertPrints if should_fail else tt.AssertNotPrints
        with context("Traceback (most recent call last)"):
                _ip.magic('cpaste')
        
        if not should_fail:
            assert _ip.user_ns['code_ran']
    finally:
        sys.stdin = stdin_save
Example #16
    def list_directory(self, path):
        """Helper to produce a directory listing (absent index.html).

        Return value is either a file object, or None (indicating an
        error).  In either case, the headers are sent, making the
        interface the same as for send_head().

        """
        try:
            list = os.listdir(path)
        except os.error:
            self.send_error(404, "No permission to list directory")
            return None
        list.sort(key=lambda a: a.lower())
        f = StringIO()
        f.write("<title>Directory listing for %s</title>\n" % self.path)
        f.write("<h2>Directory listing for %s</h2>\n" % self.path)
        f.write("<hr>\n<ul>\n")
        for name in list:
            fullname = os.path.join(path, name)
            displayname = linkname = name = cgi.escape(name)
            # Append / for directories or @ for symbolic links
            if os.path.isdir(fullname):
                displayname = name + "/"
                linkname = name + "/"
            if os.path.islink(fullname):
                displayname = name + "@"
                # Note: a link to a directory displays with @ and links with /
            f.write('<li><a href="%s">%s</a>\n' % (linkname, displayname))
        f.write("</ul>\n<hr>\n")
        f.seek(0)
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        return f
Example #17
class MockRequest(object):
    def __init__(self, utoken, uri=None):
        self._utoken = utoken
        self.uri = uri
        self._finishedDeferreds = []
        self.stream = StringIO()
        self.args = {}

    def getSession(self):
        return MockSession(self._utoken)

    def finish(self):
        pass

    def write(self, data):
        self.stream.write(data)

    def clear(self):
        self.stream.close()
        self.stream = StringIO()

    def notifyFinish(self):
        finished = Deferred()
        self._finishedDeferreds.append(finished)
        return finished

    def setResponseCode(self, code, error):
        self.code = code
        self.error = error

    def __str__(self):
        return "\nrequest:args = %s\ndata = %s\n" % (self.args, self.stream.getvalue())
Example #18
 def __updateLog4j__(self, inst, debug=[], trace=[]):
     LOG.info("Updating log4j properties - DEBUG[%d] / TRACE[%d]", len(debug), len(trace))
     
     conf_file = os.path.join(self.hstore_dir, "log4j.properties")
     targetLevels = {
         "DEBUG": debug,
         "TRACE": trace,
     }
     with settings(host_string=inst.public_dns_name):
         contents = self.get_file(inst, conf_file)
         assert len(contents) > 0, "Configuration file '%s' is empty" % conf_file
         
         # Go through the file and update anything that is already there
         baseRegex = r"(log4j\.logger\.(?:%s))[\s]*=[\s]*(?:INFO|DEBUG|TRACE)(|,[\s]+[\w]+)"
         for level, clazzes in targetLevels.iteritems():
             contents = re.sub(baseRegex % "|".join(map(string.strip, clazzes)),
                             r"\1="+level+r"\2",
                             contents, flags=re.IGNORECASE)
         
         # Then add in anybody that is missing
         first = True
         for level, clazzes in targetLevels.iteritems():
             for clazz in clazzes:
                 if contents.find(clazz) == -1:
                     if first: contents += "\n"
                     contents += "\nlog4j.logger.%s=%s" % (clazz, level)
                     first = False
         ## FOR
         
         sio = StringIO()
         sio.write(contents)
         put(local_path=sio, remote_path=conf_file)
Example #19
    def get_text(self, start_line=0, start_offset=0, end_line=-1, end_offset=-1):
        if start_line < 0:
            start_line = len(self.__lines) -1
        if end_line < 0:
            end_line = len(self.__lines) -1
        if start_offset < 0:
            start_offset = len(self.__lines[start_line])
        if end_offset < 0:
            end_offset = len(self.__lines[end_line])

        start_line, start_offset, end_line, end_offset = order_positions(start_line, start_offset, end_line, end_offset)

        if start_line == end_line:
            return self.__lines[start_line][start_offset:end_offset]

        si = StringIO()

        line = start_line
        si.write(self.__lines[line][start_offset:])
        line += 1

        while line < end_line:
            si.write("\n")
            si.write(self.__lines[line])
            line += 1

        si.write("\n")
        si.write(self.__lines[line][:end_offset])

        return si.getvalue()
Example #20
    def convert_df_to_njson_and_upload(self, df, destination_blobname, metadata={}, tmp_bucket='isb-cgc-dev'):

        log.info("Converting dataframe into a new-line delimited JSON file")

        file_to_upload = StringIO()

        for i, rec in df.iterrows():
            file_to_upload.write(rec.convert_objects(convert_numeric=False).to_json() + "\n")
        file_to_upload.seek(0)

        bucket = self.client.get_bucket(tmp_bucket)
        upload_blob = storage.blob.Blob(destination_blobname, bucket=bucket)

        upload_blob.upload_from_string(file_to_upload.getvalue())
        # set blob metadata
        if metadata:
            log.info("Setting object metadata")
            upload_blob.metadata = metadata
            upload_blob.patch()
        file_to_upload.close()

        # check if the uploaded blob exists. Just a sanity check
        if upload_blob.exists():
            log.info("The uploaded file {0} has size {1} bytes.".format(destination_blobname, upload_blob.size))
            return True
        else:
            raise Exception('File upload failed - {0}.'.format(destination_blobname)) 
Example #21
def main(event, category, dry_run, html_log, verbose):
    contribs = db.m.Contribution.find(db.m.Contribution.description.op('~')('</[a-zA-Z]+>'))
    log = StringIO() if html_log else None

    if event:
        contribs = contribs.filter(db.m.Contribution.event_id == event)
    elif category:
        contribs = contribs.join(db.m.Event).filter(db.m.Event.category_chain_overlaps(category))

    if log:
        log.write('<table style="width: 100%;">')
    for contrib in contribs:
        if '<html>' in unicode(contrib.description):
            click.echo(click.style('[HTML DOCUMENT] ', fg='red', bold=True) + repr(contrib))
        else:
            migrate_description(contrib, verbose, log)

    if log:
        log.write('</table>')

    if html_log:
        log.seek(0)
        html_log.write(HTML_TPL.format(log.read()))

    if not dry_run:
        db.session.commit()
Example #22
    def _send_password_resets(self):
        db = request.environ['sqlalchemy.session']
        model = request.environ['sqlalchemy.model']
        output = StringIO()

        resets = db.query(model.PasswordResetRequest).filter_by(sent_on=null()).all()

        for reset in resets:
            try:
                c.password_reset_link = h.site_url() + h.url_for(controller='password', action='reset', token=reset.token)
                c.password_reset_code = reset.token
                body = render_jinja('messages/password_reset.jinja')
                message = MIMEText(body.encode('utf-8'), 'plain', 'utf-8')
                message['Subject'] = _("Password reset instruction from %s") % h.site_name()
                message['To'] = reset.user.email
                message['From'] = config['from_address']
                
                ms = EmailSender(to_addresses = message['To'])
                ms.send_mime(message)

                reset.sent_on = datetime.utcnow()
                db.commit()

                output.write(_("Sending password reset e-mail to %s\n") % message['To'])
            except Exception:
                output.write(_("Can't send password reset e-mail: %s\n") % format_exc())
        return output.getvalue()
Example #23
def install(self):
    out = StringIO()

    # Install types
    type_info = listTypes(PROJECTNAME)
    installTypes(self, out, type_info, PROJECTNAME)

    # Install tools
    add_tool = self.manage_addProduct[PROJECTNAME].manage_addTool
    if not self.objectIds(spec=FSSTool.meta_type):
        add_tool(FSSTool.meta_type)

    # Install skin
    install_subskin(self, out, GLOBALS)

    # Install configlet
    cp_tool = getToolByName(self, 'portal_controlpanel')
    try:
        cp_tool.registerConfiglet(**fss_prefs_configlet)
    except Exception:
        pass

    # Install modifier
    install_modifier(self, out)

    out.write('Installation completed.\n')
    return out.getvalue()
Example #24
    def apply_locale(self, locale, out_fn=None):
        # Adjust the locals value to the new value
        newconf = StringIO()
        for line in util.load_file(self.login_conf_fn).splitlines():
            newconf.write(re.sub(r'^default:',
                                 r'default:lang=%s:' % locale, line))
            newconf.write("\n")

        # Make a backup of login.conf.
        util.copy(self.login_conf_fn, self.login_conf_fn_bak)

        # And write the new login.conf.
        util.write_file(self.login_conf_fn, newconf.getvalue())

        try:
            LOG.debug("Running cap_mkdb for %s", locale)
            util.subp(['cap_mkdb', self.login_conf_fn])
        except util.ProcessExecutionError:
            # cap_mkdb failed, so restore the backup.
            util.logexc(LOG, "Failed to apply locale %s", locale)
            try:
                util.copy(self.login_conf_fn_bak, self.login_conf_fn)
            except IOError:
                util.logexc(LOG, "Failed to restore %s backup",
                            self.login_conf_fn)
Example #25
def merge_previous_list(csv_data, list_type_id):
    """
    Merges the most recent list from app.db of type list_type_id
    and merges into csv_data.

    Args:
        csv_data: CSV data
        list_type_id: List type
    Returns:
        list of email addresses (usually larger than csv_data)
    Raises:
        Exception:
    """
    job = Job.previous_by_list_type_id(list_type_id)
    prev_csv = decompress(job.compressed_csv)
    seen = set()  # set for fast O(1) amortized lookup
    new_sio = StringIO(csv_data)
    for row in reader(new_sio):
        seen.add(row[0])
    old_sio = StringIO(prev_csv)
    # new_sio is now at EOF, so the writes below append to the existing CSV data
    if csv_data and not csv_data.endswith('\n'):
        new_sio.write('\n')
    for row in reader(old_sio):
        if row[0] in seen:
            continue  # skip duplicate
        new_sio.write(','.join(row) + '\n')  # write a CSV line, not a list object
    csv = new_sio.getvalue()
    return csv
Example #26
class HTMLInputHandlerTests(TestCase):
    def setUp(self):
        self.file_like_object = StringIO()
        #Note the whitespace, this tests strip()
        self.file_like_object.write("This is line 1\n")
        self.file_like_object.write(" This is line 2\n")
        self.file_like_object.write("This is line 3 \n")
        #Place it at the beginning of the file again
        self.file_like_object.seek(0)

    def tearDown(self):
        self.file_like_object.close()

    def test_load_file_lines(self):
        """Correctly returns file lines as a list of strings"""
        # can't load a string, etc...
        self.assertRaises(IncompetentDeveloperError, load_file_lines, 'This is not a file')
        result = load_file_lines(self.file_like_object)
        self.assertEqual(result,
                         ["This is line 1",
                          "This is line 2",
                          "This is line 3"])

    def test_load_file_contents(self):
        """Correctly returns file contents"""
        # can't load a string, etc...
        self.assertRaises(IncompetentDeveloperError, load_file_contents, 'This is not a file')

        result = load_file_contents(self.file_like_object)
        #Note the whitespace
        self.assertEqual(result, "This is line 1\n This is line 2\nThis is line 3 \n")
Example #27
    def testApplyFilter(self):
        # FIXME: broken with distribute
        return
        s, out = commands.getstatusoutput('python scripts/vcf_filter.py --site-quality 30 test/example-4.0.vcf sq')
        #print(out)
        assert s == 0
        buf = StringIO()
        buf.write(out)
        buf.seek(0)

        print(buf.getvalue())
        reader = vcf.Reader(buf)


        # check filter got into output file
        assert 'sq30' in reader.filters

        print(reader.filters)

        # check sites were filtered
        n = 0
        for r in reader:
            if r.QUAL < 30:
                assert 'sq30' in r.FILTER
                n += 1
            else:
                assert 'sq30' not in r.FILTER
        assert n == 2
Example #28
    def _send_invitations(self):
        model = request.environ["sqlalchemy.model"]
        db = request.environ["sqlalchemy.session"]
        output = StringIO()

        invitations = db.query(model.Invitation).filter_by(sent=False).all()

        for invitation in invitations:
            try:
                c.invitation_link = h.site_url() + h.url_for(controller='signup', action='create', id=invitation.token)
                c.invitation = invitation
                body = render_jinja('messages/invitation_mail.jinja')
                message = MIMEText(body.encode('utf-8'), 'plain', 'utf-8')
                message['Subject'] = _("You have been invited to %s") % h.site_name()
                message['To'] = invitation.to_address
                message['From'] = config['from_address']

                ms = EmailSender(to_addresses = message['To'])
                ms.send_mime(message)

                invitation.sent = True
                db.commit()
                
                output.write(_("Sending invitation e-mail to %s\n") % message['To'])
            except Exception:
                output.write(_("Can't send invitation e-mail: %s\n") % format_exc())
        return output.getvalue()
Example #29
	def test():
		alltargets = StringIO()
		allpredictions = StringIO()
		for protein in testingProteins:
			alltargets.write(protein.types)
			i = 0
			for inputs in protein.inputss:
				i = i + 1
				if i < 3:
					continue
				highestOutput = 0
				bestPrediction = 'H'
				for prediction,value in zip(perceptron.activate(inputs),('H','E','_')):
					if prediction > highestOutput:
						highestOutput = prediction
						bestPrediction = value
				allpredictions.write(bestPrediction)
		ch,ce,cu = calculatecoef(alltargets.getvalue(),allpredictions.getvalue())
		
		yH.append(ch.performance())
		yE.append(ce.performance())
		yU.append(cu.performance())
		
		print(ch.performance(),ce.performance(),cu.performance())
Example #30
    def _send_balance_invitations(self):
        db = request.environ['sqlalchemy.session']
        model = request.environ['sqlalchemy.model']
        output = StringIO()

        invitations = db.query(model.BalanceInvitation).filter_by(sent=False).all()

        for invitation in invitations:
            try:
                c.invitation_link = h.site_url() + h.url_for(controller='balances', action='invitation', id=invitation.token)
                c.invitation = invitation
                body = render_jinja('messages/balance_invitation.jinja')
                message = MIMEText(body.encode('utf-8'), 'plain', 'utf-8')
                message['Subject'] = _("Invitation to work on a balance from %s") % h.site_name()
                message['To'] = invitation.to_address
                message['From'] = config['from_address']
                
                ms = EmailSender(to_addresses = message['To'])
                ms.send_mime(message)

                invitation.sent = True
                db.commit()
                
                output.write(_("Sending balance invitation e-mail to %s\n") % message['To'])
            except Exception:
                output.write(_("Can't send balance invitation e-mail: %s\n") % format_exc())
        return output.getvalue()
Example #31
 def write(self, s):
     log.write(s)
     StringIO.write(self, s)
Example #32
def _AsciiBase85Decode(input):
    """This is not used - Acrobat Reader decodes for you - but a round
    trip is essential for testing."""
    outstream = StringIO()
    #strip all whitespace
    stripped = ''.join(input.split())
    #check end
    assert stripped[-2:] == '~>', 'Invalid terminator for Ascii Base 85 Stream'
    stripped = stripped[:-2]  #chop off terminator

    #may have 'z' in it which complicates matters - expand them
    stripped = stripped.replace('z', '!!!!!')
    # special rules apply if not a multiple of five bytes.
    whole_word_count, remainder_size = divmod(len(stripped), 5)
    #print '%d words, %d leftover' % (whole_word_count, remainder_size)
    assert remainder_size != 1, 'invalid Ascii 85 stream!'
    cut = 5 * whole_word_count
    body, lastbit = stripped[0:cut], stripped[cut:]

    for i in range(whole_word_count):
        offset = i * 5
        c1 = ord(body[offset]) - 33
        c2 = ord(body[offset + 1]) - 33
        c3 = ord(body[offset + 2]) - 33
        c4 = ord(body[offset + 3]) - 33
        c5 = ord(body[offset + 4]) - 33

        num = ((85**4) * c1) + ((85**3) * c2) + ((85**2) * c3) + (85 * c4) + c5

        temp, b4 = divmod(num, 256)
        temp, b3 = divmod(temp, 256)
        b1, b2 = divmod(temp, 256)

        assert num == 16777216 * b1 + 65536 * b2 + 256 * b3 + b4, 'dodgy code!'
        outstream.write(chr(b1))
        outstream.write(chr(b2))
        outstream.write(chr(b3))
        outstream.write(chr(b4))

    #decode however many bytes we have as usual
    if remainder_size > 0:
        while len(lastbit) < 5:
            lastbit = lastbit + '!'
        c1 = ord(lastbit[0]) - 33
        c2 = ord(lastbit[1]) - 33
        c3 = ord(lastbit[2]) - 33
        c4 = ord(lastbit[3]) - 33
        c5 = ord(lastbit[4]) - 33
        num = ((85**4) * c1) + ((85**3) * c2) + ((85**2) * c3) + (85 * c4) + c5
        temp, b4 = divmod(num, 256)
        temp, b3 = divmod(temp, 256)
        b1, b2 = divmod(temp, 256)
        assert num == 16777216 * b1 + 65536 * b2 + 256 * b3 + b4, 'dodgy code!'
        #print 'decoding: %d %d %d %d %d -> %d -> %d %d %d %d' % (
        #    c1,c2,c3,c4,c5,num,b1,b2,b3,b4)

        #the last character needs 1 adding; the encoding loses
        #data by rounding the number to x bytes, and when
        #divided repeatedly we get one less
        if remainder_size == 2:
            lastword = chr(b1 + 1)
        elif remainder_size == 3:
            lastword = chr(b1) + chr(b2 + 1)
        elif remainder_size == 4:
            lastword = chr(b1) + chr(b2) + chr(b3 + 1)
        outstream.write(lastword)

    #rewind and return the decoded bytes
    outstream.seek(0)
    return outstream.read()
Example #33
def _AsciiBase85Encode(input):
    """This is a compact encoding used for binary data within
    a PDF file.  Four bytes of binary data become five bytes of
    ASCII.  This is the default method used for encoding images."""
    outstream = StringIO()
    # special rules apply if not a multiple of four bytes.
    whole_word_count, remainder_size = divmod(len(input), 4)
    cut = 4 * whole_word_count
    body, lastbit = input[0:cut], input[cut:]

    for i in range(whole_word_count):
        offset = i * 4
        b1 = ord(body[offset])
        b2 = ord(body[offset + 1])
        b3 = ord(body[offset + 2])
        b4 = ord(body[offset + 3])

        num = 16777216 * b1 + 65536 * b2 + 256 * b3 + b4

        if num == 0:
            #special case
            outstream.write('z')
        else:
            #solve for five base-85 numbers
            temp, c5 = divmod(num, 85)
            temp, c4 = divmod(temp, 85)
            temp, c3 = divmod(temp, 85)
            c1, c2 = divmod(temp, 85)
            assert ((85**4) * c1) + ((85**3) * c2) + (
                (85**2) * c3) + (85 * c4) + c5 == num, 'dodgy code!'
            outstream.write(chr(c1 + 33))
            outstream.write(chr(c2 + 33))
            outstream.write(chr(c3 + 33))
            outstream.write(chr(c4 + 33))
            outstream.write(chr(c5 + 33))

    # now we do the final bit at the end.  I repeated this separately as
    # the loop above is the time-critical part of a script, whereas this
    # happens only once at the end.

    #encode however many bytes we have as usual
    if remainder_size > 0:
        while len(lastbit) < 4:
            lastbit = lastbit + '\000'
        b1 = ord(lastbit[0])
        b2 = ord(lastbit[1])
        b3 = ord(lastbit[2])
        b4 = ord(lastbit[3])

        num = 16777216 * b1 + 65536 * b2 + 256 * b3 + b4

        #solve for c1..c5
        temp, c5 = divmod(num, 85)
        temp, c4 = divmod(temp, 85)
        temp, c3 = divmod(temp, 85)
        c1, c2 = divmod(temp, 85)

        #print 'encoding: %d %d %d %d -> %d -> %d %d %d %d %d' % (
        #    b1,b2,b3,b4,num,c1,c2,c3,c4,c5)
        lastword = chr(c1 + 33) + chr(c2 + 33) + chr(c3 + 33) + chr(
            c4 + 33) + chr(c5 + 33)
        #write out most of the bytes.
        outstream.write(lastword[0:remainder_size + 1])

    #terminator code for ascii 85
    outstream.write('~>')
    outstream.seek(0)
    return outstream.read()
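A hedged round-trip check between the decoder of Example #32 and the encoder above (Python 2 byte strings assumed), covering the 1-3 byte remainder cases handled by both:

for data in ('', 'a', 'ab', 'abc', 'abcd', 'hello world\x00\xff'):
    assert _AsciiBase85Decode(_AsciiBase85Encode(data)) == data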
Example #34
    def addHeaders(self, hdrs, request):
        # Generate response
        self.generateResponse(request)

        # Generate header
        os = StringIO()
        os.write("Digest username=\"%s\"," % (self.fields['username'], ))
        os.write(" realm=\"%s\"," % (self.fields['realm'], ))
        os.write(" nonce=\"%s\"," % (self.fields['nonce'], ))
        os.write(" uri=\"%s\"," % (request.getURL(), ))
        if "qop" in self.fields:
            os.write(" qop=auth,")
            os.write(" nc=%s," % (self.fields['nc'], ))
            os.write(" cnonce=\"%s\"," % (self.fields['cnonce'], ))
        os.write(" response=\"%s\"" % (self.response, ))

        if "algorithm" in self.fields:
            os.write(", algorithm=\"%s\"" % (self.fields['algorithm'], ))
        if "opaque" in self.fields:
            os.write(", opaque=\"%s\"" % (self.fields['opaque'], ))

        hdrs.append((headers.Authorization, os.getvalue()))
Example #35
def readASC(filename, headonly=False, skip=0, delta=None, length=None,
            **kwargs):  # @UnusedVariable
    """
    Reads a Seismic Handler ASCII file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: ASCII file to be read.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the head. This is most useful
        for scanning available data in huge (temporary) data sets.
    :type skip: int, optional
    :param skip: Number of lines to be skipped from top of file. If defined
        only one trace is read from file.
    :type delta: float, optional
    :param delta: If "skip" is used, "delta" defines sample offset in seconds.
    :type length: int, optional
    :param length: If "skip" is used, "length" defines the number of values to
        be read.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/QFILE-TEST-ASC.ASC")
    >>> st  # doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    3 Trace(s) in Stream:
    .TEST..BHN | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .TEST..BHE | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .WET..HHZ  | 2010-01-01T01:01:05.999000Z - ... | 100.0 Hz, 4001 samples
    """
    fh = open(filename, 'rt')
    # read file and split text into channels
    channels = []
    headers = {}
    data = StringIO()
    for line in fh.readlines()[skip:]:
        if line.isspace():
            # blank line
            # check if any data fetched yet
            if len(headers) == 0 and data.len == 0:
                continue
            # append current channel
            data.seek(0)
            channels.append((headers, data))
            # create new channel
            headers = {}
            data = StringIO()
            if skip:
                # if skip is set only one trace is read, everything else makes
                # no sense.
                break
            continue
        elif line[0].isalpha():
            # header entry
            key, value = line.split(':', 1)
            key = key.strip()
            value = value.strip()
            headers[key] = value
        elif not headonly:
            # data entry - may be written in multiple columns
            data.write(line.strip() + ' ')
    fh.close()
    # create ObsPy stream object
    stream = Stream()
    # custom header
    custom_header = {}
    if delta:
        custom_header["delta"] = delta
    if length:
        custom_header["npts"] = length

    for headers, data in channels:
        # create Stats
        header = Stats(custom_header)
        header['sh'] = {}
        channel = [' ', ' ', ' ']
        # generate headers
        for key, value in headers.iteritems():
            if key == 'DELTA':
                header['delta'] = float(value)
            elif key == 'LENGTH':
                header['npts'] = int(value)
            elif key == 'CALIB':
                header['calib'] = float(value)
            elif key == 'STATION':
                header['station'] = value
            elif key == 'COMP':
                channel[2] = value[0]
            elif key == 'CHAN1':
                channel[0] = value[0]
            elif key == 'CHAN2':
                channel[1] = value[0]
            elif key == 'START':
                # 01-JAN-2009_01:01:01.0
                # 1-OCT-2009_12:46:01.000
                header['starttime'] = toUTCDateTime(value)
            else:
                # everything else gets stored into sh entry
                if key in SH_KEYS_INT:
                    header['sh'][key] = int(value)
                elif key in SH_KEYS_FLOAT:
                    header['sh'][key] = float(value)
                else:
                    header['sh'][key] = value
        # set channel code
        header['channel'] = ''.join(channel)
        if headonly:
            # skip data
            stream.append(Trace(header=header))
        else:
            # read data
            data = loadtxt(data, dtype='float32', ndmin=1)

            # cut data if requested
            if skip and length:
                data = data[:length]

            # use correct value in any case
            header["npts"] = len(data)

            stream.append(Trace(data=data, header=header))
    return stream
Example #36
    outputMlfFileLab = StringIO()
    outputTrnFile = StringIO()
    outputPtbFile = StringIO()
else:
    outputMlfFile = codecs.open(outputMlf, "w", "UTF-8")
    outputPush2MlfFile = codecs.open(outputMlf + ".push2.mlf", "w", "UTF-8")
    outputMlfFileLab = codecs.open(outputMlf + ".dcd", "w", "UTF-8")
    outputTrnFile = codecs.open(outputMlf + ".trn", "w", "UTF-8")
    outputPtbFile = codecs.open(outputMlf + ".ptb", "w", "UTF-8")


outputMlfFileSmntcs = codecs.open(outputMlf + ".smntcs", "w", "UTF-8")
outputPush2MlfFileSmntcs = codecs.open(outputMlf + ".push2.smntcs", "w", "UTF-8")
outputCuedFileSmntcs = codecs.open(outputMlf + ".cued", "w", "UTF-8")

outputMlfFile.write("#!MLF!#\n")
outputPush2MlfFile.write("#!MLF!#\n")
outputMlfFileLab.write("#!MLF!#\n")
outputMlfFileSmntcs.write("#!MLF!#\n")
outputPush2MlfFileSmntcs.write("#!MLF!#\n")


trn_id = 0

for fileName in lst:
    push2 = False
    
    if verbose:
        print("Reading file: " + fileName)

    smntcs = readSemanticsFromCMBFile(fileName, lemmatized=lemmatized)
Example #37
def _getInputFromUser(param):
    """
    this private func reads the data from the user
    for the given param
    """
    loop = True
    userInput = None

    try:
        if param.USE_DEFAULT:
            logging.debug("setting default value (%s) for key (%s)" % (mask(param.DEFAULT_VALUE), param.CONF_NAME))
            controller.CONF[param.CONF_NAME] = param.DEFAULT_VALUE
        else:
            while loop:
                # If the value was not supplied by the command line flags
                if param.CONF_NAME not in commandLineValues:
                    message = StringIO()
                    message.write(param.PROMPT)

                    val_list = param.VALIDATORS or []
                    if (validators.validate_regexp not in val_list
                            and param.OPTION_LIST):
                        message.write(" [%s]" % "|".join(param.OPTION_LIST))

                    if param.DEFAULT_VALUE:
                        message.write("  [%s] " % (str(param.DEFAULT_VALUE)))

                    message.write(": ")
                    message.seek(0)
                    # mask password or hidden fields

                    if (param.MASK_INPUT):
                        userInput = getpass.getpass("%s :" % (param.PROMPT))
                    else:
                        userInput = raw_input(message.read())
                else:
                    userInput = commandLineValues[param.CONF_NAME]
                # If DEFAULT_VALUE is set and user did not input anything
                if userInput == "" and len(str(param.DEFAULT_VALUE)) > 0:
                    userInput = param.DEFAULT_VALUE

                # Param processing
                userInput = process_param_value(param, userInput)

                # If param requires validation
                try:
                    validate_param_value(param, userInput)
                    controller.CONF[param.CONF_NAME] = userInput
                    loop = False
                except ParamValidationError:
                    if param.LOOSE_VALIDATION:
                        # If validation failed but LOOSE_VALIDATION is true, ask user
                        answer = _askYesNo("User input failed validation, "
                                           "do you still wish to use it")
                        loop = not answer
                        if answer:
                            controller.CONF[param.CONF_NAME] = userInput
                            continue
                        else:
                            if param.CONF_NAME in commandLineValues:
                                del commandLineValues[param.CONF_NAME]
                    else:
                        # Delete value from commandLineValues so that we will prompt the user for input
                        if param.CONF_NAME in commandLineValues:
                            del commandLineValues[param.CONF_NAME]
                        loop = True
    except KeyboardInterrupt:
        # add a newline so messages won't be displayed on the same line as the question
        print("")
        raise
    except:
        logging.error(traceback.format_exc())
        raise Exception(output_messages.ERR_EXP_READ_INPUT_PARAM % (param.CONF_NAME))
Example #38
 def write(self, msg=''):
     if not self._filter(msg):
         StringIO.write(self, msg)
Example #39
    def render(self):
        self.computeWidths()
        wrappers = []
        for col in self.columns:
            wrappers.append(TextWrapper(col.width))
        width = self.width + len(self.columnSep) * (len(self.columns) - 1)

        writer = StringIO()

        # renderHeader

        title = self.get_title()
        if title is not None:
            writer.write(title + "\n")
            writer.write("=" * len(title) + "\n")

        # wrap header labels:
        headerCells = []
        headerHeight = 1
        i = 0
        for col in self.columns:
            cell = wrappers[i].wrap(col.getLabel())
            headerCells.append(cell)
            headerHeight = max(headerHeight, len(cell))
            i += 1

        for cell in headerCells:
            vfill(cell, TOP, headerHeight)

        for i in range(headerHeight):
            writer.write(self._row_as_text(headerCells, i, self.columnSep))

        l = [self.columnHeaderSep * col.width for col in self.columns]
        writer.write("+".join(l) + "\n")

        #queryset = self.report.get_queryset(master_instance=None,flt=self.flt)
        #queryset=self.report.queryset
        #print queryset.count()
        for obj in self.queryset:
            wrappedCells = []
            for col in self.columns:
                v = col.cell_value(obj, None)
                if v is None:
                    v = ''
                l = wrappers[col.index].wrap(col.format(v))
                if len(l) == 0:
                    wrappedCells.append([''])
                else:
                    wrappedCells.append(l)

            # find out rowHeight for this row
            if self.rowHeight is None:
                rowHeight = 1
                for linelist in wrappedCells:
                    rowHeight = max(rowHeight, len(linelist))
            else:
                rowHeight = self.rowHeight

            if rowHeight == 1:
                writer.write(self._row_as_text(wrappedCells, 0,
                                               self.columnSep))
            else:
                # vfill each cell:
                for j in range(len(self.columns)):
                    vfill(wrappedCells[j], self.columns[j].valign, rowHeight)

                for i in range(rowHeight):
                    writer.write(
                        self._row_as_text(wrappedCells, i, self.columnSep))
        return writer.getvalue()
Example #40
 def get_mock_filestream(self, somestring):
     stream = StringIO()
     stream.write(somestring)
     stream.seek(0)
     return stream
Example #41
for i in range(0, 6):
  print("Setting fan {} to {}".format(i, fan))
  idx = 88 + 4 * i
  s[idx+0] = "80"
  s[idx+1] = "00"
  s[idx+2] = "00"
  s[idx+3] = format(1 << 5 | fan & 7, "x")
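  # For example, fan == 4 yields 0x24 here: bit 5 appears to be a speed-select
  # flag and the low three bits the requested speed code (an assumption based
  # on the SES cooling-element layout; not confirmed by this script).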

output = StringIO()
off = 0
count = 0
line = ''

while True:
  output.write(s[off])
  off = off + 1
  count = count + 1
  if count == 8 :
    output.write("  ")
  elif count == 16:
    output.write("\n")
    count=0
  else:
    output.write(" ")
  if off >= len(s):
    break

output.write("\n")
p = Popen(['sg_ses', '-p', '0x2', device, '--control', '--data', '-'], stdout=PIPE, stdin=PIPE, stderr=PIPE)
print("Set fan speeds... Waiting to get fan speeds (ctrl+c to skip)")
Example #42
    def xmlTags(self):
        # constants
        header = "<?xml version=\"1.0\" encoding=\"UTF-8\"?><!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\"><plist version=\"1.0\"><dict>\n"
        castheader = "<key>cast</key><array>\n"
        writerheader = "<key>screenwriters</key><array>\n"
        directorheader = "<key>directors</key><array>\n"
        subfooter = "</array>\n"
        footer = "</dict></plist>\n"

        output = StringIO()
        output.write(header)

        # Write actors
        output.write(castheader)
        for a in self.showdata['_actors'][:5]:
            if a is not None and a['name'] is not None:
                output.write(
                    "<dict><key>name</key><string>%s</string></dict>\n" %
                    a['name'].encode('ascii', errors='ignore'))
        output.write(subfooter)

        # Write screenwriters
        if self.writer is not None:
            output.write(writerheader)
            for name in self.writer.split("|"):
                if name != "":
                    output.write(
                        "<dict><key>name</key><string>%s</string></dict>\n" %
                        name.encode('ascii', errors='ignore'))
            output.write(subfooter)

        # Write directors
        if self.director is not None:
            output.write(directorheader)
            for name in self.director.split("|"):
                if name != "":
                    output.write(
                        "<dict><key>name</key><string>%s</string></dict>\n" %
                        name.encode('ascii', errors='ignore'))
            output.write(subfooter)

        # Close XML
        output.write(footer)
        return output.getvalue()
Example #43
def _show_graph(objs, edge_func, swap_source_target, max_depth=3, extra_ignore=(), filter=None, too_many=10, highlight=None, filename=None, extra_info=None, refcounts=False, shortnames=True, output=None, cull_func=None):
    if not isinstance(objs, (list, tuple)):
        objs = [objs]
    is_interactive = False
    if filename and output:
        raise ValueError('Cannot specify both output and filename.')
    elif output:
        f = output
    elif filename and filename.endswith('.dot'):
        f = codecs.open(filename, 'w', encoding='utf-8')
        dot_filename = filename
    elif IS_INTERACTIVE:
        is_interactive = True
        f = StringIO()
    else:
        fd, dot_filename = tempfile.mkstemp(prefix='objgraph-', suffix='.dot', text=True)
        f = os.fdopen(fd, 'w')
        if getattr(f, 'encoding', None):
            import io
            f = io.TextIOWrapper(f.detach(), 'utf-8')
    f.write('digraph ObjectGraph {\n  node[shape=box, style=filled, fillcolor=white];\n')
    queue = []
    depth = {}
    ignore = set(extra_ignore)
    ignore.add(id(objs))
    ignore.add(id(extra_ignore))
    ignore.add(id(queue))
    ignore.add(id(depth))
    ignore.add(id(ignore))
    ignore.add(id(sys._getframe()))
    ignore.add(id(sys._getframe().f_locals))
    ignore.add(id(sys._getframe(1)))
    ignore.add(id(sys._getframe(1).f_locals))
    for obj in objs:
        f.write('  %s[fontcolor=red];\n' % _obj_node_id(obj))
        depth[id(obj)] = 0
        queue.append(obj)
        del obj

    gc.collect()
    nodes = 0
    while queue:
        nodes += 1
        target = queue.pop(0)
        tdepth = depth[id(target)]
        f.write('  %s[label="%s"];\n' % (_obj_node_id(target), _obj_label(target, extra_info, refcounts, shortnames)))
        h, s, v = _gradient((0, 0, 1), (0, 0, 0.3), tdepth, max_depth)
        if inspect.ismodule(target):
            h = 0.3
            s = 1
        if highlight and highlight(target):
            h = 0.6
            s = 0.6
            v = 0.5 + v * 0.5
        f.write('  %s[fillcolor="%g,%g,%g"];\n' % (_obj_node_id(target),
         h,
         s,
         v))
        if v < 0.5:
            f.write('  %s[fontcolor=white];\n' % _obj_node_id(target))
        if hasattr(getattr(target, '__class__', None), '__del__'):
            f.write('  %s->%s_has_a_del[color=red,style=dotted,len=0.25,weight=10];\n' % (_obj_node_id(target), _obj_node_id(target)))
            f.write('  %s_has_a_del[label="__del__",shape=doublecircle,height=0.25,color=red,fillcolor="0,.5,1",fontsize=6];\n' % _obj_node_id(target))
        if tdepth >= max_depth:
            continue
        if cull_func is not None and cull_func(target):
            continue
        neighbours = edge_func(target)
        ignore.add(id(neighbours))
        n = 0
        skipped = 0
        for source in neighbours:
            if id(source) in ignore:
                continue
            if filter and not filter(source):
                continue
            if n >= too_many:
                skipped += 1
                continue
            if swap_source_target:
                srcnode, tgtnode = target, source
            else:
                srcnode, tgtnode = source, target
            elabel = _edge_label(srcnode, tgtnode, shortnames)
            f.write('  %s -> %s%s;\n' % (_obj_node_id(srcnode), _obj_node_id(tgtnode), elabel))
            if id(source) not in depth:
                depth[id(source)] = tdepth + 1
                queue.append(source)
            n += 1
            del source

        del neighbours
        if skipped > 0:
            h, s, v = _gradient((0, 1, 1), (0, 1, 0.3), tdepth + 1, max_depth)
            if swap_source_target:
                label = '%d more references' % skipped
                edge = '%s->too_many_%s' % (_obj_node_id(target), _obj_node_id(target))
            else:
                label = '%d more backreferences' % skipped
                edge = 'too_many_%s->%s' % (_obj_node_id(target), _obj_node_id(target))
            f.write('  %s[color=red,style=dotted,len=0.25,weight=10];\n' % edge)
            f.write('  too_many_%s[label="%s",shape=box,height=0.25,color=red,fillcolor="%g,%g,%g",fontsize=6];\n' % (_obj_node_id(target),
             label,
             h,
             s,
             v))
            f.write('  too_many_%s[fontcolor=white];\n' % _obj_node_id(target))

    f.write('}\n')
    if output:
        return
    elif is_interactive:
        return graphviz.Source(f.getvalue())
    else:
        f.close()
        print('Graph written to %s (%d nodes)' % (dot_filename, nodes))
        _present_graph(dot_filename, filename)
        return
Example #44
class Message(object):
    """
    An SSH2 message is a stream of bytes that encodes some combination of
    strings, integers, bools, and infinite-precision integers (known in Python
    as longs).  This class builds or breaks down such a byte stream.

    Normally you don't need to deal with anything this low-level, but it's
    exposed for people implementing custom extensions, or features that
    paramiko doesn't support yet.
    """

    big_int = long(0xff000000)

    def __init__(self, content=None):
        """
        Create a new SSH2 message.

        :param str content:
            the byte stream to use as the message content (passed in only when
            decomposing a message).
        """
        if content is not None:
            self.packet = StringIO(content)
        else:
            self.packet = StringIO()

    def __str__(self):
        """
        Return the byte stream content of this message, as a string/bytes obj.
        """
        return self.asbytes()

    def __repr__(self):
        """
        Returns a string representation of this object, for debugging.
        """
        return 'paramiko.Message(' + repr(self.packet.getvalue()) + ')'

    def asbytes(self):
        """
        Return the byte stream content of this Message, as bytes.
        """
        return self.packet.getvalue()

    def add_bytes(self, b):
        """
        Write bytes to the stream, without any formatting.

        :param str b: bytes to add
        """
        self.packet.write(b)
        return self

    def add_byte(self, b):
        """
        Write a single byte to the stream, without any formatting.

        :param str b: byte to add
        """
        self.packet.write(b)
        return self

    def add_boolean(self, b):
        """
        Add a boolean value to the stream.

        :param bool b: boolean value to add
        """
        if b:
            self.packet.write(one_byte)
        else:
            self.packet.write(zero_byte)
        return self

    def add_size(self, n):
        """
        Add an integer to the stream.

        :param int n: integer to add
        """
        self.packet.write(struct.pack('>I', n))
        return self

    def add_int(self, n):
        """
        Add an integer to the stream.

        :param int n: integer to add
        """
        if n >= Message.big_int:
            self.packet.write(max_byte)
            self.add_string(deflate_long(n))
        else:
            self.packet.write(struct.pack('>I', n))
        return self

    def add_int64(self, n):
        """
        Add a 64-bit int to the stream.

        :param long n: long int to add
        """
        self.packet.write(struct.pack('>Q', n))
        return self

    def add_mpint(self, z):
        """
        Add a long int to the stream, encoded as an infinite-precision
        integer.  This method only works on positive numbers.

        :param long z: long int to add
        """
        self.add_string(deflate_long(z))
        return self

    def add_string(self, s):
        """
        Add a string to the stream.

        :param str s: string to add
        """
        self.add_size(len(s))
        self.packet.write(s)
        return self

    def add_list(self, l):
        """
        Add a list of strings to the stream.  They are encoded identically to
        a single string of values separated by commas.  (Yes, really, that's
        how SSH2 does it.)

        :param list l: list of strings to add
        """
        self.add_string(','.join(l))
        return self

    def _add(self, i):
        if type(i) is bool:
            return self.add_boolean(i)
        elif isinstance(i, int):
            return self.add_int(i)
        elif type(i) is list:
            return self.add_list(i)
        else:
            return self.add_string(i)

    def add(self, *seq):
        """
        Add a sequence of items to the stream.  The values are encoded based
        on their type: str, int, bool, list, or long.

        .. warning::
            Longs are encoded non-deterministically.  Don't use this method.

        :param seq: the sequence of items
        """
        for item in seq:
            self._add(item)
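
A minimal usage sketch for the Message class above, hedged: it assumes paramiko is installed and that the matching get_* readers (get_int, get_string) are available, as they are in paramiko proper.

# Hypothetical round trip: build a message, then decompose the same bytes.
from paramiko.message import Message

msg = Message()
msg.add_int(5)            # 4-byte big-endian unsigned int
msg.add_string('hello')   # length-prefixed string
data = msg.asbytes()

parsed = Message(data)
assert parsed.get_int() == 5
assert parsed.get_string() == b'hello'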
Example #45
def error_message(args, exc):
    out = StringIO()
    parts = urlparse(args.server)

    if args.debug:
        traceback.print_exc(file=out)
        out.write('\n')

    out.write("ERROR: Custodia command '{args.sub} {args.name}' failed.\n")
    if args.verbose:
        out.write("Custodia server '{args.server}'.\n")

    if isinstance(exc, RequestsHTTPError):
        errcode = E_HTTP_ERROR
        out.write("{exc.__class__.__name__}: {exc}\n")
    elif isinstance(exc, ConnectionError):
        errcode = E_CONNECTION_ERROR
        if parts.scheme == 'http+unix':
            out.write("Failed to connect to Unix socket '{unix_path}':\n")
        else:
            out.write("Failed to connect to '{parts.netloc}' "
                      "({parts.scheme}):\n")
        # ConnectionError always contains an inner exception
        out.write("    {exc.args[0]}\n")
    elif isinstance(exc, JSONDecodeError):
        errcode = E_JSON_ERROR
        out.write("Server returned invalid JSON response:\n")
        out.write("    {exc}\n")
    else:
        errcode = E_OTHER
        out.write("{exc.__class__.__name__}: {exc}\n")

    msg = out.getvalue()
    if not msg.endswith('\n'):
        msg += '\n'
    return errcode, msg.format(args=args, exc=exc, parts=parts,
                               unix_path=unquote(parts.netloc))
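
A hedged driver for error_message(): argparse.Namespace stands in for the real parsed CLI arguments, a plain ValueError exercises the generic branch, and the E_* constants are assumed to come from the surrounding module.

# Hypothetical invocation; the attribute names mirror the placeholders above.
from argparse import Namespace

args = Namespace(server='http://localhost:8080', sub='get', name='mykey',
                 debug=False, verbose=True)
try:
    raise ValueError('boom')
except ValueError as exc:
    errcode, msg = error_message(args, exc)
print(msg)  # ERROR: Custodia command 'get mykey' failed. ...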
Example #46
    def launch_command(self,
                       main_cmd,
                       mount_points=None,
                       dry=False,
                       verbose=False):
        default_config = dict(
            image_id=self.image_id,
            instance_type=self.instance_type,
            key_name=self.aws_key_name,
            spot_price=self.spot_price,
            iam_instance_profile_name=self.iam_instance_profile_name,
            security_groups=self.security_groups,
            security_group_ids=self.security_group_ids,
            network_interfaces=[],
        )
        aws_config = dict(default_config)
        if self.s3_log_name is None:
            exp_name = "{}-{}".format(self.s3_log_prefix, self.make_timekey())
        else:
            exp_name = self.s3_log_name
        exp_prefix = self.s3_log_prefix
        s3_base_dir = os.path.join(self.aws_s3_path,
                                   exp_prefix.replace("_", "-"), exp_name)
        stdout_log_s3_path = os.path.join(s3_base_dir,
                                          'stdout_$EC2_INSTANCE_ID.log')

        sio = StringIO()
        sio.write("#!/bin/bash\n")
        sio.write("truncate -s 0 /home/ubuntu/user_data.log\n")
        sio.write("{\n")
        sio.write(
            'die() { status=$1; shift; echo "FATAL: $*"; exit $status; }\n')
        sio.write(
            'EC2_INSTANCE_ID="`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id`"\n'
        )
        sio.write("""
            aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}
        """.format(exp_name=exp_name, aws_region=self.region))
        sio.write("""
            aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=exp_prefix,Value={exp_prefix} --region {aws_region}
        """.format(exp_prefix=exp_prefix, aws_region=self.region))

        # Add swap file
        if self.gpu:
            swap_location = '/mnt/swapfile'
        else:
            swap_location = '/var/swap.1'
        sio.write(
            'sudo dd if=/dev/zero of={swap_location} bs=1M count={swap_size}\n'
            .format(swap_location=swap_location, swap_size=self.swap_size))
        sio.write('sudo mkswap {swap_location}\n'.format(
            swap_location=swap_location))
        sio.write('sudo chmod 600 {swap_location}\n'.format(
            swap_location=swap_location))
        sio.write('sudo swapon {swap_location}\n'.format(
            swap_location=swap_location))

        sio.write("service docker start\n")
        sio.write("docker --config /home/ubuntu/.docker pull {docker_image}\n".
                  format(docker_image=self.docker_image))
        sio.write("export AWS_DEFAULT_REGION={aws_region}\n".format(
            aws_region=self.s3_bucket_region))
        sio.write("""
            curl "https://s3.amazonaws.com/aws-cli/awscli-bundle.zip" -o "awscli-bundle.zip"
            unzip awscli-bundle.zip
            sudo ./awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws
        """)

        mnt_args = ''
        py_path = []
        local_output_dir_and_s3_path = []
        max_sync_interval = 0
        for mount in mount_points:
            print('Handling mount: ', mount)
            if isinstance(
                    mount,
                    MountLocal):  # TODO: these should be mount_s3 objects
                if mount.read_only:
                    if mount.path_on_remote is None:
                        with mount.gzip() as gzip_file:
                            gzip_path = os.path.realpath(gzip_file)
                            file_hash = hash_file(gzip_path)
                            s3_path = self.s3_upload(
                                gzip_path,
                                self.s3_bucket,
                                remote_filename=file_hash + '.tar')
                        mount.path_on_remote = s3_path
                        mount.local_file_hash = gzip_path
                    else:
                        file_hash = mount.local_file_hash
                        s3_path = mount.path_on_remote
                    remote_tar_name = '/tmp/' + file_hash + '.tar'
                    remote_unpack_name = '/tmp/' + file_hash
                    sio.write("aws s3 cp {s3_path} {remote_tar_name}\n".format(
                        s3_path=s3_path, remote_tar_name=remote_tar_name))
                    sio.write("mkdir -p {local_code_path}\n".format(
                        local_code_path=remote_unpack_name))
                    sio.write(
                        "tar -xvf {remote_tar_name} -C {local_code_path}\n".
                        format(local_code_path=remote_unpack_name,
                               remote_tar_name=remote_tar_name))
                    mount_point = os.path.join(
                        '/mounts', mount.mount_point.replace('~/', ''))
                    mnt_args += ' -v %s:%s' % (os.path.join(
                        remote_unpack_name, os.path.basename(
                            mount.local_dir)), mount_point)
                    if mount.pythonpath:
                        py_path.append(mount_point)
                else:
                    raise ValueError()
            elif isinstance(mount, MountS3):
                # In theory the ec2_local_dir could be some random directory,
                # but we make it the same as the mount directory for
                # convenience.
                #
                # ec2_local_dir: directory visible to ec2 spot instance
                # mount_point: directory visible to docker running inside ec2
                #              spot instance
                ec2_local_dir = mount.mount_point
                s3_path = os.path.join(s3_base_dir, mount.s3_path)
                if self.num_exps == 1:
                    stdout_log_s3_path = os.path.join(
                        s3_path, 'stdout_$EC2_INSTANCE_ID.log')
                if not mount.output:
                    raise NotImplementedError()
                local_output_dir_and_s3_path.append((ec2_local_dir, s3_path))
                sio.write(
                    "mkdir -p {remote_dir}\n".format(remote_dir=ec2_local_dir))
                mnt_args += ' -v %s:%s' % (ec2_local_dir, mount.mount_point)

                # Sync interval
                sio.write("""
                while /bin/true; do
                    aws s3 sync --exclude '*' {include_string} {log_dir} {s3_path}
                    sleep {periodic_sync_interval}
                done & echo sync initiated
                """.format(include_string=mount.include_string,
                           log_dir=ec2_local_dir,
                           s3_path=s3_path,
                           periodic_sync_interval=mount.sync_interval))
                max_sync_interval = max(max_sync_interval, mount.sync_interval)

                # Sync on terminate. This catches the case where the spot
                # instance gets terminated before the user script ends.
                #
                # This is hoping that there's at least 3 seconds between when
                # the spot instance gets marked for termination and when it
                # actually terminates.
                sio.write("""
                    while /bin/true; do
                        if [ -z $(curl -Is http://169.254.169.254/latest/meta-data/spot/termination-time | head -1 | grep 404 | cut -d \  -f 2) ]
                        then
                            logger "Running shutdown hook."
                            aws s3 cp --recursive {log_dir} {s3_path}
                            aws s3 cp /home/ubuntu/user_data.log {stdout_log_s3_path}
                            break
                        else
                            # Spot instance not yet marked for termination.
                            # This is hoping that there's at least 3 seconds
                            # between when the spot instance gets marked for
                            # termination and when it actually terminates.
                            sleep 3
                        fi
                    done & echo log sync initiated
                """.format(
                    log_dir=ec2_local_dir,
                    s3_path=s3_path,
                    stdout_log_s3_path=stdout_log_s3_path,
                ))
            else:
                raise NotImplementedError()

        sio.write("""
        while /bin/true; do
            aws s3 cp /home/ubuntu/user_data.log {stdout_log_s3_path}
            sleep {periodic_sync_interval}
        done & echo sync initiated
        """.format(stdout_log_s3_path=stdout_log_s3_path,
                   periodic_sync_interval=max_sync_interval))

        if self.gpu:
            #sio.write('echo "LSMOD NVIDIA:"\n')
            #sio.write("lsmod | grep nvidia\n")
            #sio.write("echo 'Waiting for dpkg lock...'\n")
            # wait for lock
            #sio.write("""
            #    while sudo fuser /var/lib/dpkg/lock >/dev/null 2>&1; do
            #       sleep 1
            #    done
            #""")
            #sio.write("sudo apt-get install nvidia-modprobe\n")
            #sio.write("wget -P /tmp https://github.com/NVIDIA/nvidia-docker/releases/download/v1.0.1/nvidia-docker_1.0.1-1_amd64.deb\n")
            #sio.write("sudo dpkg -i /tmp/nvidia-docker*.deb && rm /tmp/nvidia-docker*.deb\n")
            sio.write("""
                for i in {1..800}; do su -c "nvidia-modprobe -u -c=0" ubuntu && break || sleep 3; done
                systemctl start nvidia-docker
            """)
            sio.write("echo 'Testing nvidia-smi'\n")
            sio.write("nvidia-smi\n")
            sio.write("echo 'Testing nvidia-smi inside docker'\n")
            sio.write(
                "nvidia-docker run --rm {docker_image} nvidia-smi\n".format(
                    docker_image=self.docker_image))

        if self.checkpoint and self.checkpoint.restore:
            raise NotImplementedError()
        else:
            docker_cmd = self.get_docker_cmd(main_cmd,
                                             use_tty=False,
                                             extra_args=mnt_args,
                                             pythonpath=py_path,
                                             use_docker_generated_name=True)
        assert self.num_exps > 0
        for _ in range(self.num_exps - 1):
            sio.write(docker_cmd + ' &\n')
        sio.write(docker_cmd + '\n')

        # Sync all output mounts to s3 after running the user script
        # Ideally the earlier while loop would be sufficient, but it might be
        # the case that the earlier while loop isn't fast enough to catch a
        # termination. So, we explicitly sync on termination.
        for (local_output_dir, s3_dir_path) in local_output_dir_and_s3_path:
            sio.write("aws s3 cp --recursive {local_dir} {s3_dir}\n".format(
                local_dir=local_output_dir, s3_dir=s3_dir_path))

        sio.write("aws s3 cp /home/ubuntu/user_data.log {}\n".format(
            stdout_log_s3_path, ))

        # Wait for last sync
        if max_sync_interval > 0:
            sio.write("sleep {}\n".format(max_sync_interval + 5))

        if self.terminate:
            sio.write("""
                EC2_INSTANCE_ID="`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id || die \"wget instance-id has failed: $?\"`"
                aws ec2 terminate-instances --instance-ids $EC2_INSTANCE_ID --region {aws_region}
            """.format(aws_region=self.region))
        sio.write("} >> /home/ubuntu/user_data.log 2>&1\n")

        full_script = dedent(sio.getvalue())
        import boto3
        import botocore
        ec2 = boto3.client(
            "ec2",
            region_name=self.region,
            aws_access_key_id=self.credentials.aws_key,
            aws_secret_access_key=self.credentials.aws_secret_key,
        )

        if len(full_script) > 10000 or len(
                base64.b64encode(
                    full_script.encode()).decode("utf-8")) > 10000:
            s3_path = self.upload_file_to_s3(full_script, dry=dry)
            sio = StringIO()
            sio.write("#!/bin/bash\n")
            sio.write("""
            aws s3 cp {s3_path} /home/ubuntu/remote_script.sh --region {aws_region} && \\
            chmod +x /home/ubuntu/remote_script.sh && \\
            bash /home/ubuntu/remote_script.sh
            """.format(s3_path=s3_path, aws_region=self.s3_bucket_region))
            user_data = dedent(sio.getvalue())
        else:
            user_data = full_script

        if verbose:
            print(full_script)
            with open("/tmp/full_ec2_script", "w") as f:
                f.write(full_script)

        instance_args = dict(
            ImageId=aws_config["image_id"],
            KeyName=aws_config["key_name"],
            UserData=user_data,
            InstanceType=aws_config["instance_type"],
            EbsOptimized=False,
            SecurityGroups=aws_config["security_groups"],
            SecurityGroupIds=aws_config["security_group_ids"],
            NetworkInterfaces=aws_config["network_interfaces"],
            IamInstanceProfile=dict(
                Name=aws_config["iam_instance_profile_name"], ),
            #**config.AWS_EXTRA_CONFIGS,
        )
        if self.extra_ec2_instance_kwargs is not None:
            instance_args.update(self.extra_ec2_instance_kwargs)

        if verbose:
            print(
                "************************************************************")
            print('UserData:', instance_args["UserData"])
            print(
                "************************************************************")
        instance_args["UserData"] = base64.b64encode(
            instance_args["UserData"].encode()).decode("utf-8")
        spot_args = dict(
            DryRun=dry,
            InstanceCount=1,
            LaunchSpecification=instance_args,
            SpotPrice=aws_config["spot_price"],
            # ClientToken=params_list[0]["exp_name"],
        )

        import pprint

        if verbose:
            pprint.pprint(spot_args)
        if not dry:
            response = ec2.request_spot_instances(**spot_args)
            print('Launched EC2 job - Server response:')
            pprint.pprint(response)
            print('*****' * 5)
            spot_request_id = response['SpotInstanceRequests'][0][
                'SpotInstanceRequestId']
            for _ in range(10):
                try:
                    ec2.create_tags(
                        Resources=[spot_request_id],
                        Tags=[{
                            'Key': 'Name',
                            'Value': exp_name
                        }],
                    )
                    break
                except botocore.exceptions.ClientError:
                    continue
Example #47
def serialize(subpours):
    buf = StringIO()
    for pour in subpours:
        buf.write('LINEAR\n')
        # theta initial and rate
        buf.write('0.0\n' + str(pour.angle_rate) + '\n')
        # radius initial and rate
        buf.write('0.0\n' + str(pour.radius_rate) + '\n')
        # radius scale (in)
        buf.write('2.0\n')
        # time and pump on or off
        buf.write(str(float(pour.duration)) + '\n1\n')
        # temperature (F)
        buf.write('67\n')
        buf.write('LINEAR\n')
        buf.write('0.0\n0.0\n')
        radius = pour.radius_rate * pour.duration
        buf.write(str(radius) + '\n' + str(-radius/2.0) + '\n')
        buf.write('2.0\n')
        buf.write('2.0\n1\n')
        buf.write('200\n')
    buf.write('END\n')
    return buf.getvalue()
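
To see the text format serialize() emits, a hypothetical Pour record carrying the three attributes the function reads is enough (angle_rate, radius_rate, and duration are inferred from the code above, not from any documented API):

# Hypothetical input record; serialize() only touches these three fields.
from collections import namedtuple

Pour = namedtuple('Pour', 'angle_rate radius_rate duration')
print(serialize([Pour(angle_rate=0.5, radius_rate=0.1, duration=4)]))
# Starts with: LINEAR / 0.0 / 0.5 / 0.0 / 0.1 / 2.0 / 4.0 / 1 / 67 ... END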
Example #48
def CmdTask(args=[], workdir=None):
    currdir = None
    if workdir:
        currdir = os.getcwdu()
        os.chdir(workdir)
    si = None
    if os.name != 'posix':
        # On Windows, also import the flags needed to hide the console window.
        from subprocess import STARTUPINFO, STARTF_USESHOWWINDOW, SW_HIDE
        si = STARTUPINFO()
        si.dwFlags |= STARTF_USESHOWWINDOW
        si.wShowWindow = SW_HIDE
    popen_args = {
        'args': args,
        'stdout': PIPE,
        'stderr': STDOUT,
        'universal_newlines': True,
        'startupinfo': si,
        'shell': False,  # mencoder cannot be terminated if shell is True
    }

    code = 0
    buf = StringIO()
    pos = 0
    p = Popen(**popen_args)
    try:
        cmdline = ' '.join(args)
        (yield TaskOutput('START: {} ...'.format(cmdline)))
        if popen_args['universal_newlines']:
            while True:
                line = p.stdout.readline()
                if line == "": break
                if not (yield TaskOutput(line, OutputType.OUTPUT)):
                    raise CommandTerminated()
        else:
            lastline = ""
            while True:
                line = p.stdout.read(512)
                if line == "": break
                buf.seek(0)
                buf.write(line.replace("\r", "\n"))
                buf.seek(0)
                while True:
                    l = buf.readline()
                    if not l.endswith("\n"):
                        lastline = l
                        break
                    outline = lastline + l.rstrip()
                    lastline = ""
                    if outline == "":
                        continue
                    if not (yield TaskOutput(outline, OutputType.OUTPUT)):
                        raise CommandTerminated()

        (yield TaskOutput('END: {} ...'.format(args[0])))
    except CommandTerminated:
        code = -2
        (yield TaskOutput('TERMINATED: {}'.format(args[0]), OutputType.WARN))
        try:
            p.terminate()
            p.wait()
        except:
            pass
    except Exception as ex:
        code = -1
        errmsg = ex.message or ex.args[-1]
        (yield TaskOutput(errmsg, OutputType.ERROR))
    finally:
        buf.close()
        if currdir:
            os.chdir(currdir)
        (yield TaskOutput('EXIT {}'.format(code), OutputType.NOTIFY))
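
CmdTask is a generator-based coroutine: each yielded TaskOutput must be answered through send(), and a falsy reply requests termination. A minimal driver sketch, assuming a command that exists on the host:

# Hypothetical driver loop for the CmdTask generator above.
task = CmdTask(['ping', '-c', '3', 'localhost'])
try:
    out = next(task)             # primes the generator; yields the START line
    while True:
        print(out)               # a TaskOutput instance
        out = task.send(True)    # a truthy reply means "keep going"
except StopIteration:
    pass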
Example #49
def _serialize(obj, keypos):
    if keypos:
        if isinstance(obj, (int, long, float, bool)):
            return ('i:%i;' % obj).encode('latin1')
        if isinstance(obj, basestring):
            encoded_obj = obj
            if isinstance(obj, unicode):
                encoded_obj = obj.encode(charset, errors)
            s = BytesIO()
            s.write(b's:')
            s.write(str(len(encoded_obj)).encode('latin1'))
            s.write(b':"')
            s.write(encoded_obj)
            s.write(b'";')
            return s.getvalue()
        if obj is None:
            return b's:0:"";'
        raise TypeError("can't serialize %r as key" % type(obj))
    else:
        if obj is None:
            return b'N;'
        if isinstance(obj, bool):
            return ('b:%i;' % obj).encode('latin1')
        if isinstance(obj, (int, long)):
            return ('i:%s;' % obj).encode('latin1')
        if isinstance(obj, float):
            return ('d:%s;' % obj).encode('latin1')
        if isinstance(obj, basestring):
            encoded_obj = obj
            if isinstance(obj, unicode):
                encoded_obj = obj.encode(charset, errors)
            s = BytesIO()
            s.write(b's:')
            s.write(str(len(encoded_obj)).encode('latin1'))
            s.write(b':"')
            s.write(encoded_obj)
            s.write(b'";')
            return s.getvalue()
        if isinstance(obj, (list, tuple, dict)):
            out = []
            if isinstance(obj, dict):
                iterable = obj.items()
            else:
                iterable = enumerate(obj)
            for key, value in iterable:
                out.append(_serialize(key, True))
                out.append(_serialize(value, False))
            return b''.join([
                b'a:',
                str(len(obj)).encode('latin1'),
                b':{',
                b''.join(out),
                b'}'
            ])
        if isinstance(obj, phpobject):
            return b'O' + _serialize(obj.__name__, True)[1:-1] + \
                   _serialize(obj.__php_vars__, False)[1:]
        if object_hook is not None:
            return _serialize(object_hook(obj), False)
        raise TypeError("can't serialize %r" % type(obj))
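
In the phpserialize module this helper is nested inside the public serializer, with charset, errors, object_hook, and phpobject supplied by the enclosing scope, so it is not directly importable. For plain data its output follows PHP's serialize() wire format:

# Expected byte output for a few plain values (PHP serialize() format):
#   _serialize(True, False)      -> b'b:1;'
#   _serialize(42, False)        -> b'i:42;'
#   _serialize('ab', False)      -> b's:2:"ab";'
#   _serialize({'a': 1}, False)  -> b'a:1:{s:1:"a";i:1;}'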
Example #50
    def syndicateThis(self, lang=None):
        """ Render this comment as a fragment of an RSS 1.0 feed """
        out = StringIO()
        if lang is None:
            lang = self.gl_get_selected_language()

        title = "Comment by %s: %s" % (self.author, self.title)
        out.write('<item rdf:about="%s">' % self.absolute_url())
        out.write('<link>%s</link>' % self.absolute_url())
        out.write('<title>%s</title>' % self.utXmlEncode(title))
        out.write('<description><![CDATA[%s]]></description>' %
                  self.utToUtf8(self.body))
        out.write('<dc:title>%s</dc:title>' % self.utXmlEncode(title))
        out.write('<dc:date>%s</dc:date>' %
                  self.utShowFullDateTimeHTML(self.releasedate))
        out.write('<dc:description><![CDATA[%s]]></dc:description>' %
                  self.utToUtf8(self.body))
        out.write('<dc:contributor>%s</dc:contributor>' %
                  self.utXmlEncode(self.author))
        out.write('<dc:language>%s</dc:language>' % self.utXmlEncode(lang))
        out.write('</item>')
        return out.getvalue()
Example #51
    def write_tsv(self, filename):
        """Write the transposed results table as a tab-separated file.

        Transposition of the results table puts the models in rows and the
        attributes in the columns, which is more suitable for most
        machine-learning/analysis algorithms.
        """

        output = StringIO()

        # Add the header line
        output.write('model_name\t')
        output.write('\t'.join([r.func_name for r in self.reporters]))
        output.write('\n')

        # Transpose the results list
        results = zip(*self.results)

        for model_name, result_row in zip(self.names, results):
            output.write(model_name + '\t')
            output.write('\t'.join([r.get_text() for r in result_row]))
            output.write('\n')

        with open(filename, 'w') as f:
            f.write(output.getvalue())
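
The transposition step is plain zip(*rows); a standalone illustration of the flip:

# Minimal illustration of the zip(*...) transpose used above.
rows = [('m1_r1', 'm2_r1'),   # one value per model for the first reporter
        ('m1_r2', 'm2_r2')]   # one value per model for the second reporter
print(list(zip(*rows)))       # [('m1_r1', 'm1_r2'), ('m2_r1', 'm2_r2')]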
Example #52
    def _process_page(self, page, author=None, comment=None, ipnr=None):

        if self.boilerplate_start_re.search(page.text) is not None:

            # If the audit info isn't available, grab it from the boilerplate page.
            if author is None or comment is None:
                page = WikiPage(self.env, page.name)
            if author is None:
                author = page.author
            if comment is None:
                comment = page.comment
            if ipnr is None:
                ipnr = '127.0.0.1'  # I don't know what else to do here.

            # Extract the boilerplate text and the wanted pages.
            buf = StringIO()
            page_list = {}
            inboilerplate = False
            for line in page.text.splitlines():
                if inboilerplate:
                    if self.boilerplate_end_re.search(line) is not None:
                        inboilerplate = False
                    else:
                        buf.write(line)
                        buf.write('\n')
                else:
                    if self.boilerplate_start_re.search(line) is not None:
                        inboilerplate = True
                    else:
                        if line.startswith('||') and line.endswith(
                                '||') and line[3] != "'":
                            try:
                                descriptor = ([
                                    i.strip()
                                    for i in line.strip('||').split('||')
                                ])
                                name = descriptor[0]
                                arguments = descriptor[1:]
                                m = self.extractpagename_re.search(name)
                                if m is not None:
                                    name = m.string[m.start(1):m.end(1)]
                                    self.env.log.warning("extracted name = " +
                                                         name)
                                page_list[name] = arguments
                            except Exception, e:
                                self.env.log.warning(
                                    "Invalid page line: %s (%s)", line, e)

            # Generate the derived pages as instructed.
            page_names = page_list.keys()
            page_names.sort()
            for name in page_names:
                text = buf.getvalue()
                args = page_list[name]
                text = text.replace('{{0}}', name)
                for i, arg in enumerate(args):
                    text = text.replace('{{%d}}' % (i + 1), arg)
                newpage = WikiPage(self.env, name)
                if newpage.text != text:
                    newpage.text = text
                    newpage.save(author, comment, ipnr)
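
The derived-page expansion above is plain positional substitution: {{0}} is replaced by the page name and {{N}} by the N-th argument from the wanted-pages table row. A self-contained sketch of just that step:

# Standalone sketch of the {{N}} placeholder substitution used above.
template = 'This is {{0}}, generated with {{1}} and {{2}}.'
name, args = 'SubPage', ['alpha', 'beta']
text = template.replace('{{0}}', name)
for i, arg in enumerate(args):
    text = text.replace('{{%d}}' % (i + 1), arg)
print(text)  # This is SubPage, generated with alpha and beta.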
Example #53

def print2both(out1, out2, s):
    out1.write(s)
    out2.write(s)


for p in ppdb_size_list:
    for d in dist_list:
        print2both(out1, out2, p + " " + d + "\n")
        print >> out1, "\t".join(["knn"] + cca_dim_list[:-1])
        print >> out2, "\t".join(["knn"] + cca_app_list)
        for k in knnK_list:
            print2both(out1, out2, k)
            for cd in cca_dim_list:
                out1.write("\t" + results["_".join(
                    [p, d, k, "1" if cd == "0" else "0", cd, "0", "0"])][:5])
            for ca in cca_app_list:
                out2.write("\t" +
                           results["_".join([p, d, k, "0", "0", "1", ca])][:5])
            print2both(out1, out2, "\n")
        print2both(out1, out2, "\n")
print out1.getvalue()
print out2.getvalue()
print "-4 indicates OutOfMemory"
print "-3 indicates job not complete"
print "-2 indicates that job crashed"
print "-1 indicates that there weren't enough labels in a class"
for k, e in fail_jid.items():
    print k, jid2param[k], e
Example #54
    def overlay(self,
                seg,
                position=0,
                loop=False,
                times=None,
                gain_during_overlay=None):
        """
        Overlay the provided segment on to this segment starting at the
        specified position and using the specified looping behavior.

        seg (AudioSegment):
            The audio segment to overlay on to this one.

        position (optional int):
            The position to start overlaying the provided segment in to this
            one.

        loop (optional bool):
            Loop seg as many times as necessary to match this segment's length.
            Overrides the times param.

        times (optional int):
            Loop seg the specified number of times or until it matches this
            segment's length. 1 means once, 2 means twice, ... 0 would make the
            call a no-op.

        gain_during_overlay (optional int):
            Changes this segment's volume by the specified amount during the
            duration of time that seg is overlaid on top of it. When negative,
            this has the effect of 'ducking' the audio under the overlay.
        """

        if loop:
            # match loop=True's behavior with new times (count) mechanism.
            times = -1
        elif times is None:
            # no times specified, just once through
            times = 1
        elif times == 0:
            # it's a no-op, make a copy since we never mutate
            return self._spawn(self._data)

        output = StringIO()

        seg1, seg2 = AudioSegment._sync(self, seg)
        sample_width = seg1.sample_width
        spawn = seg1._spawn

        output.write(seg1[:position]._data)

        # drop down to the raw data
        seg1 = seg1[position:]._data
        seg2 = seg2._data
        pos = 0
        seg1_len = len(seg1)
        seg2_len = len(seg2)
        while times:
            remaining = max(0, seg1_len - pos)
            if seg2_len >= remaining:
                seg2 = seg2[:remaining]
                seg2_len = remaining
                # we've hit the end, we're done looping (if we were) and this
                # is our last go-around
                times = 1

            if gain_during_overlay:
                seg1_overlaid = seg1[pos:pos + seg2_len]
                seg1_adjusted_gain = audioop.mul(
                    seg1_overlaid, self.sample_width,
                    db_to_float(float(gain_during_overlay)))
                output.write(
                    audioop.add(seg1_adjusted_gain, seg2, sample_width))
            else:
                output.write(
                    audioop.add(seg1[pos:pos + seg2_len], seg2, sample_width))
            pos += seg2_len

            # dec times to break our while loop (eventually)
            times -= 1

        output.write(seg1[pos:])

        return spawn(data=output)
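
A hedged pydub-style usage sketch for overlay(): from_file, export, and the keyword arguments are as in pydub's public API, but the file names are placeholders.

# Hypothetical usage; assumes pydub is installed and the files exist.
from pydub import AudioSegment

music = AudioSegment.from_file('music.wav')
voice = AudioSegment.from_file('voice.wav')
# Duck the music by 6 dB while the voice plays, starting 2 s in.
mixed = music.overlay(voice, position=2000, gain_during_overlay=-6)
mixed.export('mixed.wav', format='wav')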
Example #55
def color_url(path, **kwargs):
    """Color the parts of the url according to Spack's parsing.

       Colors are:
          Cyan: The version found by parse_version_offset().
          Red:  The name found by parse_name_offset().

          Green:   Instances of version string substituted by substitute_version().
          Magenta: Instances of the name (protected from substitution).

       Optional args:
          errors=True    Append parse errors at end of string.
          subs=True      Color substitutions as well as parsed name/version.

    """
    errors = kwargs.get('errors', False)
    subs   = kwargs.get('subs', False)

    (name, ns, nl, noffs,
     ver,  vs, vl, voffs) = substitution_offsets(path)

    nends = [no + nl - 1 for no in noffs]
    vends = [vo + vl - 1 for vo in voffs]

    nerr = verr = 0
    out = StringIO()
    for i in range(len(path)):
        if   i == vs:    out.write('@c'); verr += 1
        elif i == ns:    out.write('@r'); nerr += 1
        elif subs:
            if i in voffs: out.write('@g')
            elif i in noffs: out.write('@m')

        out.write(path[i])

        if   i == vs + vl - 1:  out.write('@.'); verr += 1
        elif i == ns + nl - 1:  out.write('@.'); nerr += 1
        elif subs:
            if i in vends or i in nends:
                out.write('@.')

    if errors:
        if nerr == 0: out.write(" @r{[no name]}")
        if verr == 0: out.write(" @r{[no version]}")
        if nerr == 1: out.write(" @r{[incomplete name]}")
        if verr == 1: out.write(" @r{[incomplete version]}")

    return colorize(out.getvalue())
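
A short usage sketch, hedged: it assumes Spack's context, where colorize() expands the @c/@r/@g/@m escapes written above and substitution_offsets() is importable.

# Hypothetical call; the URL is a placeholder.
url = 'http://example.com/libfoo-1.2.3.tar.gz'
print(color_url(url, subs=True, errors=True))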
Example #56
class RequestSendFileTestCase(unittest.TestCase):
    def setUp(self):
        self.status = None
        self.headers = None
        self.response = StringIO()
        self.dir = tempfile.mkdtemp(prefix='trac-')
        self.filename = os.path.join(self.dir, 'test.txt')
        self.data = 'contents\n'
        create_file(self.filename, self.data, 'wb')
        self.req = None

    def tearDown(self):
        if self.req and self.req._response:
            self.req._response.close()
        shutil.rmtree(self.dir)

    def _start_response(self, status, headers):
        self.status = status
        self.headers = dict(headers)

        def write(data):
            self.response.write(data)

        return write

    def _create_req(self,
                    use_xsendfile=False,
                    xsendfile_header='X-Sendfile',
                    **kwargs):
        req = Request(_make_environ(**kwargs), self._start_response)
        req.callbacks.update({
            'use_xsendfile': lambda r: use_xsendfile,
            'xsendfile_header': lambda r: xsendfile_header
        })
        self.req = req
        return req

    def test_send_file(self):
        req = self._create_req()
        self.assertRaises(RequestDone, req.send_file, self.filename,
                          'text/plain')
        self.assertEqual('200 Ok', self.status)
        self.assertEqual('text/plain', self.headers['Content-Type'])
        self.assertEqual(str(len(self.data)), self.headers['Content-Length'])
        self.assertNotIn('X-Sendfile', self.headers)
        self.assertEqual(self.data, ''.join(req._response))
        self.assertEqual('', self.response.getvalue())

    def test_send_file_with_xsendfile(self):
        req = self._create_req(use_xsendfile=True)
        self.assertRaises(RequestDone, req.send_file, self.filename,
                          'text/plain')
        self.assertEqual('200 Ok', self.status)
        self.assertEqual('text/plain', self.headers['Content-Type'])
        self.assertEqual(self.filename, self.headers['X-Sendfile'])
        self.assertEqual(None, req._response)
        self.assertEqual('', self.response.getvalue())

    def test_send_file_with_xsendfile_header(self):
        req = self._create_req(use_xsendfile=True,
                               xsendfile_header='X-Accel-Redirect')
        self.assertRaises(RequestDone, req.send_file, self.filename,
                          'text/plain')
        self.assertEqual('200 Ok', self.status)
        self.assertEqual('text/plain', self.headers['Content-Type'])
        self.assertEqual(self.filename, self.headers['X-Accel-Redirect'])
        self.assertNotIn('X-Sendfile', self.headers)
        self.assertEqual(None, req._response)
        self.assertEqual('', self.response.getvalue())

    def test_send_file_with_xsendfile_and_empty_header(self):
        req = self._create_req(use_xsendfile=True, xsendfile_header='')
        self.assertRaises(RequestDone, req.send_file, self.filename,
                          'text/plain')
        self.assertEqual('200 Ok', self.status)
        self.assertEqual('text/plain', self.headers['Content-Type'])
        self.assertEqual(str(len(self.data)), self.headers['Content-Length'])
        self.assertNotIn('X-Sendfile', self.headers)
        self.assertEqual(self.data, ''.join(req._response))
        self.assertEqual('', self.response.getvalue())
Example #57
    def __write_chk__(self, out_file):
        """pysimm.cassandra.__write_chk__

        Creates the CASSANDRA checkpoint file based on the information from the `~GCMC.tot_sst` field
        """
        # Initializing output stream
        if out_file == 'string':
            out_stream = StringIO()
        else:
            out_stream = open(out_file, 'w+')
        blk_separ = ' {:*^75}\n'

        # Writing Translation/rotation/... info
        out_stream.write(
            blk_separ.format(
                'Translation,rotation, dihedral, angle distortion'))
        tmplate = '{t[0]$$}{t[1]$$}{t[2]$$}{t[3]$$}{t[4]$$}\n'
        molecules = self.props['Molecule_Files'].value
        for m, i in zip(molecules, range(len(molecules))):
            out_stream.write(
                tmplate.replace('$$', ':>6d').format(t=[i + 1, 0, 0, 0, 0]))
            out_stream.write(
                tmplate.replace('$$', ':>6d').format(t=[i + 1, 0, 0, 0, 0]))
            # TODO: There are some nonzeros in example .chk file for index 2; check where they come from
            out_stream.write(
                '{t[0]:>23.14E}{t[1]:>23.14E}{t[2]:>23.14E}\n'.format(
                    t=[0, 0, 0]))
            out_stream.write('{0:>12d}{1:>12d}\n'.format(0, 0))

        # Small section with total # of MC trials -- it is 0 at the beginning
        out_stream.write(blk_separ.format('# of MC steps'))
        out_stream.write('{:>12d}\n'.format(0))

        # Writing Box-info information
        out_stream.write(blk_separ.format('Box info'))
        tmp = self.props['Box_Info'].value['box_size']
        x, y, z = 0, 0, 0
        bx_type = None
        if isinstance(tmp, types.ListType):
            if len(tmp) > 3:
                x, y, z = tmp[0], tmp[1], tmp[2]
        elif isinstance(tmp, int) or isinstance(tmp, float):
            x, y, z = tmp, tmp, tmp
        else:
            exit(0)

        # First 0 here correspond to the # of trials
        out_stream.write('{0:>12d}\n{1:<18.10f}\n{2:}\n'.format(
            0, x * y * z, self.props['Box_Info'].value['box_type']))

        tmpl = '{t[0]&&}{t[1]&&}{t[2]&&}\n'
        tmp = np.diag([x, y, z])
        for lines in tmp:
            out_stream.write((tmpl.replace('&&', ':^22.14f')).format(t=lines))

        tmp = np.diag([1 / x, 1 / y, 1 / z])
        for lines in tmp:
            out_stream.write((tmpl.replace('&&', ':^22.8f')).format(t=lines))
        out_stream.write('{:>18.12f}\n'.format(0))

        # Creating seeds
        out_stream.write(blk_separ.format('SEEDS'))
        out_stream.write(
            '{t[0]:>12d}{t[1]:>12d}{t[2]:>12d}\n{t[3]:>12d}{t[4]:>12d}\n'.
            format(t=np.random.random_integers(int(1e+7), int(1e+8 - 1), 5)))

        # Writing total number of molecules by species
        out_stream.write(
            blk_separ.format('Info for total number of molecules'))
        out_stream.write('{0:>11d}{1:>11d}\n'.format(
            1, 1))  # Currently only one polymer "molecule" in the simulation
        for i in range(1, len(molecules)):
            out_stream.write('{0:>11d}{1:>11d}\n'.format(i + 1, 0))

        out_stream.write(blk_separ.format('Writing coordinates of all boxes'))
        # Writing coordinates of atoms in all boxes
        line_template = '{l[0]:<5}{l[1]:<25.15f}{l[2]:<25.15f}{l[3]:<25.15f}{l[4]:>10d}\n'
        for parts in self.tot_sst.particles:
            try:
                out_stream.write(
                    line_template.format(
                        l=[parts.type.name, parts.x, parts.y, parts.z, 1]))
            except:
                continue
        out_stream.close()
Example #58
def extract_props(data):
    # 'data' is the raw bytes of a zip/jar archive; ZipFile can read the
    # buffer directly, and jar.read() already returns the file's contents.
    jar = zipfile.ZipFile(StringIO(data))
    return jar.read('config.xml')
Example #59
class ResponseGroup(xml.sax.ContentHandler):
    """A Generic "Response Group", which can
    be anything from the entire list of Items to
    specific response elements within an item"""
    def __init__(self, connection=None, nodename=None):
        """Initialize this Item"""
        self._connection = connection
        self._nodename = nodename
        self._nodepath = []
        self._curobj = None
        self._xml = StringIO()

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.__dict__)

    #
    # Attribute Functions
    #
    def get(self, name):
        return self.__dict__.get(name)

    def set(self, name, value):
        self.__dict__[name] = value

    def to_xml(self):
        return "<%s>%s</%s>" % (self._nodename, self._xml.getvalue(),
                                self._nodename)

    #
    # XML Parser functions
    #
    def startElement(self, name, attrs, connection):
        self._xml.write("<%s>" % name)
        self._nodepath.append(name)
        if len(self._nodepath) == 1:
            obj = ResponseGroup(self._connection)
            self.set(name, obj)
            self._curobj = obj
        elif self._curobj:
            self._curobj.startElement(name, attrs, connection)
        return None

    def endElement(self, name, value, connection):
        self._xml.write(
            "%s</%s>" %
            (cgi.escape(value).replace("&amp;amp;", "&amp;"), name))
        if len(self._nodepath) == 0:
            return
        obj = None
        curval = self.get(name)
        if len(self._nodepath) == 1:
            if value or not curval:
                self.set(name, value)
            if self._curobj:
                self._curobj = None
        #elif len(self._nodepath) == 2:
        #self._curobj = None
        elif self._curobj:
            self._curobj.endElement(name, value, connection)
        self._nodepath.pop()
        return None
Example #60
def launch_ec2(params_list,
               exp_prefix,
               docker_image,
               code_full_path,
               script='scripts/run_experiment.py',
               aws_config=None,
               dry=False,
               terminate_machine=True,
               use_gpu=False):
    if len(params_list) == 0:
        return

    default_config = dict(
        image_id=config.AWS_IMAGE_ID,
        instance_type=config.AWS_INSTANCE_TYPE,
        key_name=config.AWS_KEY_NAME,
        spot=config.AWS_SPOT,
        spot_price=config.AWS_SPOT_PRICE,
        iam_instance_profile_name=config.AWS_IAM_INSTANCE_PROFILE_NAME,
        security_groups=config.AWS_SECURITY_GROUPS,
    )

    if aws_config is None:
        aws_config = dict()
    aws_config = dict(default_config, **aws_config)

    sio = StringIO()
    sio.write("#!/bin/bash\n")
    sio.write("{\n")
    sio.write("""
        die() { status=$1; shift; echo "FATAL: $*"; exit $status; }
    """)
    sio.write("""
        EC2_INSTANCE_ID="`wget -q -O - http://instance-data/latest/meta-data/instance-id`"
    """)
    sio.write("""
        aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}
    """.format(exp_name=params_list[0].get("exp_name"),
               aws_region=config.AWS_REGION_NAME))
    sio.write("""
        service docker start
    """)
    sio.write("""
        docker --config /home/ubuntu/.docker pull {docker_image}
    """.format(docker_image=docker_image))
    sio.write("""
        aws s3 cp --recursive {code_full_path} {local_code_path} --region {aws_region}
    """.format(code_full_path=code_full_path,
               local_code_path=config.DOCKER_CODE_DIR,
               aws_region=config.AWS_REGION_NAME))
    sio.write("""
        cd {local_code_path}
    """.format(local_code_path=config.DOCKER_CODE_DIR))

    for params in params_list:
        log_dir = params.get("log_dir")
        remote_log_dir = params.pop("remote_log_dir")
        env = params.pop("env", None)

        sio.write("""
            aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}
        """.format(exp_name=params.get("exp_name"),
                   aws_region=config.AWS_REGION_NAME))
        sio.write("""
            mkdir -p {log_dir}
        """.format(log_dir=log_dir))
        sio.write("""
            while /bin/true; do
                aws s3 sync --exclude *.pkl {log_dir} {remote_log_dir} --region {aws_region}
                sleep 5
            done & echo sync initiated""".format(
            log_dir=log_dir,
            remote_log_dir=remote_log_dir,
            aws_region=config.AWS_REGION_NAME))
        sio.write("""
            {command}
        """.format(
            command=to_docker_command(params,
                                      docker_image,
                                      script,
                                      use_gpu=use_gpu,
                                      env=env,
                                      local_code_dir=config.DOCKER_CODE_DIR)))
        sio.write("""
            aws s3 cp --recursive {log_dir} {remote_log_dir} --region {aws_region}
        """.format(log_dir=log_dir,
                   remote_log_dir=remote_log_dir,
                   aws_region=config.AWS_REGION_NAME))
        sio.write("""
            aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log --region {aws_region}
        """.format(remote_log_dir=remote_log_dir,
                   aws_region=config.AWS_REGION_NAME))

    if terminate_machine:
        sio.write("""
            EC2_INSTANCE_ID="`wget -q -O - http://instance-data/latest/meta-data/instance-id || die \"wget instance-id has failed: $?\"`"
            aws ec2 terminate-instances --instance-ids $EC2_INSTANCE_ID --region {aws_region}
        """.format(aws_region=config.AWS_REGION_NAME))
    sio.write("} >> /home/ubuntu/user_data.log 2>&1\n")

    full_script = dedent(sio.getvalue())

    import boto3
    import botocore
    if aws_config["spot"]:
        ec2 = boto3.client(
            "ec2",
            region_name=config.AWS_REGION_NAME,
            aws_access_key_id=config.AWS_ACCESS_KEY,
            aws_secret_access_key=config.AWS_ACCESS_SECRET,
        )
    else:
        ec2 = boto3.resource(
            "ec2",
            region_name=config.AWS_REGION_NAME,
            aws_access_key_id=config.AWS_ACCESS_KEY,
            aws_secret_access_key=config.AWS_ACCESS_SECRET,
        )

    if len(full_script) > 10000 or len(base64.b64encode(full_script)) > 10000:
        # Script too long; need to upload script to s3 first.
        # We're being conservative here since the actual limit is 16384 bytes
        s3_path = upload_file_to_s3(full_script)
        sio = StringIO()
        sio.write("#!/bin/bash\n")
        sio.write("""
        aws s3 cp {s3_path} /home/ubuntu/remote_script.sh --region {aws_region} && \\
        chmod +x /home/ubuntu/remote_script.sh && \\
        bash /home/ubuntu/remote_script.sh
        """.format(s3_path=s3_path, aws_region=config.AWS_REGION_NAME))
        user_data = dedent(sio.getvalue())
    else:
        user_data = full_script

    instance_args = dict(
        ImageId=aws_config["image_id"],
        KeyName=aws_config["key_name"],
        UserData=user_data,
        InstanceType=aws_config["instance_type"],
        EbsOptimized=True,
        SecurityGroups=aws_config["security_groups"],
        IamInstanceProfile=dict(
            Name=aws_config["iam_instance_profile_name"], ),
    )
    if aws_config.get("placement", None) is not None:
        instance_args["Placement"] = aws_config["placement"]
    if not aws_config["spot"]:
        instance_args["MinCount"] = 1
        instance_args["MaxCount"] = 1
    print "************************************************************"
    print instance_args["UserData"]
    print "************************************************************"
    if aws_config["spot"]:
        instance_args["UserData"] = base64.b64encode(instance_args["UserData"])
        spot_args = dict(
            DryRun=dry,
            InstanceCount=1,
            LaunchSpecification=instance_args,
            SpotPrice=aws_config["spot_price"],
            ClientToken=params_list[0]["exp_name"],
        )
        import pprint
        pprint.pprint(spot_args)
        if not dry:
            response = ec2.request_spot_instances(**spot_args)
            print response
            spot_request_id = response['SpotInstanceRequests'][0][
                'SpotInstanceRequestId']
            for _ in range(10):
                try:
                    ec2.create_tags(
                        Resources=[spot_request_id],
                        Tags=[{
                            'Key': 'Name',
                            'Value': params_list[0]["exp_name"]
                        }],
                    )
                    break
                except botocore.exceptions.ClientError:
                    continue
    else:
        import pprint
        pprint.pprint(instance_args)
        ec2.create_instances(DryRun=dry, **instance_args)