class StorageFile(File):
    """A File wrapper that defers reads and writes to a storage backend.

    Written content is buffered in memory and flushed to the backend on
    close(); reads are delegated lazily to the backend's read handle.
    """

    def __init__(self, name, mode, storage):
        self.name = name
        self.mode = mode
        # BUGFIX: was `StringIO.StringIO()` (module-style access) while the
        # rest of this module imports the class directly.
        self.file = StringIO()
        self._storage = storage
        self._is_dirty = False

    @property
    def size(self):
        # BUGFIX: the cache check was inverted (`if hasattr(...)`), so the
        # backend was queried only when the cache already existed and the
        # first access raised AttributeError.  Also `self.storage` never
        # existed -- the constructor stores `self._storage`.
        if not hasattr(self, '_size'):
            self._size = self._storage.size(self.name)
        return self._size

    def read(self, num_bytes=None):
        # Open the backend read handle lazily and reuse it afterwards.
        if not hasattr(self, '_obj'):
            self._obj = self._storage._open_read(self.name)
        return self._obj.read(num_bytes)

    def write(self, content):
        # BUGFIX: the constructor stores `self.mode`, but this method
        # checked the non-existent `self._mode`, raising AttributeError
        # on every call.
        if 'w' not in self.mode:
            raise AttributeError("File was opened for read-only access.")
        self.file = StringIO(content)
        self._is_dirty = True

    def close(self):
        # Flush buffered content to the backend only if something was written.
        if self._is_dirty:
            self._storage._save(self.name, self.file.getvalue())
        self.file.close()
def generate_topart(nick, period, width, height):
    """Build a PNG collage of art images for *nick* over *period*.

    Returns a ``(png_bytes_or_None, error_message)`` pair.
    """
    cell = config.ABOUT_ME_WIDTH // width
    req_size = opt_size(cell)
    images, error = get_arts_images(nick, period, width * height, req_size)
    if not images or error:
        # Either the fetch failed or nothing came back.
        return None, 'Topart generation failed'
    count = len(images)
    if count < width * height:
        # Shrink the grid to fit the images we actually received.
        if count >= width:
            height = count // width
            images = images[:width * height]
        else:
            width, height = count, 1
    canvas = Image.new('RGBA', (cell * width, cell * height))
    for idx, img in enumerate(images):
        append_image(canvas, img, idx, width, cell)
    buf = StringIO()
    canvas.save(buf, format="PNG")
    png = buf.getvalue()
    buf.close()
    return png, error
def act_getfile(self, cr, uid, ids, context=None):
    """Generate the master payroll CSV, store it base64-encoded on the
    wizard record, and re-open the wizard form for download."""
    this = self.browse(cr, uid, ids)[0]
    data = self.make_data(cr, uid, ids, this, context)
    buf = StringIO()
    buf = self.generate_file_export(cr, uid, ids, buf, data, context)
    out = base64.encodestring(buf.getvalue())
    buf.close()
    this.name = "Master_payroll.csv"
    writer = {'state': 'get', 'file': out, 'name': this.name}
    # BUGFIX: `context['active_ids']` raised KeyError when the wizard was
    # launched outside a payslip list; also dropped an unused
    # `hr.payslip.browse(...)` call whose result was never read.
    active_ids = (context or {}).get('active_ids') or []
    if active_ids:
        writer['payslip_ids'] = [(6, 0, active_ids)]
    self.write(cr, uid, ids, writer, context=context)
    return {
        'type': 'ir.actions.act_window',
        'res_model': 'hr.master.payroll',
        'view_mode': 'form',
        'view_type': 'form',
        'res_id': this.id,
        'views': [(False, 'form')],
    }
def generateMainpageHTML(self):
    """Write <LogBookFolder>/index.html: tag counts followed by a table of
    all entries in reverse date order."""
    # Build the entry table in memory first, because rendering it also
    # accumulates the per-tag counts that the page header needs.
    tableout = StringIO()
    tableout.write('<table border = \'1\'>\n')
    for datestr in sorted(self.entries, reverse=True):
        self.writeDateEntryHTML(tableout, datestr)
        for entry in self.entries[datestr]:
            self.writeSubEntryHTMLBegin(tableout, entry)
            for tag in entry.tags:
                # Count tag occurrences while rendering the links.
                self.tags[tag] = self.tags.get(tag, 0) + 1
                self.writeTagLinkHTML(tableout, tag)
            self.writeSubEntryHTMLEnd(tableout)
    tableout.write('</table>\n')
    tablestring = tableout.getvalue()
    tableout.close()
    # BUGFIX: the output file was never closed; use a context manager.
    with open('%s/index.html' % self.LogBookFolder, 'w') as fout:
        fout.write('<html>\n<body>\n')
        self.writeTagCountsHTML(fout)
        fout.write(tablestring)
        fout.write('</body>\n</html>\n')
class TestWriter(unittest.TestCase):
    """Round-trip checks for excelcsv.writer output on a StringIO sink."""

    # The Excel dialect emits CRLF row terminators.
    expected = u'a,b,c\r\nd,e,f\r\n'

    def setUp(self):
        self.io = StringIO()
        self.writer = excelcsv.writer(self.io)

    def tearDown(self):
        self.io.close()

    def _check_output(self):
        value = self.io.getvalue()
        self.assertIsInstance(value, text_type)
        self.assertEqual(value, self.expected)

    def test_write(self):
        for row in ([u'a', u'b', u'c'], [u'd', u'e', u'f']):
            self.writer.writerow(row)
        self._check_output()

    def test_writerows(self):
        self.writer.writerows([[u'a', u'b', u'c'], [u'd', u'e', u'f']])
        self._check_output()
def encFile(file_name, passphrase):
    # Generator-based encryption sink: the caller receives an in-memory
    # buffer, writes plaintext into it, then resumes the generator once;
    # at that point the buffer contents are encrypted symmetrically and
    # written to *file_name*.
    a = StringIO()
    yield a  # caller fills `a` with plaintext before advancing the generator
    KEYRING = './keyring.gpg'
    SECRET_KEYRING = './secring.gpg'
    GPGBINARY = 'gpg'
    gpg = gnupg.GPG(gnupghome='.', gpgbinary=GPGBINARY, keyring=KEYRING,
                    secret_keyring=SECRET_KEYRING,
                    options=[
                        '--throw-keyids',
                        '--personal-digest-preferences=sha256',
                        '--s2k-digest-algo=sha256'
                    ])
    # NOTE(review): latin-1 presumably chosen because it round-trips all
    # byte values -- confirm against the matching decFile.
    gpg.encoding = 'latin-1'
    # Symmetric AES256; no recipient keys are involved.
    ciphertext = gpg.encrypt(a.getvalue(), recipients=None,
                             symmetric='AES256', armor=False,
                             always_trust=True, passphrase=passphrase)
    a.close()
    with open(file_name, 'wb') as f:
        f.write(ciphertext.data)
def dump(self, data, stream=None, **kw):
    """Serialize *data* to YAML and return the license-filtered text.

    If *stream* is omitted an internal buffer is used.  BUGFIX: the old
    code closed whatever stream it was handed -- including one owned by
    the caller -- in its ``finally`` block; now only the internally
    created buffer is closed.
    """
    own_stream = stream is None
    if own_stream:
        stream = StringIO()
    try:
        yaml.dump(data=data, stream=stream, Dumper=yaml.Dumper, **kw)
        return self._license_filter(stream.getvalue().strip())
    finally:
        if own_stream:
            stream.close()
def dump(self, data, stream=None, **kw):
    """Serialize *data* via RYAML and return the license-filtered text.

    BUGFIX: the ``finally`` block previously closed caller-supplied
    streams too; now only the internally created buffer is closed.
    """
    own_stream = stream is None
    if own_stream:
        stream = StringIO()
    try:
        RYAML.dump(self, data, stream, **kw)
        return self._license_filter(stream.getvalue().strip())
    finally:
        if own_stream:
            stream.close()
def _print(request, template, output_format, output_file_ext, content_type):
    """Render *template* to a printable document via phantomjs and return
    it as an HTTP response (or HTTP 500 carrying the captured phantomjs
    output on failure)."""
    metadata = get_metadata(request)
    output_file = tempfile.mkstemp(suffix=output_file_ext, prefix="{}_".format(template))[1]
    print_js_file = "{}.js".format(output_file)
    sso_cookie = request.COOKIES.get(settings.SSO_COOKIE_NAME, "")
    print_html = pathlib.Path(os.path.join(app_dir, "static", "{}.html".format(template))).as_uri()
    metadata_str = json.dumps(metadata, indent=4)
    context = Context({
        "output_format": output_format,
        "output_file": output_file,
        # BUGFIX: the cookie read from the request above was ignored and a
        # hard-coded (leaked) session token was passed instead.
        "sso_cookie": sso_cookie,
        "sso_cookie_name": settings.SSO_COOKIE_NAME,
        "sso_cookie_domain": settings.SSO_COOKIE_DOMAIN,
        "print_html": print_html,
        "metadata": metadata_str,
        # NOTE(review): user identity is still hard-coded -- presumably it
        # should come from the authenticated request; confirm and fix.
        "login_user": {"name": "rockyc", "email": "*****@*****.**"},
        "timeout": 300,
        "log_level": logger.getEffectiveLevel(),
        "working_directory": working_directory,
        "keep_tmp_file": json.dumps(settings.KEEP_TMP_FILE)
    })
    try:
        with open(print_js_file, 'wb') as print_js:
            print_js.write(bytes(print_template.render(context), 'UTF-8'))
            print_js.flush()
        print_process = subprocess.Popen(["phantomjs", print_js_file],
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
        # BUGFIX: removed a stray `threading.Thread(target="")` that was
        # never started (and would have raised TypeError if it had been).
        error = StringIO()
        try:
            # Drain both pipes through logger threads, accumulating the
            # text into `error` for a possible failure response.
            stdout_thread = threading.Thread(target=log_subprocess_stdout, args=(print_process.stdout, error))
            stdout_thread.setDaemon(True)
            stdout_thread.start()
            stderr_thread = threading.Thread(target=log_subprocess_stderr, args=(print_process.stderr, error))
            stderr_thread.setDaemon(True)
            stderr_thread.start()
            stdout_thread.join()
            stderr_thread.join()
            # Pipes are drained; poll until the process has exited.
            print_process.poll()
            while print_process.returncode is None:
                print_process.poll()
                time.sleep(1)
            if print_process.returncode != 0:
                logger.error("Generate print document failed, \nreason={}".format(error.getvalue()))
                os.remove(output_file)
                return HttpResponse("<html><head/><body><pre>{}</pre></body>".format(error.getvalue()), status=500)
            if settings.KEEP_TMP_FILE:
                response = FileResponse(open(output_file, 'rb'), content_type=content_type)
            else:
                response = TempFileResponse(open(output_file, 'rb'), content_type=content_type)
            return response
        finally:
            error.close()
    finally:
        if not settings.KEEP_TMP_FILE:
            os.remove(print_js_file)
def mat2cv(m):
    """Render matrix *m* with matshow and return it as a 3-channel
    OpenCV (IPL) image."""
    figure = pylab.figure(1)
    pylab.matshow(m, fignum=1)
    # Round-trip through an in-memory PNG to get a PIL image.
    png_buf = StringIO()
    figure.savefig(png_buf, format='png')
    png_buf.seek(0)
    pil_img = Image.open(png_buf)
    header = cv.CreateImageHeader(pil_img.size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(header, pil_img.tostring(), pil_img.size[0] * 3)
    png_buf.close()
    return header
def disassemble(func):
    """Return dis.dis()'s disassembly of *func* as a string.

    BUGFIX: stdout is now restored in a ``finally`` block, so an exception
    inside dis.dis() can no longer leave sys.stdout redirected.  The
    StringIO import also falls back to ``io`` so the helper works on both
    Python 2 and 3.
    """
    try:
        from StringIO import StringIO
    except ImportError:
        from io import StringIO
    import sys, dis
    f = StringIO()
    tmp = sys.stdout
    sys.stdout = f
    try:
        dis.dis(func)
    finally:
        sys.stdout = tmp
    result = f.getvalue()
    f.close()
    return result
def main():
    # Build an OSv image: parse CLI options, upload the manifest contents
    # into a booted OSv instance via cpiod, then disable ZFS compression.
    make_option = optparse.make_option
    opt = optparse.OptionParser(option_list=[
        make_option('-o', dest='output', help='write to FILE', metavar='FILE'),
        make_option('-d', dest='depends', help='write dependencies to FILE', metavar='FILE', default=None),
        make_option('-m', dest='manifest', help='read manifest from FILE', metavar='FILE'),
        make_option('-D', type='string', help='define VAR=DATA', metavar='VAR=DATA', action='callback', callback=add_var),
    ])
    (options, args) = opt.parse_args()
    # Dependency output goes to a throwaway buffer unless -d was given.
    # NOTE(review): `file()` is Python 2 only.
    depends = StringIO()
    if options.depends:
        depends = file(options.depends, 'w')
    manifest = configparser.SafeConfigParser()
    manifest.optionxform = str  # avoid lowercasing
    manifest.read(options.manifest)
    depends.write('%s: \\\n' % (options.output, ))
    image_path = os.path.abspath(options.output)
    # Boot OSv with cpiod listening on forwarded port 10000 and upload the files.
    osv = subprocess.Popen(
        'cd ../..; scripts/run.py --vnc none -m 512 -c1 -i %s -u -s -e "--norandom --noinit /tools/cpiod.so" --forward tcp:10000::10000' % image_path,
        shell=True, stdout=subprocess.PIPE)
    upload(osv, manifest, depends)
    osv.wait()
    # Disable ZFS compression; it stops taking effect from this point on.
    osv = subprocess.Popen(
        'cd ../..; scripts/run.py -m 512 -c1 -i %s -u -s -e "--norandom --noinit /zfs.so set compression=off osv"' % image_path,
        shell=True, stdout=subprocess.PIPE)
    osv.wait()
    depends.write('\n\n')
    depends.close()
def main():
    # Build an OSv image, optionally without hypervisor acceleration
    # (-x y): boot OSv, run mkfs, upload the manifest via cpiod, and
    # disable ZFS compression -- all in one guest run.
    make_option = optparse.make_option
    opt = optparse.OptionParser(option_list=[
        make_option('-o', dest='output', help='write to FILE', metavar='FILE'),
        make_option('-d', dest='depends', help='write dependencies to FILE', metavar='FILE', default=None),
        make_option('-m', dest='manifest', help='read manifest from FILE', metavar='FILE'),
        make_option('-D', type='string', help='define VAR=DATA', metavar='VAR=DATA', action='callback', callback=add_var),
        make_option('-x', dest='emulation', help='use emulation instead of a hypervisor', metavar='FILE'),
    ])
    (options, args) = opt.parse_args()
    # Dependency output goes to a throwaway buffer unless -d was given.
    # NOTE(review): `file()` is Python 2 only.
    depends = StringIO()
    if options.depends:
        depends = file(options.depends, 'w')
    manifest = read_manifest(options.manifest)
    depends.write('%s: \\\n' % (options.output,))
    image_path = os.path.abspath(options.output)
    osv = None
    if options.emulation == 'y':
        # `-p none` selects plain emulation in scripts/run.py.
        print("Using emulation (no hypervisor support) for image creation")
        osv = subprocess.Popen('cd ../..; scripts/run.py --vnc none -m 512 -c1 -i %s -u -s -p none -e "--norandom --nomount --noinit /tools/mkfs.so; /tools/cpiod.so --prefix /zfs/zfs/; /zfs.so set compression=off osv" --forward tcp:10000::10000' % image_path, shell=True, stdout=subprocess.PIPE)
    else:
        print("Using hypervisor support for image creation")
        osv = subprocess.Popen('cd ../..; scripts/run.py --vnc none -m 512 -c1 -i %s -u -s -e "--norandom --nomount --noinit /tools/mkfs.so; /tools/cpiod.so --prefix /zfs/zfs/; /zfs.so set compression=off osv" --forward tcp:10000::10000' % image_path, shell=True, stdout=subprocess.PIPE)
    upload(osv, manifest, depends)
    osv.wait()
    depends.write('\n\n')
    depends.close()
def execwo(v):
    """Execute source string *v* and return (captured_stdout, captured_stderr).

    WARNING: exec() on untrusted input is arbitrary code execution -- only
    call this with trusted source.

    BUGFIX: the stream swap is now wrapped in try/finally, so an exception
    raised by the executed code can no longer leave sys.stdout/sys.stderr
    pointing at the (soon closed) capture buffers.
    """
    code_out = StringIO()
    code_err = StringIO()
    sys.stdout = code_out
    sys.stderr = code_err
    try:
        exec(v)
    finally:
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
    rv = (code_out.getvalue(), code_err.getvalue())
    code_out.close()
    code_err.close()
    return rv
def create_from_file(self, permid, srcfilename):
    """ srcfilename must point to an image file processable by wx.Image;
    it is scaled to ICON_MAX_DIM and stored as the peer's JPEG icon. """
    try:
        sim = wx.Image(srcfilename).Scale(ICON_MAX_DIM, ICON_MAX_DIM)
        # BUGFIX: a leftover `sim.SaveFile(dstfilename, ...)` call here
        # referenced an undefined name, raising NameError into the broad
        # except below and silently skipping the icon update every time.
        f = StringIO()
        sim.SaveStream(f, wx.BITMAP_TYPE_JPEG)
        self.peer_db.updatePeerIcon('image/jpeg', f.getvalue())
        f.close()
    except Exception:
        # Best-effort by design: icon creation failures are non-fatal.
        if DEBUG:
            print_exc()
def getURL():
    """Fetch the Twitch stream-status JSON for the configured channel."""
    # BUGFIX: the Python 2 branch imported the StringIO *module* and the
    # code below then called StringIO() on the module object (TypeError);
    # import the class in both branches.
    try:
        from StringIO import StringIO
    except ImportError:
        from io import StringIO
    c = pycurl.Curl()
    buf = StringIO()
    c.setopt(c.URL, 'https://api.twitch.tv/kraken/streams/{}/'.format(settings.channel.split('#')[1]))
    c.setopt(c.WRITEFUNCTION, buf.write)
    c.perform()
    ret = buf.getvalue()
    buf.close()
    return ret
def renderContent(self, xml, frameID, channelID=None):
    """Apply the XSL transform registered for (frameID, channelID) to *xml*.

    Returns the transformed document as a string, or *xml* unchanged when
    no stylesheet is registered under that key.
    """
    transform = self.stylesheetMap.get((frameID, channelID))
    # BUGFIX: identity comparison instead of `== None`; the StringIO
    # handle is also created before the try block so the finally clause
    # can never reference an unbound name.
    if transform is None:
        return xml
    xmlHandle = StringIO(xml)
    try:
        documentTree = etree.parse(xmlHandle)
        result = transform(documentTree)
        return str(result)
    finally:
        xmlHandle.close()
def main():
    # Build an OSv image: parse CLI options, upload the manifest contents
    # into a booted OSv instance via cpiod, then disable ZFS compression.
    make_option = optparse.make_option
    opt = optparse.OptionParser(option_list=[
        make_option('-o', dest='output', help='write to FILE', metavar='FILE'),
        make_option('-d', dest='depends', help='write dependencies to FILE', metavar='FILE', default=None),
        make_option('-m', dest='manifest', help='read manifest from FILE', metavar='FILE'),
        make_option('-D', type='string', help='define VAR=DATA', metavar='VAR=DATA', action='callback', callback=add_var),
    ])
    (options, args) = opt.parse_args()
    # Dependency output goes to a throwaway buffer unless -d was given.
    # NOTE(review): `file()` is Python 2 only.
    depends = StringIO()
    if options.depends:
        depends = file(options.depends, 'w')
    manifest = configparser.SafeConfigParser()
    manifest.optionxform = str  # avoid lowercasing
    manifest.read(options.manifest)
    depends.write('%s: \\\n' % (options.output,))
    image_path = os.path.abspath(options.output)
    # Boot OSv with cpiod listening on forwarded port 10000 and upload the files.
    osv = subprocess.Popen('cd ../..; scripts/run.py --vnc none -m 512 -c1 -i %s -u -s -e "--norandom --noinit /tools/cpiod.so" --forward tcp:10000::10000' % image_path, shell=True, stdout=subprocess.PIPE)
    upload(osv, manifest, depends)
    osv.wait()
    # Disable ZFS compression; it stops taking effect from this point on.
    osv = subprocess.Popen('cd ../..; scripts/run.py -m 512 -c1 -i %s -u -s -e "--norandom --noinit /zfs.so set compression=off osv"' % image_path, shell=True, stdout=subprocess.PIPE)
    osv.wait()
    depends.write('\n\n')
    depends.close()
def main():
    # Build an OSv image: parse CLI options, upload the manifest contents
    # into a booted OSv instance via cpiod, then disable ZFS compression.
    make_option = optparse.make_option
    opt = optparse.OptionParser(
        option_list=[
            make_option("-o", dest="output", help="write to FILE", metavar="FILE"),
            make_option("-d", dest="depends", help="write dependencies to FILE", metavar="FILE", default=None),
            make_option("-m", dest="manifest", help="read manifest from FILE", metavar="FILE"),
            make_option(
                "-D", type="string", help="define VAR=DATA", metavar="VAR=DATA", action="callback", callback=add_var
            ),
        ]
    )
    (options, args) = opt.parse_args()
    # Dependency output goes to a throwaway buffer unless -d was given.
    # NOTE(review): `file()` is Python 2 only.
    depends = StringIO()
    if options.depends:
        depends = file(options.depends, "w")
    manifest = configparser.SafeConfigParser()
    manifest.optionxform = str  # avoid lowercasing
    manifest.read(options.manifest)
    depends.write("%s: \\\n" % (options.output,))
    image_path = os.path.abspath(options.output)
    # Boot OSv with cpiod listening on forwarded port 10000 and upload the files.
    osv = subprocess.Popen(
        'cd ../..; scripts/run.py --vnc none -m 512 -c1 -i %s -u -s -e "--norandom --noinit /tools/cpiod.so" --forward tcp:10000::10000'
        % image_path,
        shell=True,
        stdout=subprocess.PIPE,
    )
    upload(osv, manifest, depends)
    osv.wait()
    # Disable ZFS compression; it stops taking effect from this point on.
    osv = subprocess.Popen(
        'cd ../..; scripts/run.py -m 512 -c1 -i %s -u -s -e "--norandom --noinit /zfs.so set compression=off osv"'
        % image_path,
        shell=True,
        stdout=subprocess.PIPE,
    )
    osv.wait()
    depends.write("\n\n")
    depends.close()
def LoadConfig():
    """Populate Config.parser from CONFIG_FILE, injecting a [sysconf]
    section header so a bare key=value file parses cleanly."""
    Config.parser = ConfigParser()
    # BUGFIX: both file handles were leaked when parsing raised, and the
    # bare `except:` also swallowed SystemExit/KeyboardInterrupt; catch
    # I/O errors only and close the handles deterministically.
    try:
        sconff = open(CONFIG_FILE, "r")
    except (IOError, OSError):
        Log.warn("cannot open config file")
        return
    try:
        sconf = StringIO()
        try:
            sconf.write("[sysconf]\n")
            sconf.write(sconff.read())
            sconf.seek(0)
            Config.parser.readfp(sconf)
        finally:
            sconf.close()
    finally:
        sconff.close()
    return
def _get_barcode_img(cls, Withholding, withholding):
    # Render the withholding's authorization number as a PNG barcode and
    # return it wrapped in buffer() for storage in a binary field.
    # NOTE(review): buffer() is Python 2 only.
    from barras import CodigoBarra
    from cStringIO import StringIO as StringIO
    # create the helper:
    codigobarra = CodigoBarra()
    output = StringIO()
    bars = withholding.numero_autorizacion
    codigobarra.GenerarImagen(bars, output, basewidth=3, width=380, height=50, extension="PNG")
    image = buffer(output.getvalue())
    output.close()
    return image
def main():
    # Build an OSv image: boot OSv forwarding a free local port, run mkfs
    # and upload the manifest via cpiod, and disable ZFS compression in
    # the same guest run; abort the build if the upload fails.
    make_option = optparse.make_option
    opt = optparse.OptionParser(option_list=[
        make_option('-o', dest='output', help='write to FILE', metavar='FILE'),
        make_option('-d', dest='depends', help='write dependencies to FILE', metavar='FILE', default=None),
        make_option('-m', dest='manifest', help='read manifest from FILE', metavar='FILE'),
        make_option('-D', type='string', help='define VAR=DATA', metavar='VAR=DATA', action='callback', callback=add_var),
    ])
    (options, args) = opt.parse_args()
    # Dependency output goes to a throwaway buffer unless -d was given.
    # NOTE(review): `file()` is Python 2 only.
    depends = StringIO()
    if options.depends:
        depends = file(options.depends, 'w')
    manifest = read_manifest(options.manifest)
    depends.write('%s: \\\n' % (options.output, ))
    image_path = os.path.abspath(options.output)
    upload_port = find_free_port()
    osv = subprocess.Popen(
        'cd ../..; scripts/run.py --vnc none -m 512 -c1 -i "%s" -u -s -e "--norandom --nomount --noinit /tools/mkfs.so; /tools/cpiod.so --prefix /zfs/zfs/; /zfs.so set compression=off osv" --forward tcp:127.0.0.1:%s-:10000' % (image_path, upload_port),
        shell=True, stdout=subprocess.PIPE)
    upload(osv, manifest, depends, upload_port)
    ret = osv.wait()
    if ret != 0:
        sys.exit("Upload failed.")
    depends.write('\n\n')
    depends.close()
def pack(self, protocol_object):
    # Frame *protocol_object* for the wire: 4-byte big-endian message
    # code, then the serialized protobuf, optionally gzip-compressed,
    # with a small metadata record (compress flag, timestamp flag
    # [, millisecond timestamp]) spliced in one third of the way into
    # the payload.
    assert isinstance(protocol_object, self.msg_cls)
    _data = struct.pack('>I', self.msg_code) + protocol_object.SerializeToString()
    if self.compress:
        mstream = StringIO()
        f = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=mstream)
        f.write(_data)
        f.close()  # must close before getvalue() so the gzip trailer is flushed
        _data = mstream.getvalue()
        mstream.close()
    if self.timestamp:
        insertData = chr(self.compress) + chr(self.timestamp) + struct.pack('>Q', int(time.time()*1000))
    else:
        insertData = chr(self.compress) + chr(self.timestamp)
    # NOTE(review): `/3` relies on Python 2 integer division; under
    # Python 3 it yields a float and the slices below would raise.
    compressIdx = (len(_data) + len(insertData))/3
    _data = _data[:compressIdx] + insertData + _data[compressIdx:]
    return _data
class Export(object):
    '''
    Base class for all Exporters.

    Subclasses provide _render_node() (and optionally _start()/_end())
    to produce a concrete graph representation.
    '''

    def __init__(self):
        super(Export, self).__init__()
        # Export initialization
        self._render_set = set()   # Used in rendering to prevent rendering
                                   # of the same node multiple times
        self._adapter_map = {}     # Registry of adapters to ensure that the
                                   # same adapter is returned for the same
                                   # adaptee object

    def export(self, obj):
        '''Export of obj to a string.'''
        self._outf = StringIO()
        self._export(obj)
        return self._outf.getvalue()

    def exportFile(self, obj, file_name):
        '''Export of obj to a file.'''
        # BUGFIX: the output file was leaked when rendering raised; a
        # context manager now guarantees it is closed.
        with open(file_name, "w") as outf:
            self._outf = outf
            self._export(obj)

    def _export(self, obj):
        # Template method: frame the rendered node with _start()/_end().
        self._outf.write(self._start())
        self._render_node(obj)
        self._outf.write(self._end())

    def _start(self):
        '''
        Overide this to specify the begining of the graph representation.
        '''
        return ""

    def _end(self):
        '''
        Overide this to specify the end of the graph representation.
        '''
        return ""
def readXML(f):
    """Collect the XML document embedded in stream *f* -- everything from
    the first OPEN_CHEVRON up to (excluding) a NUL byte -- and register
    it with the profile."""
    xml_str = StringIO()
    in_xml = False
    while True:
        byte = f.read(1)
        # BUGFIX: the original looped forever when the stream ended before
        # a NUL, because f.read(1) returns '' from then on; bail out on EOF.
        if not byte:
            break
        if not in_xml and byte == markers.OPEN_CHEVRON:
            in_xml = True
        if in_xml:
            if byte == markers.NUL:
                break
            xml_str.write(byte)
    profile.add_xml_to_profile(xml_str.getvalue())
    xml_str.close()
def post_reset(self, challenge_id):
    # POST a reset request for *challenge_id* to the CTF API and return
    # the 'status' field from the JSON reply.
    curl_post = pycurl.Curl()
    post_response = StringIO()
    curl_URL = 'http://' + os.environ['CTF_URL'] + '/api/reset_question'
    #curl_post.setopt(curl_post.URL, 'http://ai.defcon.ichunqiu.com/api/reset_question)
    curl_post.setopt(curl_post.URL, curl_URL)
    post_data = 'ChallengeID=' + challenge_id
    curl_post.setopt(curl_post.POST, 1)
    # NOTE(review): credentials are hard-coded here.
    curl_post.setopt(pycurl.USERPWD, 'user01' + ':' + '938524')
    curl_post.setopt(curl_post.POSTFIELDS, post_data)
    curl_post.setopt(curl_post.CONNECTTIMEOUT, 10)
    curl_post.setopt(curl_post.TIMEOUT, 20)
    # Accumulate the response body into the in-memory buffer.
    curl_post.setopt(curl_post.WRITEFUNCTION, post_response.write)
    curl_post.perform()
    reset_status = json.loads(post_response.getvalue()).get('status')
    print 'status ' + str(reset_status)
    post_response.close()
    curl_post.close()
    return reset_status
def generate_file(self):
    """Export all economical.activities records to a CSV attachment and
    return an act_url action that downloads it."""
    path = '/tmp/file_%s.csv' % (datetime.today())
    export_objs = self.env['economical.activities'].search([])
    # BUGFIX: removed an unused StringIO buffer that was created and then
    # closed without ever being written, a redundant csvfile.close()
    # executed after the `with` block had already closed the file, and a
    # stray `print line` debug statement.
    with open(path, 'w') as csvfile:
        fieldnames = ['code', 'name']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for line in export_objs:
            writer.writerow({
                'code': str(line.code),
                'name': str(line.name.encode('utf-8').strip())
            })
    # BUGFIX: the handle used to re-read the CSV for encoding was leaked.
    with open(path, 'r') as fh:
        data = base64.encodestring(fh.read())
    attach_vals = {
        'name': 'File%s.csv' % (datetime.today().strftime("%d-%m-%Y")),
        'datas': data,
        'datas_fname': 'File#_%s.csv' % (datetime.today().strftime("%d-%m-%Y")),
    }
    doc_id = self.env['ir.attachment'].create(attach_vals)
    return {
        'type': "ir.actions.act_url",
        'url': "web/content/?model=ir.attachment&id=" + str(doc_id.id) + "&filename_field=datas_fname&field=datas&download=true&filename=" + str(doc_id.name),
        'target': "self",
    }
def build_archive(logs):
    """Pack the given log dicts (keys: filename, stat, contents) into an
    in-memory gzipped tar archive and return the archive bytes."""
    fd = tempfile.SpooledTemporaryFile()
    tar = tarfile.open(mode='w:gz', fileobj=fd)
    bufs = []
    for logdata in logs:
        tinf = tarfile.TarInfo(logdata['filename'])
        tinf.mtime = logdata['stat'].st_mtime
        tinf.mode = logdata['stat'].st_mode
        buf = StringIO()
        buf.write(logdata['contents'])
        buf.seek(0)
        bufs.append(buf)
        # BUGFIX: the entry size was read from StringIO's private `.buf`
        # attribute, which cStringIO/io objects do not expose; size the
        # entry from the contents themselves.
        tinf.size = len(logdata['contents'])
        tar.addfile(tinf, fileobj=buf)
    tar.close()
    # Buffers must outlive addfile(); release them once the tar is final.
    for buf in bufs:
        buf.close()
    fd.seek(0)
    s = fd.read()
    fd.close()
    return s
def post_answer(self, flag):
    # POST the candidate *flag* to the CTF API and return the 'status'
    # field from the JSON reply.
    curl_post = pycurl.Curl()
    post_response = StringIO()
    # c.setopt(pycurl.SSL_VERIFYHOST, False)
    # c.setopt(pycurl.SSL_VERIFYPEER, False)
    curl_URL = 'http://' + os.environ['CTF_URL'] + '/api/sub_answer'
    #curl_post.setopt(curl_post.URL, 'http://ai.defcon.ichunqiu.com/api/sub_answer')
    curl_post.setopt(curl_post.URL, curl_URL)
    post_data = 'answer=' + flag
    curl_post.setopt(curl_post.POST, 1)
    # NOTE(review): credentials are hard-coded here.
    curl_post.setopt(pycurl.USERPWD, 'user01' + ':' + '938524')
    curl_post.setopt(curl_post.POSTFIELDS, post_data)
    curl_post.setopt(curl_post.CONNECTTIMEOUT, 10)
    curl_post.setopt(curl_post.TIMEOUT, 20)
    # Accumulate the response body into the in-memory buffer.
    curl_post.setopt(curl_post.WRITEFUNCTION, post_response.write)
    curl_post.perform()
    flag_status = json.loads(post_response.getvalue()).get('status')
    print 'status ' + str(flag_status)
    post_response.close()
    curl_post.close()
    return flag_status
def assignStylesheet(self, xslFilename, frameID, channelID=None, **kwargs):
    # Load an XSL file that may contain Jinja tags, resolve those tags,
    # compile the result as an XSLT transform, and register it under the
    # (frameID, channelID) key.
    # first, load the stylesheet as a Jinja template
    xslHandle = None
    try:
        xslTemplateObject = self.j2TemplateMgr.getTemplate(xslFilename)
        # resolve the Jinja tags in the template, place result in a file-like string object
        xslHandle = StringIO(xslTemplateObject.render())
        # now use the resolved XSL (which should have no more Jinja tags)
        # as a valid & well-formed XSL stylesheet.
        xslTree = etree.parse(xslHandle)
        xslTransform = etree.XSLT(xslTree)
        self.stylesheetMap[(frameID, channelID)] = xslTransform
    except jinja2.TemplateError as err:
        pass  #TODO: redirect to error page
    finally:
        # xslHandle stays None when template lookup/rendering failed.
        if xslHandle is not None:
            xslHandle.close()
def decFile(file_name, passphrase):
    """Generator-based decryption source: decrypts *file_name* with the
    symmetric *passphrase* and yields an in-memory buffer positioned at
    the start of the plaintext for the caller to read."""
    KEYRING = './keyring.gpg'
    SECRET_KEYRING = './secring.gpg'
    GPGBINARY = 'gpg'
    gpg = gnupg.GPG(gnupghome='.', gpgbinary=GPGBINARY, keyring=KEYRING,
                    secret_keyring=SECRET_KEYRING,
                    options=[
                        '--throw-keyids',
                        '--personal-digest-preferences=sha256',
                        '--s2k-digest-algo=sha256'
                    ])
    gpg.encoding = 'latin-1'
    with open(file_name, 'rb') as f:
        ciphertext = f.read()
    plaintext = gpg.decrypt(ciphertext, passphrase=passphrase, always_trust=True)
    # BUGFIX: gpg.decrypt returns a result object, not a string -- the
    # buffer must be built from its `.data` payload (mirroring the
    # `ciphertext.data` usage in the matching encFile).
    a = StringIO(plaintext.data)
    a.seek(0)
    yield a
    a.close()
class SFTPStorageFile(File):
    """File object backed by an SFTP storage: reads are fetched lazily
    from the remote host and writes are buffered until close()."""

    def __init__(self, name, storage, mode):
        self._name = name
        self._storage = storage
        self._mode = mode
        self._is_dirty = False
        self.file = StringIO()
        self._is_read = False

    @property
    def size(self):
        # Cache the remote size after the first lookup (EAFP style).
        try:
            return self._size
        except AttributeError:
            self._size = self._storage.size(self._name)
            return self._size

    def read(self, num_bytes=None):
        # Pull the remote file into the local buffer on first read only.
        if not self._is_read:
            self._storage._start_connection()
            self.file = self._storage._read(self._name)
            self._storage._end_connection()
            self._is_read = True
        return self.file.read(num_bytes)

    def write(self, content):
        if 'w' not in self._mode:
            raise AttributeError("File was opened for read-only access.")
        self.file = StringIO(content)
        self._is_dirty = True
        self._is_read = True

    def close(self):
        # Push buffered content to the remote host only when dirty.
        if self._is_dirty:
            self._storage._start_connection()
            self._storage._put_file(self._name, self.file.getvalue())
            self._storage._end_connection()
        self.file.close()
return Image.open(strio) def getScreenByPIL(self): import ImageGrab img = ImageGrab.grab() return img def getScreenByWx(self): import wx wx.App() # Need to create an App instance before doing anything screen = wx.ScreenDC() size = screen.GetSize() bmp = wx.EmptyBitmap(size[0], size[1]) mem = wx.MemoryDC(bmp) mem.Blit(0, 0, size[0], size[1], screen, 0, 0) del mem # Release bitmap #bmp.SaveFile('screenshot.png', wx.BITMAP_TYPE_PNG) myWxImage = wx.ImageFromBitmap( myBitmap ) PilImage = Image.new( 'RGB', (myWxImage.GetWidth(), myWxImage.GetHeight()) ) PilImage.fromstring( myWxImage.GetData() ) return PilImage if __name__ == '__main__': from cStringIO import StringIO s = screengrab() screen = s.screen() out = StringIO() screen.save(out, format="PNG") out.close() screen.show()
class formatController: '''A class to convert a Leo outline to rst/Sphinx markup. The outline is presumed to contain computer source code.''' #@+others #@+node:ekr.20100811091636.5922: *3* Birth & init #@+node:ekr.20100811091636.5923: *4* ctor (rstClass) def __init__ (self,c,p,defaultOptionsDict): self.c = c self.p = p.copy() self.defaultOptionsDict = defaultOptionsDict #@+<< init ivars >> #@+node:ekr.20100811091636.5924: *5* << init ivars >> # The options dictionary. self.optionsDict = {} self.vnodeOptionDict = {} # Formatting... self.code_block_string = '' self.node_counter = 0 self.topLevel = 0 self.topNode = None # For writing. self.atAutoWrite = False # True, special cases for writeAtAutoFile. self.atAutoWriteUnderlines = '' # Forced underlines for writeAtAutoFile. self.leoDirectivesList = g.globalDirectiveList self.encoding = 'utf-8' self.ext = None # The file extension. self.outputFileName = None # The name of the file being written. self.outputFile = None # The open file being written. self.path = '' # The path from any @path directive. self.source = None # The written source as a string. self.trialWrite = False # True if doing a trialWrite. #@-<< init ivars >> self.initOptionsFromSettings() # Still needed. 
self.initSingleNodeOptions() #@+node:ekr.20100811091636.5928: *4* initSingleNodeOptions def initSingleNodeOptions (self): self.singleNodeOptions = [ 'ignore_this_headline', 'ignore_this_node', 'ignore_this_tree', 'preformat_this_node', 'show_this_headline', ] #@+node:ekr.20100811091636.5931: *3* Options #@+node:ekr.20100811091636.5934: *4* get/SetOption def getOption (self,name): return self.optionsDict.get(self.munge(name)) def setOption (self,name,val,tag): self.optionsDict [self.munge(name)] = val #@+node:ekr.20100811091636.5929: *4* munge def munge (self,name): '''Convert an option name to the equivalent ivar name.''' return name.lower().replace('-','').replace('_','') #@+node:ekr.20100811091636.5944: *4* scanAllOptions & helpers # Once an option is seen, no other related options in ancestor nodes have any effect. def scanAllOptions(self,p): '''Scan position p and p's ancestors looking for options, setting corresponding ivars. ''' self.initOptionsFromSettings() # Must be done on every node. self.preprocessNode(p) self.handleSingleNodeOptions(p) seen = self.singleNodeOptions[:] # Suppress inheritance of single-node options. for p in p.self_and_parents(): d = self.vnodeOptionDict.get(p.v,{}) for key in d.keys(): if not key in seen: seen.append(key) val = d.get(key) self.setOption(key,val,tag=p.h) #@+node:ekr.20100811091636.5945: *5* initOptionsFromSettings def initOptionsFromSettings (self): d = self.defaultOptionsDict for key in sorted(d): self.setOption(key,d.get(key),'initOptionsFromSettings') #@+node:ekr.20100811091636.5946: *5* handleSingleNodeOptions def handleSingleNodeOptions (self,p): '''Init the settings of single-node options from the vnodeOptionsDict. 
All such options default to False.''' d = self.vnodeOptionDict.get(p.v, {} ) for ivar in self.singleNodeOptions: val = d.get(ivar,False) #g.trace('%24s %8s %s' % (ivar,val,p.h)) self.setOption(ivar,val,p.h) #@+node:ekr.20100811091636.5937: *5* preprocessNode def preprocessNode (self,p): d = self.vnodeOptionDict.get(p.v) if d is None: d = self.scanNodeForOptions(p) self.vnodeOptionDict [p.v] = d #@+node:ekr.20100811091636.5941: *5* scanNodeForOptions & helpers def scanNodeForOptions (self,p): '''Return a dictionary containing all the option-name:value entries in p. Such entries may arise from @rst-option or @rst-options in the headline, or from @ @rst-options doc parts.''' d = self.scanHeadlineForOptions(p) d2 = self.scanForOptionDocParts(p,p.b) d.update(d2) # Body options over-ride headline options. if d: g.trace(p.h,d) return d #@+node:ekr.20100811091636.5938: *6* parseOptionLine def parseOptionLine (self,s): '''Parse a line containing name=val and return (name,value) or None. If no value is found, default to True.''' s = s.strip() if s.endswith(','): s = s[:-1] # Get name. Names may contain '-' and '_'. i = g.skip_id(s,0,chars='-_') name = s [:i] if not name: return None j = g.skip_ws(s,i) if g.match(s,j,'='): val = s [j+1:].strip() # g.trace(val) return name,val else: # g.trace('*True') return name,'True' #@+node:ekr.20100811091636.5939: *6* scanForOptionDocParts def scanForOptionDocParts (self,p,s): '''Return a dictionary containing all options from @rst-options doc parts in p. Multiple @rst-options doc parts are allowed: this code aggregates all options. ''' d = {} ; n = 0 ; lines = g.splitLines(s) while n < len(lines): line = lines[n] ; n += 1 if line.startswith('@'): i = g.skip_ws(line,1) for kind in ('@rst-options','@rst-option'): if g.match_word(line,i,kind): # Allow options on the same line. line = line[i+len(kind):] d.update(self.scanOption(p,line)) # Add options until the end of the doc part. 
# NOTE(review): the statements below up to `return d` are the tail of a
# scanning method whose `def` line lies before this excerpt; the original
# whitespace was mangled, so the indentation chosen here (both `break`s
# inside the while) is a best-effort reconstruction -- verify upstream.
while n < len(lines):
    line = lines[n] ; n += 1
    found = False
    for stop in ('@c','@code', '@'):
        if g.match_word(line,0,stop):
            found = True ; break
    if found:
        break
    else:
        d.update(self.scanOption(p,line))
        break
return d
#@+node:ekr.20100811091636.5940: *6* scanHeadlineForOptions
def scanHeadlineForOptions (self,p):
    '''Return a dictionary containing the options implied by p's headline.'''
    h = p.h.strip()
    if p == self.topNode:
        return {} # Don't mess with the root node.
    if g.match_word(h,0,self.getOption('@rst-options')):
        return self.scanOptions(p,p.b)
    else:
        # Careful: can't use g.match_word because options may have '-' chars.
        # i = g.skip_id(h,0,chars='@-')
        # word = h[0:i]
        for option,ivar,val in (
            ('@rst-no-head','ignore_this_headline',True),
            ('@rst-head' ,'show_this_headline',True),
            ('@rst-no-headlines','show_headlines',False),
            ('@rst-ignore','ignore_this_tree',True),
            ('@rst-ignore-node','ignore_this_node',True),
            ('@rst-ignore-tree','ignore_this_tree',True),
            # ('@rst-preformat','preformat_this_node',True),
        ):
            # NOTE(review): `ivar` is unused here and no match against `h`
            # is visible -- the headline test may have been lost when this
            # excerpt's whitespace was mangled; verify against upstream leoRst.
            name = self.getOption(option)
            if name:
                d = { name: val }
                return d
        return {}
#@+node:ekr.20100811091636.5942: *6* scanOption
def scanOption (self,p,s):
    '''Return { name:val } if s is a line of the form name=val.
    Otherwise return {}'''
    if not s.strip() or s.strip().startswith('..'):
        return {} # Ignore blank lines and rST comments.
    data = self.parseOptionLine(s)
    if data:
        name,val = data
        if name in list(self.defaultOptionsDict.keys()):
            # Normalize booleans; all other values stay as strings.
            if val.lower() == 'true': val = True
            elif val.lower() == 'false': val = False
            # g.trace('%24s %8s %s' % (self.munge(name),val,p.h))
            return { self.munge(name): val }
        else:
            g.es_print('ignoring unknown option: %s' % (name),color='red')
            return {}
    else:
        g.trace(repr(s))
        s2 = 'bad rst3 option in %s: %s' % (p.h,s)
        g.es_print(s2,color='red')
        return {}
#@+node:ekr.20100811091636.5943: *6* scanOptions
def scanOptions (self,p,s):
    '''Return a dictionary containing all the options in s.'''
    d = {}
    for line in g.splitLines(s):
        d2 = self.scanOption(p,line)
        if d2: d.update(d2)
    return d
#@+node:ekr.20100811091636.6000: *3* Writing
#@+node:ekr.20100811091636.5984: *4* encode
def encode (self,s):
    '''Encode s using self.encoding, reporting any conversion errors.'''
    # g.trace(self.encoding)
    return g.toEncodedString(s,encoding=self.encoding,reportErrors=True)
#@+node:ekr.20100811091636.5930: *4* run
def run (self,event=None):
    '''Entry point: render the current tree to rST and write it to
    the configured output file.'''
    fn = self.defaultOptionsDict.get('output-file-name','format-code.rst.txt')
    self.outputFileName = g.os_path_finalize_join(g.app.loadDir,fn)
    # Accumulate everything in memory first, then dump to disk in one go.
    self.outputFile = StringIO() # Not a binary file.
    print('\n\n\n==========')
    self.writeTree(self.p.copy())
    s = self.outputFile.getvalue()
    self.outputFile = open(self.outputFileName,'w')
    self.outputFile.write(s)
    self.outputFile.close()
    g.es('rst-format: wrote',self.outputFileName)
#@+node:ekr.20100811091636.5987: *4* underline
def underline (self,s,p):
    '''Return the underlining string to be used at the given level for string s.'''
    trace = False and not g.unitTesting
    # The user is responsible for top-level overlining.
    u = self.getOption('underline_characters') # '''#=+*^~"'`-:><_'''
    level = max(0,p.level()-self.topLevel)
    level = min(level+1,len(u)-1) # Reserve the first character for explicit titles.
    ch = u [level]
    if trace: g.trace(self.topLevel,p.level(),level,repr(ch),p.h)
    # The underline must be at least as long as the *encoded* title.
    n = max(4,len(g.toEncodedString(s,encoding=self.encoding,reportErrors=False)))
    return '%s\n%s\n\n' % (p.h.strip(),ch*n)
#@+node:ekr.20100811091636.5975: *4* write
def write (self,s):
    '''Write s to self.outputFile, encoding as required by the Python version.'''
    # g.trace('%20s %20s %20s %s' % (self.p.h[:20],repr(s)[:20],repr(s)[-20:],g.callers(2)))
    # g.trace('%20s %40s %s' % (self.p.h[:20],repr(s)[:40],g.callers(2)))
    if g.isPython3:
        # Python 3: encode only when writing to a binary stream.
        if g.is_binary_file(self.outputFile):
            s = self.encode(s)
    else:
        # Python 2: always encode.
        s = self.encode(s)
    self.outputFile.write(s)
#@+node:ekr.20100811091636.5976: *4* writeBody & helpers
def writeBody (self,p):
    '''Split p.b into parts and write each part in the proper rST form.'''
    trace = False
    self.p = p.copy() # for traces.
    if not p.b.strip():
        return # No need to write any more newlines.
    showDocsAsParagraphs = self.getOption('show_doc_parts_as_paragraphs')
    lines = g.splitLines(p.b)
    parts = self.split_parts(lines,showDocsAsParagraphs)
    result = []
    for kind,lines in parts:
        if trace: g.trace(kind,len(lines),p.h)
        if kind == '@rst-option': # Also handles '@rst-options'
            pass # The prepass has already handled the options.
        elif kind == '@rst-markup':
            lines.extend('\n') # extend-with-string appends a single '\n'.
            result.extend(lines)
        elif kind == '@doc':
            if showDocsAsParagraphs:
                result.extend(lines)
                result.append('\n')
            else:
                result.extend(self.write_code_block(lines))
        elif kind == 'code':
            result.extend(self.write_code_block(lines))
        else:
            g.trace('Can not happen',kind)
    # Write the lines with exactly two trailing newlines.
    s = ''.join(result).rstrip() + '\n\n'
    self.write(s)
#@+node:ekr.20100811091636.6003: *5* split_parts
def split_parts (self,lines,showDocsAsParagraphs):
    '''Split a list of body lines into a list of tuples (kind,lines).'''
    kind,parts,part_lines = 'code',[],[]
    for s in lines:
        if g.match_word(s,0,'@ @rst-markup'):
            # Start a raw-rST part; flush whatever part was in progress.
            if part_lines: parts.append((kind,part_lines[:]),)
            kind = '@rst-markup'
            n = len('@ @rst-markup')
            after = s[n:].strip()
            part_lines = g.choose(after,[after],[])
        elif s.startswith('@ @rst-option'):
            if part_lines: parts.append((kind,part_lines[:]),)
            kind,part_lines = '@rst-option',[s] # part_lines will be ignored.
        elif s.startswith('@ ') or s.startswith('@\n') or s.startswith('@doc'):
            if showDocsAsParagraphs:
                if part_lines: parts.append((kind,part_lines[:]),)
                kind = '@doc'
                # Put only what follows @ or @doc
                n = g.choose(s.startswith('@doc'),4,1)
                after = s[n:].lstrip()
                part_lines = g.choose(after,[after],[])
            else:
                part_lines.append(s) # still in code mode.
        elif g.match_word(s,0,'@c') and kind != 'code':
            # @c ends a doc/markup part and resumes code mode.
            if kind == '@doc' and not showDocsAsParagraphs:
                part_lines.append(s) # Show the @c as code.
            parts.append((kind,part_lines[:]),)
            kind,part_lines = 'code',[]
        else:
            part_lines.append(s)
    if part_lines:
        parts.append((kind,part_lines[:]),)
    return parts
#@+node:ekr.20100811091636.6004: *5* write_code_block
def write_code_block (self,lines):
    '''Return lines wrapped in an rST literal block, optionally numbered.'''
    result = ['::\n\n'] # ['[**code block**]\n\n']
    if self.getOption('number-code-lines'):
        i = 1
        for s in lines:
            result.append(' %d: %s' % (i,s))
            i += 1
    else:
        result.extend([' %s' % (z) for z in lines])
    s = ''.join(result).rstrip()+'\n\n'
    return g.splitLines(s)
#@+node:ekr.20100811091636.5977: *4* writeHeadline & helper
def writeHeadline (self,p):
    '''Generate an rST section if options permit it.
    Remove headline commands from the headline first,
    and never generate an rST section for @rst-option and @rst-options.'''
    docOnly = self.getOption('doc_only_mode')
    ignore = self.getOption('ignore_this_headline')
    showHeadlines = self.getOption('show_headlines')
    showThisHeadline = self.getOption('show_this_headline')
    showOrganizers = self.getOption('show_organizer_nodes')
    if (
        p == self.topNode or
        ignore or
        docOnly or # handleDocOnlyMode handles this.
        not showHeadlines and not showThisHeadline or
        # docOnly and not showOrganizers and not thisHeadline or
        not p.h.strip() and not showOrganizers or
        not p.b.strip() and not showOrganizers
    ):
        return
    self.writeHeadlineHelper(p)
#@+node:ekr.20100811091636.5978: *5* writeHeadlineHelper
def writeHeadlineHelper (self,p):
    '''Write p's headline as an rST section title or bold text.'''
    h = p.h.strip()
    # Remove any headline command before writing the headline.
    i = g.skip_ws(h,0)
    i = g.skip_id(h,0,chars='@-')
    word = h [:i].strip()
    if word:
        # Never generate a section for @rst-option or @rst-options or @rst-no-head.
        if word in ('@rst-option','@rst-options','@rst-no-head','@rst-no-leadlines'):
            return
        # Remove all other headline commands from the headline.
        # self.getOption('ignore_node_prefix'),
        # self.getOption('ignore_tree_prefix'),
        # self.getOption('show_headline_prefix'),
        ### for prefix in self.headlineCommands:
        for prefix in ('@rst-ignore-node','@rst-ignore-tree','@rst-ignore'):
            if word == prefix:
                h = h [len(word):].strip()
                break
        # New in Leo 4.4.4.
        # if word.startswith('@'):
        #     if self.getOption('strip_at_file_prefixes'):
        #         for s in ('@auto','@file','@nosent','@thin',):
        #             if g.match_word(word,0,s):
        #                 h = h [len(s):].strip()
    if not h.strip():
        return
    if self.getOption('show_sections'):
        self.write(self.underline(h,p))
    else:
        # Emphasize the headline instead of making it a section.
        self.write('\n**%s**\n\n' % h.replace('*',''))
#@+node:ekr.20100811091636.5979: *4* writeNode
def writeNode (self,p):
    '''Format a node according to the options presently in effect.
    Side effect: advance p'''
    h = p.h.strip()
    self.scanAllOptions(p)
    if self.getOption('ignore_this_tree'):
        p.moveToNodeAfterTree()
    elif self.getOption('ignore_this_node'):
        p.moveToThreadNext()
    elif g.match_word(h,0,'@rst-options') and not self.getOption('show_options_nodes'):
        p.moveToThreadNext()
    else:
        self.writeHeadline(p)
        self.writeBody(p)
        p.moveToThreadNext()
#@+node:ekr.20100811091636.5981: *4* writeTree
def writeTree(self,p):
    '''Write p's tree to self.outputFile.'''
    self.scanAllOptions(p) # So we can get the next option.
    if self.getOption('generate_rst_header_comment'):
        self.write('.. rst3: filename: %s\n\n' % self.outputFileName)
    # We can't use an iterator because we may skip parts of the tree.
    p = p.copy() # Only one copy is needed for traversal.
    self.topNode = p.copy() # Indicate the top of this tree.
    after = p.nodeAfterTree()
    while p and p != after:
        self.writeNode(p) # Side effect: advances p.
def get_csv(self, data):
    """Build a product rotation CSV report and return a download action.

    ``self.report_option`` selects the invoice type:
      '1' -> supplier invoices (in_invoice, quantity bought),
      '2' -> customer invoices (out_invoice, quantity sold).
    Products with no movement in the period are included only when
    ``self.products_without_rotation_view`` is set.

    :param data: wizard payload (its 'active_ids' entry was read but
        unused in the original; kept for interface compatibility).
    :return: an ``ir.actions.act_url`` dict pointing at the generated
        ``ir.attachment``, or True when no report option matches.
    """
    if self.report_option == '1':
        return self._rotation_report('in_invoice', 'Cantidad Comprada')
    if self.report_option == '2':
        return self._rotation_report('out_invoice', 'Cantidad Vendida')
    return True

def _rotation_report(self, invoice_type, qty_label):
    """Collect per-product invoiced quantities for the wizard's date range
    and export them as a semicolon-separated CSV attachment.

    The two report options of the original method were byte-for-byte
    duplicates except for the invoice type and the quantity column label,
    so they are unified here.
    """
    # Sum invoiced quantity per product id in one pass instead of
    # re-querying the same invoice set once per product (O(n*m) before).
    invoices = self.env['account.invoice'].search([
        ('date', '>=', self.date_init),
        ('date', '<=', self.date_end),
        ('type', '=', invoice_type)])
    qty_by_product = {}
    for invoice in invoices:
        for line in invoice.invoice_line_ids:
            pid = line.product_id.id
            qty_by_product[pid] = qty_by_product.get(pid, 0) + line.quantity

    lines_total = []
    products = self.env['product.product'].search([('active', '=', True)])
    for p in products:
        qty = qty_by_product.get(p.id, 0)
        if qty == 0 and not self.products_without_rotation_view:
            continue
        # Append attribute names to disambiguate product variants.
        name_aux = p.name
        for a in p.attribute_line_ids:
            name_aux = name_aux + "-" + a.display_name
        for b in p.attribute_value_ids:
            name_aux = name_aux + "-" + b.name
        lines_total.append({
            'product_id': p.id,
            'name': name_aux,
            # `or ""` guards empty recordsets (False) -- the original did
            # this only in one of its four copies of this dict.
            'category': p.categ_id.name or "",
            'uom': p.product_tmpl_id.uom_id.name or "",
            'qty_sale': qty,
        })

    # Highest rotation first; replaces the hand-rolled bubble sort
    # (both are stable descending orders).
    lines_total.sort(key=lambda l: l['qty_sale'], reverse=True)

    stamp = datetime.today().strftime("%d-%m-%Y")
    path = '/tmp/file_%s.csv' % stamp
    with open(path, 'w') as csvfile:
        csvfile.write("Reporte de; Rotacion de;Productos;\n")
        # Bug fix: the original formatted self.date_init into BOTH columns;
        # the "Fecha Fin" column must show the end of the period.
        csvfile.write("Fecha Inicio:;{0};Fecha Fin;{1} \n".format(
            self.date_init, self.date_end))
        csvfile.write("Id Producto;Producto;Categoria; UM; %s \n" % qty_label)
        for lin in lines_total:
            csvfile.write("{0};{1};{2};{3};{4} \n".format(
                lin['product_id'], lin['name'], lin['category'],
                lin['uom'], lin['qty_sale']))

    # Read the file back and attach it; `with` closes the handle the
    # original leaked.
    with open(path, 'r') as result_file:
        encoded = base64.encodestring(result_file.read())
    attach_vals = {
        'name': 'Reporte de Rotacion de Productos %s.csv' % stamp,
        'datas': encoded,
        'datas_fname': 'File#_%s.csv' % stamp,
    }
    doc_id = self.env['ir.attachment'].create(attach_vals)
    return {
        'type': "ir.actions.act_url",
        'url': "web/content/?model=ir.attachment&id=" + str(doc_id.id) +
               "&filename_field=datas_fname&field=datas&download=true&filename=" +
               str(doc_id.name),
        'target': "self",
    }
def mainCGI(): """ Main driver for running PDB2PQR from a web page """ print "Content-type: text/html\n" import cgi import cgitb cgitb.enable() form = cgi.FieldStorage() ff = form["FF"].value input = 0 apbs_input = form.has_key("INPUT") typemap = form.has_key("TYPEMAP") neutraln = form.has_key("NEUTRALN") neutralc = form.has_key("NEUTRALC") if HAVE_PDB2PQR_OPAL=="1": have_opal = True # Opal-specific import statments from AppService_client import AppServiceLocator, getAppMetadataRequest, launchJobRequest, launchJobBlockingRequest, getOutputAsBase64ByNameRequest from AppService_types import ns0 from ZSI.TC import String else: have_opal = False if have_opal: options = {} options["ff"] = ff fffile = None namesfile = None else: options = {"extensions":{}} if form.has_key("DEBUMP"): options["debump"] = 1 else: options["debump"] = 0 if form.has_key("OPT"): options["opt"] = 1 else: options["opt"] = 0 if form.has_key("PROPKA"): try: ph = float(form["PH"].value) if ph < 0.0 or ph > 14.0: raise ValueError options["ph"] = ph except ValueError: text = "The entered pH of %.2f is invalid! " % form["PH"].value text += "Please choose a pH between 0.0 and 14.0." 
#print "Content-type: text/html\n" print text sys.exit(2) if form.has_key("PDBID"): pdbfile = getPDBFile(form["PDBID"].value) pdbfilename = form["PDBID"].value elif form.has_key("PDB"): pdbfile = StringIO(form["PDB"].value) pdbfilename = form["PDB"].filename pdbfilename = re.split(r'[/\\]',pdbfilename)[-1] if form.has_key("INPUT"): input = 1 options["apbs"] = 1 if form.has_key("USERFF"): if have_opal: ffname = form["USERFF"].filename ffname = re.split(r'[/\\]',ffname)[-1] if ffname[-4:] == ".DAT": ffname = ffname[:-4] fffile = StringIO(form["USERFF"].value) namesfile = StringIO(form["USERNAMES"].value) options["ff"] = ffname options["userff"] = fffile options["usernames"] = namesfile else: userff = StringIO(form["USERFF"].value) usernames = StringIO(form["USERNAMES"].value) options["ff"] = "user-defined" options["userff"] = userff options["usernames"] = usernames if form.has_key("FFOUT"): if form["FFOUT"].value != "internal": options["ffout"] = form["FFOUT"].value if form.has_key("CHAIN"): options["chain"] = 1 if form.has_key("WHITESPACE"): options["whitespace"] = 1 if form.has_key("TYPEMAP"): options["typemap"] = 1 if form.has_key("NEUTRALN"): options["neutraln"] = 1 if form.has_key("NEUTRALC"): options["neutralc"] = 1 if form.has_key("LIGAND"): if have_opal: ligandfilename=str(form["LIGAND"].filename) ligandfilename=re.split(r'[/\\]',ligandfilename)[-1] # for Windows-style newline compatibility templigandfilename = tempfile.mkstemp()[1] templigandfile = open(templigandfilename,'w') templigandfile.write(form["LIGAND"].value) templigandfile.close() templigandfile = open(templigandfilename,'rU') if have_opal: options["ligand"] = templigandfile.read() else: templigandstring = templigandfile.read() # this variable is used again later to write this file to output options["ligand"] = StringIO(templigandstring) templigandfile.close() if not have_opal: pdbfilestring = pdbfile.read() pdblist, errlist = readPDB(StringIO(pdbfilestring)) dummydef = Definition() dummyprot = 
Protein(pdblist, dummydef) if len(pdblist) == 0 and len(errlist) == 0: text = "Unable to find PDB file - Please make sure this is " text += "a valid PDB file ID!" #print "Content-type: text/html\n" print text sys.exit(2) elif dummyprot.numAtoms() > MAXATOMS and "opt" in options: text = "<HTML><HEAD>" text += "<TITLE>PDB2PQR Error</title>" text += "<link rel=\"stylesheet\" href=\"%s\" type=\"text/css\">" % STYLESHEET text += "</HEAD><BODY><H2>PDB2PQR Error</H2><P>" text += "Due to server limits, we are currently unable to optimize " text += "proteins of greater than MAXATOMS atoms on the server (PDB2PQR " text += "found %s atoms in the selected PDB file). If you " % dummyprot.numAtoms() text += "want to forgo optimization please try the server again.<P>" text += "Otherwise you may use the standalone version of PDB2PQR that " text += "is available from the <a href=\"http://pdb2pqr.sourceforge.net\">" text += "PDB2PQR SourceForge project page</a>." text += "<script type=\"text/javascript\">" text += "var gaJsHost = ((\"https:\" == document.location.protocol) ? 
\"https://ssl.\" : \"http://www.\");" text += "document.write(unescape(\"%3Cscript src=\'\" + gaJsHost + \"google-analytics.com/ga.js\' type=\'text/javascript\'%3E%3C/script%3E\"));" text += "</script>" text += "<script type=\"text/javascript\">" text += "try {" text += "var pageTracker = _gat._getTracker(\"UA-11026338-3\");" for key in options: text += "pageTracker._trackPageview(\"/main_cgi/has_%s_%s.html\");" % (key, options[key]) text += "pageTracker._trackPageview();" text += "} catch(err) {}</script>" text += "</BODY></HTML>" #print "Content-type: text/html\n" print text sys.exit(2) try: if have_opal: ligandFile=None ffFile=None namesFile=None #else: starttime = time.time() name = setID(starttime) os.makedirs('%s%s%s' % (INSTALLDIR, TMPDIR, name)) apbsInputFile = open('%s%s%s/apbs_input' % (INSTALLDIR, TMPDIR, name),'w') apbsInputFile.write(str(apbs_input)) apbsInputFile.close() typemapInputFile = open('%s%s%s/typemap' % (INSTALLDIR, TMPDIR, name),'w') typemapInputFile.write(str(typemap)) typemapInputFile.close() if have_opal: myopts="" for key in options: if key=="opt": if options[key]==0: # user does not want optimization key="noopt" else: # pdb2pqr optimizes by default, don't bother with flag continue elif key=="debump": if options[key]==0: # user does not want debumping key="nodebump" else: # pdb2pqr debumps by default, so change this flag to --nodebump continue elif key=="ph": val=options[key] key="with-ph=%s" % val elif key=="ffout": val=options[key] key="ffout=%s" % val elif key=="ligand": val=ligandfilename key="ligand=%s" % val ligandFile = ns0.InputFileType_Def('inputFile') ligandFile._name = val ligandFile._contents = options["ligand"] elif key=="apbs": key="apbs-input" elif key=="chain": key="chain" elif key=="whitespace": key="whitespace" elif key=="typemap": key="typemap" elif key=="ff": val=options[key] key="ff=%s" % val if fffile: ffFile = ns0.InputFileType_Def('inputFile') ffFile._name = val + ".DAT" ffFileString = fffile.read() 
ffFile._contents = ffFileString if namesfile: namesFile = ns0.InputFileType_Def('inputFile') namesFile._name = val + ".names" namesFileString = namesfile.read() namesFile._contents = namesFileString if key not in ["userff", "usernames"]: myopts+="--"+str(key)+" " myopts+=str(pdbfilename)+" " if pdbfilename[-4:]==".pdb": myopts+="%s.pqr" % str(pdbfilename[:-4]) else: myopts+="%s.pqr" % str(pdbfilename) appLocator = AppServiceLocator() appServicePort = appLocator.getAppServicePort(PDB2PQR_OPAL_URL) # launch job req = launchJobRequest() req._argList = myopts inputFiles = [] pdbOpalFile = ns0.InputFileType_Def('inputFile') pdbOpalFile._name = pdbfilename pdbOpalFile._contents = pdbfile.read() pdbfile.close() inputFiles.append(pdbOpalFile) if ligandFile: inputFiles.append(ligandFile) if ffFile: inputFiles.append(ffFile) if namesFile: inputFiles.append(namesFile) req._inputFile=inputFiles try: resp=appServicePort.launchJob(req) except Exception, e: printHeader("PDB2PQR Job Submission - Error") print "<BODY>\n<P>" print "There was an error with your job submission<br>" print "</P>" print "<script type=\"text/javascript\">" print "var gaJsHost = ((\"https:\" == document.location.protocol) ? 
\"https://ssl.\" : \"http://www.\");" print "document.write(unescape(\"%3Cscript src=\'\" + gaJsHost + \"google-analytics.com/ga.js\' type=\'text/javascript\'%3E%3C/script%3E\"));" print "</script>" print "<script type=\"text/javascript\">" print "try {" print "var pageTracker = _gat._getTracker(\"UA-11026338-3\");" for key in options: print "pageTracker._trackPageview(\"/main_cgi/has_%s_%s.html\");" % (key, options[key]) print "pageTracker._trackPageview();" print "} catch(err) {}</script>" print "</BODY>" print "</HTML>" sys.exit(2) #printHeader("PDB2PQR Job Submission",have_opal,jobid=resp._jobID) pdb2pqrOpalJobIDFile = open('%s%s%s/pdb2pqr_opal_job_id' % (INSTALLDIR, TMPDIR, name), 'w') pdb2pqrOpalJobIDFile.write(resp._jobID) pdb2pqrOpalJobIDFile.close() print redirector(name) if options.has_key("userff"): options["userff"] = ffFileString if options.has_key("usernames"): options["usernames"] = namesFileString # Recording CGI run information for PDB2PQR Opal pdb2pqrOpalLogFile = open('%s%s%s/pdb2pqr_opal_log' % (INSTALLDIR, TMPDIR, name), 'w') pdb2pqrOpalLogFile.write(str(options)+'\n'+str(ff)+'\n'+str(os.environ["REMOTE_ADDR"])) pdb2pqrOpalLogFile.close() else:
def detail(request):
    """Handle a project upload: validate the form, save the Project row,
    and generate a center-cropped JPEG thumbnail (400x300 max).

    GET (or an invalid POST) renders the upload form; a valid POST
    redirects to '/'.
    """
    if request.method == 'POST':
        form = ImageUploadForm(request.POST, request.FILES)
        if form.is_valid():
            p = Project()
            p.name = form.cleaned_data['name']
            p.architect = form.cleaned_data['architect']
            p.image_file = form.cleaned_data['image']
            p.save()

            image_file = request.FILES['image']
            # ''.join avoids the quadratic cost of the original's
            # per-chunk `image_str += c` concatenation.
            image_str = ''.join(image_file.chunks())
            image = PImage.open(StringIO(image_str))
            if image.mode != "RGB":
                image = image.convert('RGB')

            width, height = image.size
            wh_ratio = float(width) / float(height)
            if wh_ratio > 4.0 / 3.0 and height > 300:
                # Wide image: scale to 300px tall, crop a centered 400px strip.
                ratio = 300.0 / float(height)
                image.thumbnail((int(ceil(width * ratio)), 300), PImage.ANTIALIAS)
                width, height = image.size
                cropped_image = image.crop(
                    (int(width / 2) - 200, 0, int(width / 2) + 200, 300))
            else:
                if width > 400:
                    # Narrow/tall image: scale to 400px wide, crop a
                    # centered 300px band.
                    ratio = 400.0 / float(width)
                    image.thumbnail((400, int(ceil(height * ratio))), PImage.ANTIALIAS)
                    width, height = image.size
                    cropped_image = image.crop(
                        (0, int(height / 2) - 150, 400, int(height / 2) + 150))
                else:
                    cropped_image = image  # Already small enough.

            filename, ext = os.path.splitext(p.image_file.name)
            # NOTE(review): Django storages normally expect a name relative
            # to MEDIA_ROOT; passing an absolute path here only works with
            # some backends -- confirm against the project's storage.
            thumb_filename = settings.MEDIA_ROOT + filename + "-thumb" + ext
            f = StringIO()
            try:
                cropped_image.save(f, format='jpeg')
                p.thumbnail_file.save(thumb_filename, ContentFile(f.getvalue()))
                p.save()
            finally:
                f.close()
            return HttpResponseRedirect('/')
    else:
        form = ImageUploadForm()
    return render(request, 'arkiwimain/detail.html', {'form': form})
def mainCGI(): """ Main driver for running PDB2PQR from a web page """ import cgi import cgitb cgitb.enable() form = cgi.FieldStorage() options = {"extensions": {}} ff = form["FF"].value input = 0 if form.has_key("DEBUMP"): options["debump"] = 1 if form.has_key("OPT"): options["opt"] = 1 if form.has_key("PROPKA"): try: ph = float(form["PH"].value) if ph < 0.0 or ph > 14.0: raise ValueError options["ph"] = ph except ValueError: text = "The entered pH of %.2f is invalid! " % form["PH"].value text += "Please choose a pH between 0.0 and 14.0." print "Content-type: text/html\n" print text sys.exit(2) if form.has_key("PDBID"): file = getPDBFile(form["PDBID"].value) elif form.has_key("PDB"): file = StringIO(form["PDB"].value) if form.has_key("INPUT"): input = 1 options["apbs"] = 1 if form.has_key("USERFF"): userff = StringIO(form["USERFF"].value) ff = "user-defined" options["userff"] = userff if form.has_key("FFOUT"): if form["FFOUT"].value != "internal": options["ffout"] = form["FFOUT"].value if form.has_key("CHAIN"): options["chain"] = 1 if form.has_key("LIGAND"): options["ligand"] = StringIO(form["LIGAND"].value) pdblist, errlist = readPDB(file) if len(pdblist) == 0 and len(errlist) == 0: text = "Unable to find PDB file - Please make sure this is " text += "a valid PDB file ID!" print "Content-type: text/html\n" print text sys.exit(2) elif len(pdblist) > 10000 and "opt" in options: text = "<HTML><HEAD>" text += "<TITLE>PDB2PQR Error</title>" text += "<link rel=\"stylesheet\" href=\"%s\" type=\"text/css\">" % STYLESHEET text += "</HEAD><BODY><H2>PDB2PQR Error</H2><P>" text += "Due to server limits, we are currently unable to optimize " text += "proteins of greater than 10000 atoms on the server. If you " text += "want to forgo optimization please try the server again.<P>" text += "Otherwise you may use the standalone version of PDB2PQR that " text += "is available from the <a href=\"http://pdb2pqr.sourceforge.net\">" text += "PDB2PQR SourceForge project page</a>." 
text += "</BODY></HTML>" print "Content-type: text/html\n" print text sys.exit(2) try: starttime = time.time() name = setID(starttime) pqrpath = startServer(name) options["outname"] = pqrpath header, lines, missedligands = runPDB2PQR(pdblist, ff, options) file = open(pqrpath, "w") file.write(header) for line in lines: file.write("%s\n" % string.strip(line)) file.close() if input: from src import inputgen from src import psize method = "mg-auto" size = psize.Psize() size.parseInput(pqrpath) size.runPsize(pqrpath) async = 0 # No async files here! myinput = inputgen.Input(pqrpath, size, method, async) myinput.printInputFiles() endtime = time.time() - starttime createResults(header, input, name, endtime, missedligands) logRun(options, endtime, len(lines), ff, os.environ["REMOTE_ADDR"]) except StandardError, details: print "Content-type: text/html\n" print details createError(name, details)
# NOTE(review): the statements preceding err_handler's def are the tail of a
# POP->SMTP message-forwarding routine whose def line lies outside this
# excerpt; the names f, user, msg, msgnum, pop and smtp are bound by that
# unseen code.
err_handler("error sending message, returning it to host", pop, smtp)
try:
    smtp.mail_to(user)
except:
    err_handler("error sending message, returning it to host", pop, smtp)
try:
    smtp.data(msg)
except:
    err_handler("error sending message, returning it to host", pop, smtp)
f.close()
try:
    pop.delete(msgnum)
except:
    err_handler("error deleting message %d" % msgnum, pop, smtp)
pop.close()
smtp.close()

def err_handler(errmsg, pop, smtp):
    # Report the error on stderr (prefixed with this script's first
    # argument), reset the POP session so the message stays on the server,
    # and close both connections.  Does not exit the process.
    sys.stderr.write(sys.argv[1] + ": " + errmsg)
    pop.reset()
    pop.close()
    smtp.close()
def do_POST(self):
    """Dispatch an Eye-Fi card's SOAP POST to the matching handler.

    The card drives the protocol: StartSession, GetPhotoStatus, the photo
    upload itself, and MarkLastPhotoInRoll all arrive as POSTs to one of
    two paths, distinguished by the SOAPAction header.  The exact response
    headers mimic the official Eye-Fi agent, so their names, values and
    order are preserved as-is.
    """
    try:
        eyeFiLogger.debug(self.command + " " + self.path + " " + self.request_version)

        SOAPAction = ""
        contentLength = ""

        # Loop through all the request headers and pick out ones that are relevant
        eyeFiLogger.debug("Headers received in POST request:")
        for headerName in self.headers.keys():
            for headerValue in self.headers.getheaders(headerName):
                if (headerName == "soapaction"):
                    SOAPAction = headerValue
                if (headerName == "content-length"):
                    contentLength = int(headerValue)
                eyeFiLogger.debug(headerName + ": " + headerValue)

        # Read contentLength bytes worth of data
        eyeFiLogger.debug("Attempting to read " + str(contentLength) + " bytes of data")
        try:
            from StringIO import StringIO
            import tempfile
        except ImportError:
            eyeFiLogger.debug("No StringIO module")
        # Buffer the body in memory in 1MB chunks rather than one huge read.
        chunksize = 1048576 # 1MB
        mem = StringIO()
        while 1:
            remain = contentLength - mem.tell()
            if remain <= 0:
                break
            chunk = self.rfile.read(min(chunksize, remain))
            if not chunk:
                break  # Connection closed early.
            mem.write(chunk)
        postData = mem.getvalue()
        mem.close()
        eyeFiLogger.debug("Finished reading " + str(contentLength) + " bytes of data")

        # Perform action based on path and SOAPAction
        # A SOAPAction of StartSession indicates the beginning of an EyeFi
        # authentication request
        if ((self.path == "/api/soap/eyefilm/v1") and (SOAPAction == "\"urn:StartSession\"")):
            eyeFiLogger.debug("Got StartSession request")
            response = self.startSession(postData)
            contentLength = len(response)
            eyeFiLogger.debug("StartSession response: " + response)
            self.send_response(200)
            self.send_header('Date', self.date_time_string())
            self.send_header('Pragma', 'no-cache')
            self.send_header('Server', 'Eye-Fi Agent/2.0.4.0 (Windows XP SP2)')
            self.send_header('Content-Type', 'text/xml; charset="utf-8"')
            self.send_header('Content-Length', contentLength)
            self.end_headers()
            self.wfile.write(response)
            self.wfile.flush()
            # NOTE(review): explicitly servicing the next request on the
            # same connection; presumably keeps the card's keep-alive
            # session going -- confirm against the base handler's
            # keep-alive handling.
            self.handle_one_request()

        # GetPhotoStatus allows the card to query if a photo has been uploaded
        # to the server yet
        if ((self.path == "/api/soap/eyefilm/v1") and (SOAPAction == "\"urn:GetPhotoStatus\"")):
            eyeFiLogger.debug("Got GetPhotoStatus request")
            response = self.getPhotoStatus(postData)
            contentLength = len(response)
            eyeFiLogger.debug("GetPhotoStatus response: " + response)
            self.send_response(200)
            self.send_header('Server', 'Eye-Fi UnderTheSea OSX/3.0.2')
            self.send_header('Connection', 'Keep-Alive')
            self.send_header('Keep-Alive', 'timeout=300, max=10')
            self.send_header('Date', self.date_time_string())
            self.send_header('Content-Type', 'text/xml; charset="utf-8"')
            self.send_header('Content-Length', contentLength)
            self.end_headers()
            self.wfile.write(response)
            self.wfile.flush()

        # If the URL is upload and there is no SOAPAction the card is ready to send a picture to me
        if ((self.path == "/api/soap/eyefilm/v1/upload") and (SOAPAction == "")):
            eyeFiLogger.debug("Got upload request")
            response = self.uploadPhoto(postData)
            contentLength = len(response)
            eyeFiLogger.debug("Upload response: " + response)
            self.send_response(200)
            self.send_header('Date', self.date_time_string())
            self.send_header('Pragma', 'no-cache')
            self.send_header('Server', 'Eye-Fi Agent/2.0.4.0 (Windows XP SP2)')
            self.send_header('Content-Type', 'text/xml; charset="utf-8"')
            self.send_header('Content-Length', contentLength)
            self.end_headers()
            self.wfile.write(response)
            self.wfile.flush()

        # If the URL is upload and SOAPAction is MarkLastPhotoInRoll
        if ((self.path == "/api/soap/eyefilm/v1") and (SOAPAction == "\"urn:MarkLastPhotoInRoll\"")):
            eyeFiLogger.debug("Got MarkLastPhotoInRoll request")
            response = self.markLastPhotoInRoll(postData)
            contentLength = len(response)
            eyeFiLogger.debug("MarkLastPhotoInRoll response: " + response)
            self.send_response(200)
            self.send_header('Date', self.date_time_string())
            self.send_header('Pragma', 'no-cache')
            self.send_header('Server', 'Eye-Fi Agent/2.0.4.0 (Windows XP SP2)')
            self.send_header('Content-Type', 'text/xml; charset="utf-8"')
            self.send_header('Content-Length', contentLength)
            self.send_header('Connection', 'Close')
            self.end_headers()
            self.wfile.write(response)
            self.wfile.flush()
            eyeFiLogger.debug("Connection closed.")
    except:
        # Log the full traceback, then re-raise so the server framework
        # still sees the failure.
        eyeFiLogger.error("Got an an exception:")
        eyeFiLogger.error(traceback.format_exc())
        raise
def mainCGI():
    """ Opal driver for running PDB2PQR from a web page

        Collects form options from the CGI request, rewrites them into a
        command-line style option string, and submits the job to a remote
        Opal service at OPALURL together with the PDB / ligand / forcefield
        input files.

        NOTE(review): the except clause for the outer try below lies past
        the end of this chunk -- the function is truncated here.
    """
    serviceURL = OPALURL
    cgitb.enable()
    form = cgi.FieldStorage()
    options = {}
    ff = form["FF"].value
    options["ff"] = ff
    fffile = None
    # input flags whether the user requested APBS input generation
    input = 0
    # Checkbox semantics: an absent key means the option is off.
    if form.has_key("DEBUMP"):
        options["debump"] = 1
    else:
        options["debump"] = 0
    if form.has_key("OPT"):
        options["opt"] = 1
    else:
        options["opt"] = 0
    if form.has_key("PROPKA"):
        try:
            ph = float(form["PH"].value)
            if ph < 0.0 or ph > 14.0:
                raise ValueError
            options["ph"] = ph
        except ValueError:
            # NOTE(review): form["PH"].value is a str here, so "%.2f" % str
            # raises TypeError instead of producing this message -- confirm
            # and consider "%s" instead.
            text = "The entered pH of %.2f is invalid! " % form["PH"].value
            text += "Please choose a pH between 0.0 and 14.0."
            # print "Content-type: text/html\n"
            print text
            sys.exit(2)
    # Input structure: either a PDB ID fetched remotely or an uploaded file.
    if form.has_key("PDBID"):
        filename = form["PDBID"].value
        infile = getPDBFile(form["PDBID"].value)
    elif form.has_key("PDB"):
        filename = form["PDB"].filename
        # Strip any client-side directory components from the upload name.
        filename = re.split(r'[/\\]', filename)[-1]
        infile = StringIO(form["PDB"].value)
    if form.has_key("INPUT"):
        input = 1
        options["apbs"] = 1
    if form.has_key("USERFF"):
        # userff = StringIO(form["USERFF"].value)
        # ff = "user-defined"
        # options["userff"] = userff
        ffname = form["USERFF"].filename
        ffname = re.split(r'[/\\]', ffname)[-1]
        fffile = StringIO(form["USERFF"].value)
        options["ff"] = ffname
    if form.has_key("FFOUT"):
        if form["FFOUT"].value != "internal":
            options["ffout"] = form["FFOUT"].value
    if form.has_key("CHAIN"):
        options["chain"] = 1
    if form.has_key("WHITESPACE"):
        options["whitespace"] = 1
    if form.has_key("LIGAND"):
        ligandfilename = str(form["LIGAND"].filename)
        ligandfilename = re.split(r'[/\\]', ligandfilename)[-1]
        options["ligand"] = StringIO(form["LIGAND"].value)
    try:
        # starttime = time.time()
        # name = setID(starttime)
        name = filename
        ligandFile = None
        ffFile = None
        # begin SOAP changes
        # need to switch options from a dictionary to something resembling
        # a command line query such as --chain
        myopts = ""
        for key in options:
            if key == "opt":
                if options[key] == 0:
                    # user does not want optimization
                    key = "noopt"
                else:
                    # pdb2pqr optimizes by default, don't bother with flag
                    continue
            elif key == "debump":
                if options[key] == 0:
                    # user does not want debumping
                    key = "nodebump"
                else:
                    # pdb2pqr debumps by default, so change this flag to --nodebump
                    continue
            elif key == "ph":
                val = options[key]
                key = "with-ph=%s" % val
            elif key == "ffout":
                val = options[key]
                key = "ffout=%s" % val
            elif key == "ligand":
                val = ligandfilename
                key = "ligand=%s" % val
                # Build the Opal input-file record for the ligand.
                ligandFile = ns0.InputFileType_Def('inputFile')
                ligandFile._name = val
                # NOTE(review): options["ligand"] is a StringIO object;
                # open() on it raises TypeError.  It looks like the intent
                # was options["ligand"].read() -- verify.
                ligandFileTemp = open(options["ligand"], "r")
                ligandFileString = ligandFileTemp.read()
                ligandFileTemp.close()
                ligandFile._contents = ligandFileString
            elif key == "apbs":
                key = "apbs-input"
            elif key == "chain":
                key = "chain"
            elif key == "whitespace":
                key = "whitespace"
            elif key == "ff":
                val = options[key]
                key = "ff=%s" % val
                if fffile:
                    # Build the Opal input-file record for a user forcefield.
                    ffFile = ns0.InputFileType_Def('inputFile')
                    ffFile._name = val
                    # NOTE(review): fffile is a StringIO object; open() on it
                    # raises TypeError (same issue as the ligand above).
                    ffFileTemp = open(fffile, "r")
                    ffFileString = ffFileTemp.read()
                    ffFileTemp.close()
                    ffFile._contents = ffFileString
            myopts += "--" + str(key) + " "
        # Positional arguments: input PDB name and output PQR name.
        myopts += str(filename) + " "
        myopts += "%s.pqr" % str(name)
        appLocator = AppServiceLocator()
        appServicePort = appLocator.getAppServicePort(serviceURL)
        # launch job
        req = launchJobRequest()
        req._argList = myopts
        inputFiles = []
        pdbFile = ns0.InputFileType_Def('inputFile')
        pdbFile._name = filename
        pdbFile._contents = infile.read()
        infile.close()
        inputFiles.append(pdbFile)
        if ligandFile:
            inputFiles.append(ligandFile)
        if ffFile:
            inputFiles.append(ffFile)
        req._inputFile = inputFiles
        try:
            resp = appServicePort.launchJob(req)
        except Exception, e:
            printheader("PDB2PQR Job Submission - Error")
            print "<BODY>\n<P>"
            print "There was an error with your job submission<br>"
            print "</P>\n</BODY>"
            print "</HTML>"
            sys.exit(2)
        printheader("PDB2PQR Job Submission", resp._jobID)
def do_POST(self):
    """Handle a POST from an Eye-Fi card.

    Reads the SOAPAction and Content-Length headers, buffers the request
    body in chunks, then dispatches on (path, SOAPAction) to the
    StartSession / GetPhotoStatus / upload / MarkLastPhotoInRoll handlers
    and writes the SOAP response back to the card.
    """
    try:
        eyeFiLogger.debug(self.command + " " + self.path + " " + self.request_version)
        SOAPAction = ""
        contentLength = ""
        # Loop through all the request headers and pick out ones that are relevant
        eyeFiLogger.debug("Headers received in POST request:")
        for headerName in self.headers.keys():
            for headerValue in self.headers.getheaders(headerName):
                if( headerName == "soapaction"):
                    SOAPAction = headerValue
                if( headerName == "content-length"):
                    contentLength = int(headerValue)
                eyeFiLogger.debug(headerName + ": " + headerValue)
        # Read contentLength bytes worth of data
        # NOTE(review): if no content-length header arrives, contentLength
        # stays "" and the subtraction below raises -- confirm the card
        # always sends it.
        eyeFiLogger.debug("Attempting to read " + str(contentLength) + " bytes of data")
        # postData = self.rfile.read(contentLength)
        try:
            from StringIO import StringIO
            import tempfile
        except ImportError:
            eyeFiLogger.debug("No StringIO module")
        # Buffer the body in 1MB chunks so large photo uploads do not
        # require a single huge read.
        chunksize = 1048576  # 1MB
        mem = StringIO()
        while 1:
            remain = contentLength - mem.tell()
            if remain <= 0:
                break
            chunk = self.rfile.read(min(chunksize, remain))
            if not chunk:
                break
            mem.write(chunk)
            print remain
        print "Finished"
        postData = mem.getvalue()
        mem.close()
        eyeFiLogger.debug("Finished reading " + str(contentLength) + " bytes of data")
        # Perform action based on path and SOAPAction
        # A SOAPAction of StartSession indicates the beginning of an EyeFi
        # authentication request
        if((self.path == "/api/soap/eyefilm/v1") and (SOAPAction == "\"urn:StartSession\"")):
            eyeFiLogger.debug("Got StartSession request")
            response = self.startSession(postData)
            contentLength = len(response)
            eyeFiLogger.debug("StartSession response: " + response)
            self.send_response(200)
            self.send_header('Date', self.date_time_string())
            self.send_header('Pragma','no-cache')
            self.send_header('Server','Eye-Fi Agent/2.0.4.0 (Windows XP SP2)')
            self.send_header('Content-Type','text/xml; charset="utf-8"')
            self.send_header('Content-Length', contentLength)
            self.end_headers()
            self.wfile.write(response)
            self.wfile.flush()
            # Keep servicing the same connection for the card's next request.
            self.handle_one_request()
        # GetPhotoStatus allows the card to query if a photo has been uploaded
        # to the server yet
        if((self.path == "/api/soap/eyefilm/v1") and (SOAPAction == "\"urn:GetPhotoStatus\"")):
            eyeFiLogger.debug("Got GetPhotoStatus request")
            response = self.getPhotoStatus(postData)
            contentLength = len(response)
            eyeFiLogger.debug("GetPhotoStatus response: " + response)
            self.send_response(200)
            self.send_header('Date', self.date_time_string())
            self.send_header('Pragma','no-cache')
            self.send_header('Server','Eye-Fi Agent/2.0.4.0 (Windows XP SP2)')
            self.send_header('Content-Type','text/xml; charset="utf-8"')
            self.send_header('Content-Length', contentLength)
            self.end_headers()
            self.wfile.write(response)
            self.wfile.flush()
        # If the URL is upload and there is no SOAPAction the card is ready to send a picture to me
        if((self.path == "/api/soap/eyefilm/v1/upload") and (SOAPAction == "")):
            eyeFiLogger.debug("Got upload request")
            response = self.uploadPhoto(postData)
            contentLength = len(response)
            eyeFiLogger.debug("Upload response: " + response)
            self.send_response(200)
            self.send_header('Date', self.date_time_string())
            self.send_header('Pragma','no-cache')
            self.send_header('Server','Eye-Fi Agent/2.0.4.0 (Windows XP SP2)')
            self.send_header('Content-Type','text/xml; charset="utf-8"')
            self.send_header('Content-Length', contentLength)
            self.end_headers()
            self.wfile.write(response)
            self.wfile.flush()
        # If the URL is upload and SOAPAction is MarkLastPhotoInRoll
        if((self.path == "/api/soap/eyefilm/v1") and (SOAPAction == "\"urn:MarkLastPhotoInRoll\"")):
            eyeFiLogger.debug("Got MarkLastPhotoInRoll request")
            response = self.markLastPhotoInRoll(postData)
            contentLength = len(response)
            eyeFiLogger.debug("MarkLastPhotoInRoll response: " + response)
            self.send_response(200)
            self.send_header('Date', self.date_time_string())
            self.send_header('Pragma','no-cache')
            self.send_header('Server','Eye-Fi Agent/2.0.4.0 (Windows XP SP2)')
            self.send_header('Content-Type','text/xml; charset="utf-8"')
            self.send_header('Content-Length', contentLength)
            # Last request of the session: tell the card to drop the connection.
            self.send_header('Connection', 'Close')
            self.end_headers()
            self.wfile.write(response)
            self.wfile.flush()
            eyeFiLogger.debug("Connection closed.")
    except:
        eyeFiLogger.error("Got an an exception:")
        eyeFiLogger.error(traceback.format_exc())
        raise
def mainCGI(): """ Main driver for running PDB2PQR from a web page """ import cgi import cgitb cgitb.enable() form = cgi.FieldStorage() options = {} ff = form["FF"].value input = 0 if form.has_key("DEBUMP"): options["debump"] = 1 if form.has_key("OPT"): options["opt"] = 1 if form.has_key("PROPKA"): try: ph = float(form["PH"].value) if ph < 0.0 or ph > 14.0: raise ValueError options["ph"] = ph except ValueError: text = "The entered pH of %.2f is invalid! " % form["PH"].value text += "Please choose a pH between 0.0 and 14.0." print "Content-type: text/html\n" print text sys.exit(2) if form.has_key("PDBID"): file = getFile(form["PDBID"].value) elif form.has_key("PDB"): file = StringIO(form["PDB"].value) if form.has_key("INPUT"): input = 1 pdblist, errlist = readPDB(file) if len(pdblist) == 0 and len(errlist) == 0: text = "Unable to find PDB file - Please make sure this is " text += "a valid PDB file ID!" print "Content-type: text/html\n" print text sys.exit(2) elif len(pdblist) > 10000 and "opt" in options: text = "<HTML><HEAD>" text += "<TITLE>PDB2PQR Error</title>" text += "<link rel=\"stylesheet\" href=\"%s\" type=\"text/css\">" % STYLESHEET text += "</HEAD><BODY><H2>PDB2PQR Error</H2><P>" text += "Due to server limits, we are currently unable to optimize " text += "proteins of greater than 10000 atoms on the server. If you " text += "want to forgo optimization please try the server again.<P>" text += "Otherwise you may use the standalone version of PDB2PQR that " text += "is available from the <a href=\"http://pdb2pqr.sourceforge.net\">" text += "PDB2PQR SourceForge project page</a>." 
text += "</BODY></HTML>" print "Content-type: text/html\n" print text sys.exit(2) try: starttime = time.time() name = setID(starttime) pqrpath = startServer(name) options["outname"] = pqrpath header, lines = runPDB2PQR(pdblist, ff, options) file = open(pqrpath, "w") file.write(header) for line in lines: file.write("%s\n" % string.strip(line)) file.close() if input: from src import inputgen from src import psize method = "mg-auto" size = psize.Psize() size.parseInput(pqrpath) size.runPsize(pqrpath) async = 0 # No async files here! myinput = inputgen.Input(pqrpath, size, method, async) myinput.printInputFiles() endtime = time.time() - starttime createResults(header, input, name, endtime) logRun(form, endtime, len(pdblist)) except StandardError, details: print "Content-type: text/html\n" print details createError(name, details)
def _do_actions(self, context, actions):
    """Dispatch the screenshot web-UI actions in *actions*.

    Supported actions: 'get-file' (serve a screenshot image or HTML
    preview), 'add' / 'post-add' (upload form and upload handling,
    including ZIP archives of images), 'edit' / 'post-edit' (edit form
    and update handling).  Returns a (template, content_type) tuple for
    template-rendering actions; file-serving actions terminate the
    request via context.req.send_file().
    """
    # Get API component.
    api = self.env[ScreenshotsApi]
    for action in actions:
        if action == 'get-file':
            context.req.perm.assert_permission('SCREENSHOTS_VIEW')
            # Get request arguments.
            screenshot_id = int(context.req.args.get('id') or 0)
            format = context.req.args.get('format') or self.default_format
            width = int(context.req.args.get('width') or 0)
            height = int(context.req.args.get('height') or 0)
            # Check if requested format is allowed.
            if not format in self.formats:
                raise TracError(_("Requested screenshot format that is not "
                  "allowed."), _("Requested format not allowed."))
            # Get screenshot.
            screenshot = api.get_screenshot(context, screenshot_id)
            # Check if requested screenshot exists.
            if not screenshot:
                if context.req.perm.has_permission('SCREENSHOTS_ADD'):
                    context.req.redirect(context.req.href.screenshots(
                      action = 'add'))
                else:
                    raise TracError(_("Screenshot not found."))
            # Set missing dimensions.
            width = width or screenshot['width']
            height = height or screenshot['height']
            if format == 'html':
                # Format screenshot for presentation.
                screenshot['author'] = format_to_oneliner(self.env, context,
                  screenshot['author'])
                screenshot['name'] = format_to_oneliner(self.env, context,
                  screenshot['name'])
                screenshot['description'] = format_to_oneliner(self.env,
                  context, screenshot['description'])
                screenshot['time'] = pretty_timedelta(to_datetime(
                  screenshot['time'], utc))
                # For HTML preview format return template.
                context.req.data['screenshot'] = screenshot
                return ('screenshot', None)
            else:
                # Prepare screenshot filename.
                name, ext = os.path.splitext(screenshot['file'])
                # 'raw' means serve the original extension unchanged.
                format = (format == 'raw') and ext or '.' + format
                path = os.path.normpath(os.path.join(self.path, to_unicode(
                  screenshot['id'])))
                filename = os.path.normpath(os.path.join(path, '%s-%sx%s%s'
                  % (name, width, height, format)))
                orig_name = os.path.normpath(os.path.join(path, '%s-%sx%s%s'
                  % (name, screenshot['width'], screenshot['height'], ext)))
                base_name = os.path.normpath(os.path.basename(filename))
                self.log.debug('filename: %s' % (filename,))
                # Create requested file from original if not exists.
                if not os.path.isfile(filename.encode('utf-8')):
                    self._create_image(orig_name, path, name, format, width,
                      height)
                # Guess mime type.
                file = open(filename.encode('utf-8'), "r")
                file_data = file.read(1000)
                file.close()
                mimeview = Mimeview(self.env)
                mime_type = mimeview.get_mimetype(filename, file_data)
                if not mime_type:
                    mime_type = 'application/octet-stream'
                if 'charset=' not in mime_type:
                    charset = mimeview.get_charset(file_data, mime_type)
                    mime_type = mime_type + '; charset=' + charset
                # Send file to request.
                context.req.send_header('Content-Disposition',
                  'attachment;filename="%s"' % (base_name))
                context.req.send_header('Content-Description',
                  screenshot['description'])
                context.req.send_file(filename.encode('utf-8'), mime_type)
        elif action == 'add':
            context.req.perm.assert_permission('SCREENSHOTS_ADD')
            # Get request arguments.
            index = int(context.req.args.get('index') or 0)
            # Fill data dictionary.
            context.req.data['index'] = index
            context.req.data['versions'] = api.get_versions(context)
            context.req.data['components'] = api.get_components(context)
            # Return template with add screenshot form.
            return ('screenshot-add', None)
        elif action == 'post-add':
            context.req.perm.assert_permission('SCREENSHOTS_ADD')
            # Get image file from request.
            file, filename = self._get_file_from_req(context.req)
            name, ext = os.path.splitext(filename)
            ext = ext.lower()
            filename = name + ext
            # Is uploaded file archive or single image?
            if ext == '.zip':
                # Get global timestamp for all files in archive.
                timestamp = to_timestamp(datetime.now(utc))
                # List files in archive.
                zip_file = ZipFile(file)
                for filename in zip_file.namelist():
                    # Test file extensions for supported type.
                    name, ext = os.path.splitext(filename)
                    tmp_ext = ext.lower()[1:]
                    if tmp_ext in self.ext and tmp_ext != 'zip':
                        # Decompress image file
                        data = zip_file.read(filename)
                        file = StringIO(data)
                        filename = to_unicode(os.path.basename(filename))
                        # Screenshots must be identified by timestamp.
                        # (Each archive member gets a unique, incremented one.)
                        timestamp += 1
                        # Create image object.
                        image = Image.open(file)
                        # Construct screenshot dictionary from form values.
                        screenshot = {'name' : context.req.args.get('name'),
                          'description' : context.req.args.get('description'),
                          'time' : timestamp,
                          'author' : context.req.authname,
                          'tags' : context.req.args.get('tags'),
                          'file' : filename,
                          'width' : image.size[0],
                          'height' : image.size[1],
                          'priority' : int(context.req.args.get('priority')
                          or '0')}
                        self.log.debug('screenshot: %s' % (screenshot,))
                        # Save screenshot file and add DB entry.
                        self._add_screenshot(context, api, screenshot, file)
                zip_file.close()
            else:
                # Create image object.
                image = Image.open(file)
                # Construct screenshot dictionary from form values.
                screenshot = {'name' : context.req.args.get('name'),
                  'description' : context.req.args.get('description'),
                  'time' : to_timestamp(datetime.now(utc)),
                  'author' : context.req.authname,
                  'tags' : context.req.args.get('tags'),
                  'file' : filename,
                  'width' : image.size[0],
                  'height' : image.size[1],
                  'priority' : int(context.req.args.get('priority') or '0')}
                self.log.debug('screenshot: %s' % (screenshot,))
                # Add single image.
                self._add_screenshot(context, api, screenshot, file)
            # Close input file.
            file.close()
            # Clear ID to prevent display of edit and delete button.
            context.req.args['id'] = None
        elif action == 'edit':
            context.req.perm.assert_permission('SCREENSHOTS_EDIT')
            # Get request arguments.
            screenshot_id = context.req.args.get('id')
            # Prepare data dictionary.
            context.req.data['screenshot'] = api.get_screenshot(context,
              screenshot_id)
        elif action == 'post-edit':
            context.req.perm.assert_permission('SCREENSHOTS_EDIT')
            # Get screenshot arguments.
            screenshot_id = int(context.req.args.get('id') or 0)
            # Get old screenshot
            old_screenshot = api.get_screenshot(context, screenshot_id)
            # Check if requested screenshot exits.
            if not old_screenshot:
                raise TracError(_("Edited screenshot not found."),
                  _("Screenshot not found."))
            # Get image file from request.
            image = context.req.args['image']
            if hasattr(image, 'filename') and image.filename:
                in_file, filename = self._get_file_from_req(context.req)
                name, ext = os.path.splitext(filename)
                filename = name + ext.lower()
            else:
                filename = None
            # Construct screenshot dictionary from form values.
            screenshot = {'name' : context.req.args.get('name'),
              'description' : context.req.args.get('description'),
              'author' : context.req.authname,
              'tags' : context.req.args.get('tags'),
              'components' : context.req.args.get('components') or [],
              'versions' : context.req.args.get('versions') or [],
              'priority' : int(context.req.args.get('priority') or '0')}
            # Update dimensions and filename if image file is updated.
            if filename:
                image = Image.open(in_file)
                screenshot['file'] = filename
                screenshot['width'] = image.size[0]
                screenshot['height'] = image.size[1]
            # Convert components and versions to list if only one item is
            # selected.
            if not isinstance(screenshot['components'], list):
                screenshot['components'] = [screenshot['components']]
            if not isinstance(screenshot['versions'], list):
                screenshot['versions'] = [screenshot['versions']]
            self.log.debug('screenshot: %s' % (screenshot))
            # Edit screenshot.
            api.edit_screenshot(context, screenshot_id, screenshot)
            # Prepare file paths.
            if filename:
                name, ext = os.path.splitext(screenshot['file'])
                path = os.path.normpath(os.path.join(self.path, to_unicode(
                  screenshot_id)))
                filepath = os.path.normpath(os.path.join(path, '%s-%ix%i%s'
                  % (name, screenshot['width'], screenshot['height'], ext)))
                self.log.debug('path: %s' % (path,))
                self.log.debug('filepath: %s' % (filepath,))
                # Delete present images.
                try:
                    for file in os.listdir(path):
                        file = os.path.normpath(os.path.join(path,
                          to_unicode(file)))
                        os.remove(file.encode('utf-8'))
                except Exception, error:
                    raise TracError(_("Error deleting screenshot. Original "
                      "error message was: %s""") % (to_unicode(error),))
                # Store uploaded image.
                try:
                    out_file = open(filepath.encode('utf-8'), 'wb+')
                    in_file.seek(0)
                    shutil.copyfileobj(in_file, out_file)
                    out_file.close()
                except Exception, error:
                    # Best-effort cleanup of a partially written file.
                    try:
                        os.remove(filepath.encode('utf-8'))
                    except:
                        pass
                    raise TracError(_("Error storing file. Is directory "
                      "specified in path config option in [screenshots] "
                      "section of trac.ini existing? Original error "
                      "message was: %s") % (to_unicode(error),))
            # Notify change listeners.
            for listener in self.change_listeners:
                listener.screenshot_changed(context.req, screenshot,
                  old_screenshot)
            # Clear ID to prevent display of edit and delete button.
            context.req.args['id'] = None
def true_close(self): return StringIO.close(self)
from urllib2 import * import StringIO #password_mgr = HTTPPasswordMgrWithDefaultRealm() url = "https://stream.twitter.com/1/statuses/sample.json" password_mgr.add_password(None, url, 'raspbmcbig33k', 'd0ggie') h = HTTPBasicAuthHandler(password_mgr) opener = build_opener(h) page = opener.open(url) io = StringIO(page.read()) print io.getvalue() io.close()
def do_POST(self):
    """Handle a POST from an Eye-Fi card (quiet variant).

    Same protocol flow as the verbose handler -- buffer the body, then
    dispatch on (path, SOAPAction) -- but with most debug logging
    commented out.  After MarkLastPhotoInRoll (end of an upload session)
    it logs session statistics, fires an external notification script,
    and resets the per-session counters kept on self.server.
    """
    try:
        # eyeFiLogger.debug(self.command + " " + self.path + " " + self.request_version)
        SOAPAction = ""
        contentLength = ""
        # Loop through all the request headers and pick out ones that are relevant
        # eyeFiLogger.debug("Headers received in POST request:")
        for headerName in self.headers.keys():
            for headerValue in self.headers.getheaders(headerName):
                if( headerName == "soapaction"):
                    SOAPAction = headerValue
                if( headerName == "content-length"):
                    contentLength = int(headerValue)
                # eyeFiLogger.debug(headerName + ": " + headerValue)
        # Read contentLength bytes worth of data
        # eyeFiLogger.debug("Attempting to read " + str(contentLength) + " bytes of data")
        # postData = self.rfile.read(contentLength)
        try:
            from StringIO import StringIO
            import tempfile
        except ImportError:
            eyeFiLogger.debug("No StringIO module")
        # Buffer the body in 1MB chunks so large photo uploads do not
        # require a single huge read.
        chunksize = 1048576  # 1MB
        mem = StringIO()
        while 1:
            remain = contentLength - mem.tell()
            if remain <= 0:
                break
            chunk = self.rfile.read(min(chunksize, remain))
            if not chunk:
                break
            mem.write(chunk)
            print remain
        print "Finished"
        postData = mem.getvalue()
        mem.close()
        # eyeFiLogger.debug("Finished reading " + str(contentLength) + " bytes of data")
        # Perform action based on path and SOAPAction
        # A SOAPAction of StartSession indicates the beginning of an EyeFi authentication request
        if((self.path == "/api/soap/eyefilm/v1") and (SOAPAction == "\"urn:StartSession\"")):
            eyeFiLogger.debug("Got StartSession request")
            response = self.startSession(postData)
            contentLength = len(response)
            # eyeFiLogger.debug("StartSession response: " + response)
            self.send_response(200)
            self.send_header('Date', self.date_time_string())
            self.send_header('Pragma','no-cache')
            self.send_header('Server','Eye-Fi Agent/2.0.4.0 (Windows XP SP2)')
            self.send_header('Content-Type','text/xml; charset="utf-8"')
            self.send_header('Content-Length', contentLength)
            self.end_headers()
            self.wfile.write(response)
            self.wfile.flush()
            # Keep servicing the same connection for the card's next request.
            self.handle_one_request()
        # GetPhotoStatus allows the card to query if a photo has been uploaded
        # to the server yet
        if((self.path == "/api/soap/eyefilm/v1") and (SOAPAction == "\"urn:GetPhotoStatus\"")):
            eyeFiLogger.debug("Got GetPhotoStatus request")
            response = self.getPhotoStatus(postData)
            contentLength = len(response)
            # eyeFiLogger.debug("GetPhotoStatus response: " + response)
            self.send_response(200)
            self.send_header('Date', self.date_time_string())
            self.send_header('Pragma','no-cache')
            self.send_header('Server','Eye-Fi Agent/2.0.4.0 (Windows XP SP2)')
            self.send_header('Content-Type','text/xml; charset="utf-8"')
            self.send_header('Content-Length', contentLength)
            self.end_headers()
            self.wfile.write(response)
            self.wfile.flush()
        # If the URL is upload and there is no SOAPAction the card is ready to send a picture to me
        if((self.path == "/api/soap/eyefilm/v1/upload") and (SOAPAction == "")):
            # eyeFiLogger.debug("Got upload request")
            response = self.uploadPhoto(postData)
            contentLength = len(response)
            # eyeFiLogger.debug("Upload response: " + response)
            self.send_response(200)
            self.send_header('Date', self.date_time_string())
            self.send_header('Pragma','no-cache')
            self.send_header('Server','Eye-Fi Agent/2.0.4.0 (Windows XP SP2)')
            self.send_header('Content-Type','text/xml; charset="utf-8"')
            self.send_header('Content-Length', contentLength)
            self.end_headers()
            self.wfile.write(response)
            self.wfile.flush()
        # If the URL is upload and SOAPAction is MarkLastPhotoInRoll
        if((self.path == "/api/soap/eyefilm/v1") and (SOAPAction == "\"urn:MarkLastPhotoInRoll\"")):
            # eyeFiLogger.debug("Got MarkLastPhotoInRoll request")
            response = self.markLastPhotoInRoll(postData)
            contentLength = len(response)
            # eyeFiLogger.debug("MarkLastPhotoInRoll response: " + response)
            self.send_response(200)
            self.send_header('Date', self.date_time_string())
            self.send_header('Pragma','no-cache')
            self.send_header('Server','Eye-Fi Agent/2.0.4.0 (Windows XP SP2)')
            self.send_header('Content-Type','text/xml; charset="utf-8"')
            self.send_header('Content-Length', contentLength)
            # Last request of the session: tell the card to drop the connection.
            self.send_header('Connection', 'Close')
            self.end_headers()
            self.wfile.write(response)
            self.wfile.flush()
            # ---------------
            # End-of-session bookkeeping: summarize, notify, reset counters.
            # format upload size
            uploaded_str = human_size(self.server.session_upload_size)
            # elapsed time (secs)
            elapsed_time = time.time() - self.server.session_start_time
            # formated into HH:MM:SS
            elapsed_str = str(timedelta(seconds=elapsed_time))
            eyeFiLogger.debug("upload complete: duration: " + elapsed_str
              + ", uploaded: " + str(self.server.session_counter)
              + ", size: " + uploaded_str
              + ", total(history): " + str(self.server.global_counter))
            # (JP) send notification on upload complete
            subprocess.call(['/usr/local/bin/eyefi-notify.sh',
              str(self.server.session_counter), uploaded_str,
              self.server.session_files])
            # reset session counters
            self.server.session_files = ""
            self.server.session_counter = 0
            self.server.session_upload_size = 0
            self.server.session_start_time = 0;
            # ---------------
    except:
        eyeFiLogger.error("Got an an exception:")
        eyeFiLogger.error(traceback.format_exc())
        raise
def run_python_cmd(self, cmd, std_pipe=None, input_var=None, stdout=None, stderr=None):
    """ Execute a python command, accepting an input variable and returning:
           1. Result of python command
           2. stdout of python command
           3. stderr of python command

        The python command is executed using exec(cmd, self.globals, self.locals)
        in order to maintain a separate "variable space" from the Pybash program.
        Unlike run_shell_cmd(), run_python_cmd() is blocking.

        Args:
            cmd (str): the python command to execute
            std_pipe (list): list used to pass [input_var, stdout, stderr] if
                all three are available / required
            input_var: input to the command (acts like stdin).  May be a
                file-like object (read and converted to a string), a python
                variable (passed directly), or None.
            stdout: destination of the command's result(s).  May be a
                file-like object, subprocess.PIPE (a new collections.deque is
                created), an existing collections.deque, or None (printed to
                sys.stdout).
            stderr: destination of the command's standard error; same
                accepted types as stdout.

        Returns:
            tuple: (stdout, stderr, None).  No subprocess is created, so the
            third element is always None.  The stdout deque may contain the
            value of evaluating the command and/or any text it printed; the
            stderr deque contains exception text.
    """
    # Expand the std_pipe, handle redirects (create deque objects instead of os.pipe())
    input_var, stdout, stderr = pipe_util.expand_std_pipe(
        std_pipe, input_var, stdout, stderr)
    self.write_debug(
        "Expanded std_pipe: %s" % [type(input_var), stdout, stderr],
        "run_python_cmd")
    ####################################################################
    # Step 1) Get input data, initialize __inputvar__ and __outputvar__ in self.locals
    # If the input_data is a file descriptor, read string
    if input_var and pipe_util.isfile(input_var):
        self.write_debug(
            "Reading file object %s and storing as input_var" % input_var,
            "run_python_cmd")
        input_var = pipe_util.read_close_fd(input_var)
    # Initialize the special __inputvar__ variable
    # Python 3.x: Use globals so commands like '[@[f] for f in @] for'
    # Python 2.x Use locals since this does not seem to be a problem
    if sys.version_info >= (3, 0):
        self.globals['__inputvar__'] = input_var
    else:
        self.locals['__inputvar__'] = input_var
    # Initialize __outputvar__ to None in self.locals - may have been set by a previous cmd
    self.locals['__outputvar__'] = None
    ####################################################################
    # Step 2: Compile the command
    # a) Check to see if the command references the input variable
    if '@' in cmd:
        cmd = cmd.replace("@", "__inputvar__")
    # b) Check to see if this is command that can be assigned to a variable
    #    - there is probably a better way to do this without using a try-catch
    try:
        assignment_cmd = "__outputvar__ = " + cmd
        cmd_c = compile(assignment_cmd, '<pybash>', 'exec')
        cmd = assignment_cmd
        # Currently informational only: records whether the assignment form
        # compiled (the result is always read back via __outputvar__).
        capture_output_var = True
    except (SyntaxError, TypeError) as e:
        # c) Attempt to compile original cmd
        try:
            cmd_c = compile(cmd, '<pybash>', 'exec')
            capture_output_var = False
        except (SyntaxError, TypeError) as e:
            self.print_error("Could not compile: %s" % cmd)
            self.stderr_write(str(e) + '\n')
            self.stderr_write(traceback.format_exc())
            return None, None, None
    self.write_debug("Successfully compiled python: %s" % (cmd), "run_python_cmd")
    ####################################################################
    # Step 3: Execute the command
    # a) Create StringIO for out/error
    #    - after doing this, don't print anything until out/err are restored!
    if sys.version_info >= (3, 0):
        out = StringIO()
        err = StringIO()
    else:
        out = StringIO.StringIO()
        err = StringIO.StringIO()
    # Capture out/err
    sys.stdout = out
    sys.stderr = err
    # b) run command in try-catch
    #    - if any errors occur, they will get added to std_pipe[2]
    try:
        exec(cmd_c, self.globals, self.locals)
    except Exception as e:
        sys.stderr.write(str(e) + '\n')
        pass
    # c) restore orig out/err
    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__
    # get out/err values and close
    stdout_val = out.getvalue()
    stderr_val = err.getvalue()
    out.close()
    err.close()
    self.write_debug("Restored stdout/stderr", "run_python_cmd")
    ####################################################################
    # Step 4: Add output to pipe or print to sys.stdout/sys.stderr
    # Define source lists for output + error pipes
    # - __outputvar__ may not be defined if something went wrong
    try:
        stdout_src_list = [self.locals['__outputvar__'], stdout_val]
    except UnboundLocalError:
        stdout_src_list = [None, stdout_val]
    stderr_src_list = [stderr_val]
    # Define the mappings between the (name, output pipe, output print function, output source list)
    output_mapping = [
        ("stdout", stdout, self.stdout_write, stdout_src_list),
        ("stderr", stderr, self.stderr_write, stderr_src_list)
    ]
    # Process each output mapping
    for name, pipe, print_fn, src_list in output_mapping:
        # Process each source in the list
        for src in src_list:
            if not src:
                continue
            # If pipe is a deque, appendleft
            if type(pipe) == deque:
                self.write_debug("Adding src to %s queue" % name, "run_python_cmd")
                pipe.appendleft(src)
            # If pipe is a file-like object, write to file
            elif pipe_util.isfile(pipe):
                self.write_debug(
                    "Writing %s src to file %s" % (name, pipe),
                    "run_python_cmd")
                pipe.write(pybash_helper.to_str(src))
            # If pipe is None, print using print function
            elif pipe is None:
                self.write_debug(
                    "Printing %s with function %s" % (name, print_fn),
                    "run_python_cmd")
                print_fn(src)
            # Otherwise, unrecognized pipe type
            else:
                # BUG FIX: the original wrote
                #   ValueError("... %s: %s" % name, type(pipe))
                # where % binds before the comma, so a two-placeholder
                # format string got a single argument and raised TypeError
                # instead of the intended ValueError.  Format both values
                # as a tuple.
                raise ValueError(
                    "Unrecognized pipe type for %s: %s" % (name, type(pipe)))
    # Close any open file handles and return output
    # - there is no subprocess for python commands
    output = [stdout, stderr, None]
    for i in range(2):
        if pipe_util.isfile(output[i]):
            if output[i].closed is False:
                output[i].close()
            # Replace with None so that run_cmd() knows not to expect anything
            output[i] = None
    return output