def numberOfModifiedRecord(self):
    """Return the number of modified records visible to the current user.

    Counts Solr documents flagged 'modified:true' inside this portal's
    query scope; non-admin users additionally get role/owner security
    filters applied.
    """
    indexer = self.services.getIndexer()
    portalQuery = self.services.getPortalManager().get(self.portal.getName()).getQuery()
    portalSearchQuery = self.services.getPortalManager().get(self.portal.getName()).getSearchQuery()
    # Security prep work
    current_user = self.page.authentication.get_username()
    security_roles = self.page.authentication.get_roles_list()
    security_filter = 'security_filter:("' + '" OR "'.join(security_roles) + '")'
    security_exceptions = 'security_exception:"' + current_user + '"'
    owner_query = 'owner:"' + current_user + '"'
    # A record is visible when any one of the three clauses matches
    security_query = "(" + security_filter + ") OR (" + security_exceptions + ") OR (" + owner_query + ")"
    req = SearchRequest("modified:true")
    req.setParam("fq", 'item_type:"object"')
    if portalQuery:
        req.addParam("fq", portalQuery)
    if portalSearchQuery:
        req.addParam("fq", portalSearchQuery)
    # NOTE(review): an empty 'fq' value is added here — looks like leftover
    # code; confirm Solr tolerates it before removing.
    req.addParam("fq", "")
    # rows=0: we only need the match count, not the documents
    req.setParam("rows", "0")
    if not self.page.authentication.is_admin():
        req.addParam("fq", security_query)
    out = ByteArrayOutputStream()
    indexer.search(req, out)
    self.__result = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))
    return self.__result.get("response/numFound")
def __getUsers(self, oid):
    """Look up the owner and viewers (security exceptions) of an object.

    Queries Solr by id and returns a JSON string of the form
    {"owner": "...", "viewers": [...]}; the owner is dropped from the
    viewer list when present. Returns None on error (logged).
    """
    indexer = Services.getIndexer()
    req = SearchRequest("id:" + oid)
    req.setParam("fl", "security_exception,owner")
    out = ByteArrayOutputStream()
    indexer.search(req, out)
    rtJson = ""
    try:
        qresult = SolrResult(ByteArrayInputStream(
            out.toByteArray())).getResults().get(0)
        owner = qresult.getString(None, 'owner')
        secException = qresult.getArray('security_exception')
        if secException is None:
            secException = JSONArray()
        # NOTE(review): 'owner' can be None (getString default above) — the
        # concatenations below would then fail; confirm owner is always set.
        self.log.debug("Owner of object: " + owner)
        self.log.debug("Viewer(s) of object: " + secException.toString())
        # The owner should not also appear in the viewer list
        if secException.contains(owner):
            secException.remove(owner)
        return '{"owner":"' + owner + '", "viewers": ' + secException.toString(
        ) + '}'
    except Exception, e:
        self.log.error("Error during query/package ownership data" + str(e))
def __isLinked(self, ids, map):
    """Flag documents that are linked as package nodes of a master record.

    Searches Solr for master records whose 'package_node_id' matches any
    of 'ids', then sets linked="true" on each document in 'map' whose id
    was found. 'map' is either a Java LinkedHashMap (author -> doc list)
    or a Python dict (author -> {index: doc}).
    """
    query = 'package_node_id:("' + '" OR "'.join(ids) + '")'
    req = SearchRequest(query)
    req.setParam("fq", 'recordtype:"master"')
    req.addParam("fq", 'item_type:"object"')
    req.setParam("rows", "9999")
    out = ByteArrayOutputStream()
    self.__indexer.search(req, out)
    result = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))
    # Collect every package_node_id currently linked to a master record
    currentList = []
    for doc in result.getJsonList("response/docs"):
        currentList.extend(doc.getList("package_node_id"))
    # Branch on the concrete container type passed in
    if type(map).__name__ == "LinkedHashMap":
        for author in map.keySet():
            authorDocs = map.get(author)
            for doc in authorDocs:
                if doc.get("id") in currentList:
                    doc.set("linked", "true")
    else:
        for author in map.keys():
            authorList = map[author]
            for count in authorList:
                doc = authorList[count]
                if doc.get("id") in currentList:
                    doc.set("linked", "true")
def numberOfModifiedRecord(self):
    """Return the number of modified records visible to the current user.

    Counts Solr documents flagged 'modified:true' inside this portal's
    query scope, with role/owner security filters for non-admins; parses
    the response with JsonSimpleConfig.
    """
    indexer = self.services.getIndexer()
    portalQuery = self.services.getPortalManager().get(self.portal.getName()).getQuery()
    portalSearchQuery = self.services.getPortalManager().get(self.portal.getName()).getSearchQuery()
    # Security prep work
    current_user = self.page.authentication.get_username()
    security_roles = self.page.authentication.get_roles_list()
    security_filter = 'security_filter:("' + '" OR "'.join(security_roles) + '")'
    security_exceptions = 'security_exception:"' + current_user + '"'
    owner_query = 'owner:"' + current_user + '"'
    # A record is visible when any one of the three clauses matches
    security_query = "(" + security_filter + ") OR (" + security_exceptions + ") OR (" + owner_query + ")"
    req = SearchRequest("modified:true")
    req.setParam("fq", 'item_type:"object"')
    if portalQuery:
        req.addParam("fq", portalQuery)
    if portalSearchQuery:
        req.addParam("fq", portalSearchQuery)
    # NOTE(review): an empty 'fq' value is added here — looks like leftover
    # code; confirm Solr tolerates it before removing.
    req.addParam("fq", "")
    # rows=0: we only need the match count, not the documents
    req.setParam("rows", "0")
    if not self.page.authentication.is_admin():
        req.addParam("fq", security_query)
    out = ByteArrayOutputStream()
    indexer.search(req, out)
    self.__result = JsonSimpleConfig(ByteArrayInputStream(out.toByteArray()))
    return self.__result.getString(None, "response", "numFound")
def handleGrantNumber(self):
    """Write a JSON array of distinct grant numbers whose prefix matches
    the current search term (dataset records only)."""
    outStream = ByteArrayOutputStream()
    request = SearchRequest("grant_numbers:%s*" % self.term)
    request.setParam("fq", 'item_type:"object"')
    request.setParam("fq", 'workflow_id:"dataset"')
    request.setParam("rows", "1000")
    self.indexer.search(request, outStream)
    solrData = SolrResult(ByteArrayInputStream(outStream.toByteArray()))
    grantNumbers = HashSet()
    if solrData.getNumFound() > 0:
        # Gather every grant number from every matching record
        for record in solrData.getResults():
            numbers = record.getList("grant_numbers")
            if not numbers.isEmpty():
                for number in numbers:
                    grantNumbers.add(number)
        self.writer.print("[")
        first = True
        for number in grantNumbers:
            if first:
                self.writer.print("\"%s\"" % number)
                first = False
            else:
                self.writer.print(",\"%s\"" % number)
        self.writer.print("]")
    else:
        self.writer.println("[\"\"]")
    self.writer.close()
def __feed(self):
    """Build the Solr result backing the portal feed page.

    Uses module-level globals (portalId, sessionState, Services) to run a
    faceted '*:*' search scoped by the portal query — or by the session
    'fq' when the portal has none — paged by the session page number.
    """
    portal = Services.getPortalManager().get(portalId)
    recordsPerPage = portal.recordsPerPage
    pageNum = sessionState.get("pageNum", 1)
    query = "*:*"
    req = SearchRequest(query)
    req.setParam("facet", "true")
    req.setParam("rows", str(recordsPerPage))
    req.setParam("facet.field", portal.facetFieldList)
    req.setParam("facet.sort", "true")
    req.setParam("facet.limit", str(portal.facetCount))
    req.setParam("sort", "f_dc_title asc")
    portalQuery = portal.query
    if portalQuery:
        req.addParam("fq", portalQuery)
    else:
        # No portal query: fall back to the filter stored in the session
        fq = sessionState.get("fq")
        req.setParam("fq", fq)
    req.setParam("start", str((pageNum - 1) * recordsPerPage))
    print " * query: ", query
    print " * portalQuery='%s'" % portalQuery
    print " * feed.py:", req.toString()
    out = ByteArrayOutputStream()
    Services.indexer.search(req, out)
    self.__result = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))
def checkAprovedRequests(self, provisioned=0, startPage=1):
    """ A customised query for package type of arms at workflow_step of arms-approved
    Get a list of requests filtered by provisioning_checklist

    provisioned: when truthy, return only records whose checklist item 4
    is set; otherwise only records where it is still null.
    startPage: currently unused.
    Returns the (event-merged) result list, or an empty ArrayList.
    """
    workflowStep = "arms-approved"
    if self.packageType:
        req = SearchRequest(self.packageType)
    else:
        req = SearchRequest("packageType:arms")
    req.addParam("fq", 'workflow_step:' + workflowStep)
    # Leading '-' negates the clause in Solr syntax
    if provisioned:
        req.addParam("fq", '-provisioning_checklist.4:null')
    else:
        req.addParam("fq", 'provisioning_checklist.4:null')
    req.setParam("sort", "date_object_modified desc, f_dc_title asc")
    req.setParam("fl", "id,dc_title,date-provisioned")
    out = ByteArrayOutputStream()
    self.indexer.search(req, out)
    solrResults = SolrResult(ByteArrayInputStream(out.toByteArray()))
    if solrResults:
        results = solrResults.getResults()
        if results:
            # Attach the ARMS workflow event history to each result row
            results = self.mergeEvents(results, ["arms_draft", "arms_redraft", "arms_review", "arms_approved", "arms_rejected"])
            self._setPaging(results.size())
            return results
        else:
            return ArrayList()
def handle(self):
    """Read one line of MARC XML from the client, transform it to RDF/XML
    via the compiled XQuery, and write the result back to the client.

    The raw XML is written to a temp file whose path is handed to the
    query as the 'marcxmluri' parameter; the temp file is removed even
    when the transform fails (the original leaked it on error).
    """
    marc_xmlfile = None
    try:
        raw_xml = self.rfile.readline().strip()
        marc_xmlfile = NamedTemporaryFile(delete=False)
        marc_xmlfile.write(raw_xml)
        marc_xmlfile.close()
        print(marc_xmlfile.name)
        dynamic_context = saxon.query.DynamicQueryContext(CONFIG)
        dynamic_context.setParameter(
            "baseuri", INFO.get('base_uri', 'http://catalog/'))
        # Forward slashes keep the URI valid on Windows paths
        dynamic_context.setParameter(
            "marcxmluri",
            os.path.normpath(marc_xmlfile.name).replace("\\", "/"))
        dynamic_context.setParameter("serialization", "rdfxml")
        output_stream = ByteArrayOutputStream()
        result = StreamResult(output_stream)
        print("Before query")
        COMPLIED_XQUERY.run(dynamic_context, result, None)
        self.wfile.write(
            output_stream.toString().encode('ascii', errors='ignore'))
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        self.wfile.write(
            "Error processing MARC XML:\n\t{}".format(sys.exc_info()[0]))
    finally:
        # Always clean up the temp file, success or failure
        if marc_xmlfile is not None:
            try:
                os.remove(marc_xmlfile.name)
            except OSError:
                pass
def __load(self, oid): template = """<div class="title" /><div class="page-toc" /><div class="body"><div>%s</div></div>""" print "Loading HTML preview for %s..." % oid if oid == "blank": return template % "<p>This page intentionally left blank.</p>" else: object = Services.getStorage().getObject(oid) # get preview payload or source if no preview pid = self.__getPreviewPid(object) payload = object.getPayload(pid) mimeType = payload.getContentType() print "pid=%s mimeType=%s" % (pid, mimeType) isHtml = mimeType in ["text/html", "application/xhtml+xml"] if isHtml or mimeType.startswith("text/"): out = ByteArrayOutputStream() IOUtils.copy(payload.open(), out) content = out.toString("UTF-8") if content.find('class="body"'): ## assumes ICE content return content elif isHtml: return template % content elif mimeType == "text/plain": return template % ('<pre>%s</pre>' % content) else: return content elif mimeType.startswith("image/"): return template % ('<div rel="%s"><img src="%s" /></div>' % (oid, pid)) else: return '<a href="%s" rel="%s">%s</a>' % (oid, mimeType, pid) payload.close() object.close()
def checkEventLogForEdits(self, oid):
    """Return True when the event log holds at least one workflow save
    event for the given object id."""
    query = "oid:" + oid + " AND context:Workflow AND eventType:Save"
    request = SearchRequest(query)
    outStream = ByteArrayOutputStream()
    self.indexer.searchByIndex(request, outStream, "eventLog")
    rows = SolrResult(ByteArrayInputStream(outStream.toByteArray())).getRows()
    return rows > 0
def __search(self):
    """Populate the home page data: recent items and the full result set.

    First query: the ten most recently modified objects in the portal.
    Second query: a rows=0 count of all objects, stored in self.__result;
    also seeds the session 'fq' with the base object filter.
    """
    indexer = Services.getIndexer()
    portalQuery = Services.getPortalManager().get(portalId).getQuery()
    # Query 1: objects modified in the last month, newest first
    req = SearchRequest("last_modified:[NOW-1MONTH TO *]")
    req.setParam("fq", 'item_type:"object"')
    if portalQuery:
        req.addParam("fq", portalQuery)
    req.setParam("rows", "10")
    req.setParam("sort", "last_modified desc, f_dc_title asc");
    out = ByteArrayOutputStream()
    indexer.search(req, out)
    self.__latest = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))
    # Query 2: count of everything in the portal (rows=0)
    req = SearchRequest("*:*")
    req.setParam("fq", 'item_type:"object"')
    if portalQuery:
        req.addParam("fq", portalQuery)
    # NOTE(review): empty 'fq' value — looks like leftover code; confirm.
    req.addParam("fq", "")
    req.setParam("rows", "0")
    out = ByteArrayOutputStream()
    indexer.search(req, out)
    sessionState.set("fq", 'item_type:"object"')
    #sessionState.set("query", portalQuery.replace("\"", "'"))
    self.__result = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))
def tostring(element_or_tree, encoding=None, method='xml', xml_declaration=None,
             pretty_print=False, with_tail=True, standalone=None, doctype=None,
             exclusive=False, with_comments=True, inclusive_ns_prefixes=None):
    """Serialize a DOM-backed element or tree to a string.

    Only 'encoding' (default ascii), 'xml_declaration' and 'pretty_print'
    are honoured; the remaining keyword arguments exist for lxml API
    compatibility and are ignored.
    """
    if isinstance(element_or_tree, _ElementTree):
        dom_node = element_or_tree._dom_doc
    else:
        dom_node = element_or_tree._dom_element
    source = DOMSource(dom_node)
    sink = ByteArrayOutputStream()
    transformer = transformfac.newTransformer()
    transformer.setOutputProperty(
        OutputKeys.OMIT_XML_DECLARATION, 'no' if xml_declaration else 'yes')
    transformer.setOutputProperty(
        OutputKeys.INDENT, 'yes' if pretty_print else 'no')
    transformer.transform(source, StreamResult(sink))
    if encoding is None:
        encoding = 'ascii'
    return sink.toString(encoding)
def crawl(site, trm , depth, linksfile): from java.net import URL from org.w3c.tidy import Tidy pattern = re.compile('href="/wiki/(.*?)"') f = open(linksfile, 'a+') #try: if depth < MAX_DEPTH: print 'crawling [%s]...' % trm, print >> f, '[%s]' % trm td = Tidy() td.setXmlOut(1) u = URL(site + trm) input = BufferedInputStream(u.openStream()) output = ByteArrayOutputStream() #tidy.setInputEncoding("UTF8") #tidy.setOutputEncoding("UTF8") td.parse(input, output) content = output.toString() hits = pattern.findall(content) for hit in hits: if hit.find(":") == -1: print >> f, hit print 'done.' print >> f, '' for hit in hits: if hit.find(":") == -1: crawl(site, hit, depth + 1, linksfile) #except: # print "wrong" f.close()
def getPayloadContent(self):
    """Return displayable HTML for the current payload.

    Text payloads are wrapped in <pre>; MS Office / OpenDocument payloads
    are read from their ICE-rendered .htm sibling, from which the
    div.body node is extracted and re-serialised. Returns "" otherwise.
    """
    # NOTE: 'format' and 'str' below shadow Python builtins (pre-existing)
    format = self.__metadata.getField("dc_format")
    slash = self.__oid.rfind("/")
    pid = self.__oid[slash+1:]
    print " *** payload content, format: %s, pid: %s *** " % (format, pid)
    contentStr = ""
    if format.startswith("text"):
        contentStr = "<pre>"
        payload = self.__storage.getPayload(self.__oid, pid)
        str = StringWriter()
        IOUtils.copy(payload.getInputStream(), str)
        contentStr += str.toString()
        contentStr += "</pre>"
    elif format.find("vnd.ms-")>-1 or format.find("vnd.oasis.opendocument.")>-1:
        #get the html version if exist....
        pid = pid[:pid.find(".")] + ".htm"
        payload = self.__storage.getPayload(self.__oid, pid)
        saxReader = SAXReader()
        document = saxReader.read(payload.getInputStream())
        slideNode = document.selectSingleNode("//div[@class='body']")
        #linkNodes = slideNode.selectNodes("//img")
        #contentStr = slideNode.asXML();
        # encode character entities correctly
        out = ByteArrayOutputStream()
        format = OutputFormat.createPrettyPrint()
        format.setSuppressDeclaration(True)
        writer = XMLWriter(out, format)
        writer.write(slideNode)
        writer.close()
        contentStr = out.toString("UTF-8")
    return contentStr
def showImgWithLegend(width=None, height=None):
    """ This function shows the image and legend from current IDV window
    while in GUI mode. Optional arguments are width and height in pixels,
    they currently default to 600 and 400"""
    from java.util import Base64  ##only in java8
    from javax.imageio import ImageIO
    from java.io import ByteArrayOutputStream
    from ucar.unidata.ui.ImageUtils import resize, toBufferedImage
    import java
    import java.awt.Robot as Robot
    import java.awt.Rectangle as Rectangle
    import java.awt.Toolkit as Toolkit
    from ucar.unidata.util import Misc
    viewManager = idv.getViewManager()
    contents = viewManager.getContents()
    # Component 1 is the view plus legend; 0 is the perspective controls
    viewComponent = contents.getComponent(1)
    size = viewComponent.getSize()
    origin = viewComponent.getLocationOnScreen()
    graphicsConf = viewComponent.getGraphicsConfiguration()
    # Translate to coordinates relative to the capture device
    origin.x -= graphicsConf.getBounds().x
    origin.y -= graphicsConf.getBounds().y
    robot = Robot(graphicsConf.getDevice())
    viewManager.toFront()
    Misc.sleep(250)
    capture = robot.createScreenCapture(
        Rectangle(origin.x, origin.y, size.width, size.height))
    if width is not None and height is not None:
        capture = toBufferedImage(resize(capture, width, height))
    encoded = ByteArrayOutputStream()
    # Stream the PNG through a base64 encoder directly
    ImageIO.write(capture, "png", Base64.getEncoder().wrap(encoded))
    data = encoded.toString("UTF-8")
    return {"display": "image", "data": data}
def handleWorkflowStep(self):
    """Write a JSON array of {value,label} objects, one per distinct
    workflow step label found on dataset records."""
    outStream = ByteArrayOutputStream()
    request = SearchRequest("workflow_step_label:[* TO *]")
    request.setParam("fq", 'item_type:"object"')
    request.setParam("fq", 'workflow_id:"dataset"')
    request.setParam("rows", "1000")
    self.indexer.search(request, outStream)
    solrData = SolrResult(ByteArrayInputStream(outStream.toByteArray()))
    labels = HashSet()
    if solrData.getNumFound() > 0:
        # Gather every distinct step label from every matching record
        for record in solrData.getResults():
            stepLabels = record.getList("workflow_step_label")
            if not stepLabels.isEmpty():
                for label in stepLabels:
                    labels.add(label)
        self.writer.println("[")
        first = True
        for label in labels:
            entry = "{\"value\": \"%s\",\n\"label\": \"%s\"}" % (label, label)
            if first:
                self.writer.println(entry)
                first = False
            else:
                self.writer.println("," + entry)
        self.writer.println("]")
    else:
        self.writer.println("[\"\"]")
    self.writer.close()
def __search(self):
    """Build a facet list for the requested facet field.

    Combines the form query with any stored session search query, runs a
    rows=0 faceted search restricted by the session 'fq', the base object
    filter, and the user's security roles, then wraps the result in a
    FacetList stored on self.__facetList.
    """
    query = formData.get("query")
    searchQuery = sessionState.get("searchQuery")
    if query is None or query == "":
        query = "*:*"
    if searchQuery and query == "*:*":
        query = searchQuery
    elif searchQuery:
        query += " AND " + searchQuery
    facetField = formData.get("facet.field")
    req = SearchRequest(query)
    req.setParam("facet", "true")
    req.setParam("fl", "id")
    # rows=0: only the facet counts are needed, not the documents
    req.setParam("rows", "0")
    req.setParam("facet.limit", "-1")
    req.setParam("facet.field", facetField)
    fq = sessionState.get("fq")
    if fq is not None:
        req.setParam("fq", fq)
    req.addParam("fq", 'item_type:"object"')
    # Make sure 'fq' has already been set in the session
    security_roles = self.authentication.get_roles_list();
    security_query = 'security_filter:("' + '" OR "'.join(security_roles) + '")'
    req.addParam("fq", security_query)
    out = ByteArrayOutputStream()
    indexer = Services.getIndexer()
    indexer.search(req, out)
    result = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))
    self.__facetList = FacetList(facetField, result)
def __getSolrData(self): prefix = self.getSearchTerms() if prefix != "": terms = prefix.split(" ") if len(terms)>1: termsQuery = " OR %s" ' OR '.join(terms) else: termsQuery = "" queryValue = "%(prefix)s OR %(prefix)s*%(terms)s" % { "prefix": prefix, "terms": termsQuery } query = 'dc_title:(%(qv)s)^2 OR dc_identifier:(%(qv)s)^0.5' % { "qv": queryValue } else: query = "*:*" portal = self.services.portalManager.get(self.portalId) sq = portal.searchQuery if sq not in ["", "*:*"]: query = query + " AND " + portal.searchQuery req = SearchRequest(query) req.setParam("fq", 'item_type:"object"') if portal.query: req.addParam("fq", portal.query) req.setParam("fl", "score") req.setParam("sort", "score desc, f_dc_title asc") req.setParam("start", self.getStartIndex()) req.setParam("rows", self.getItemsPerPage()) try: out = ByteArrayOutputStream() indexer = self.services.getIndexer() indexer.search(req, out) return SolrResult(ByteArrayInputStream(out.toByteArray())) except Exception, e: self.log.error("Failed to lookup '{}': {}", prefix, e.getMessage())
def pageContent(self):
    """Return the HTML used to display the current object's preview.

    HTML objects are shown inside an iframe (with an escape link) because
    their content is arbitrary; anything else is read from the 'preview'
    payload and returned as a UTF-8 string. Returns None when the storage
    lookup fails.
    """
    # Object ID
    oid = self.metadata.get("id")
    # Determine MIME Type
    mimeType = "Unknown"
    mimeList = self.metadata.getList("dc_format")
    if mimeList is not None and not mimeList.isEmpty():
        mimeType = mimeList.get(0)
    # The HTML payload is the real object, display in a frame because we
    # have no idea what kind of object it is.
    if mimeType == "text/html":
        objectPath = "http://%s:%s%s/%s/download/%s/" % \
            (self.req.serverName, self.serverPort, self.contextPath, self.portalId, oid)
        objectLink = '<a class="iframe-link-alt" href="%s">View outside the frame</a>' % objectPath
        objectFrame = '<iframe class="iframe-preview" src="%s"></iframe>' % objectPath
        return objectLink + "<br/>" + objectFrame
    # We are just rendering a HTML preview
    else:
        preview = self.metadata.get("preview")
        try:
            object = Services.getStorage().getObject(oid)
            payload = object.getPayload(preview)
            out = ByteArrayOutputStream()
            IOUtils.copy(payload.open(), out)
            payload.close()
            return out.toString("UTF-8")
        except StorageException, e:
            # Fall through with None when the preview payload is missing
            return
def findPublishedRecords(self):
    """Return the Solr result list for published records.

    NOTE(review): the intended query ('published:"true"') is commented
    out and a hard-coded storage_id is searched instead — this looks like
    leftover debugging; confirm which query should ship.
    """
    #req = SearchRequest("published:\"true\"")
    req = SearchRequest("storage_id:\"c6a214670dc644e5ebdaede4a2243f67\"")
    out = ByteArrayOutputStream()
    self.indexer.search(req, out)
    solrResult = SolrResult(ByteArrayInputStream(out.toByteArray()))
    return solrResult.getResults()
def parseFFmpeg(self, parent):
    """Read and parse the 'ffmpeg.info' payload from the parent's object.

    On success populates self.__ffmpegRaw, self.__ffmpegData and
    self.__ffmpegOutputs and returns True; returns a falsy value when the
    parent/object/payload is missing or parsing fails.
    """
    if parent is None:
        return False
    object = parent.getObject()
    if object is None:
        return False
    payload = None
    try:
        payload = object.getPayload("ffmpeg.info")
        # Stream the content out to string
        out = ByteArrayOutputStream()
        IOUtils.copy(payload.open(), out)
        # Bug fix: payload.close() was called twice in the original
        payload.close()
        self.__ffmpegRaw = out.toString("UTF-8")
        out.close()
        # And parse it
        self.__ffmpegData = JsonSimple(self.__ffmpegRaw)
        if self.__ffmpegData is None:
            return False
        self.__ffmpegOutputs = self.__ffmpegData.getJsonSimpleMap(["outputs"])
        return True
    except Exception:
        # Narrowed from a bare 'except'
        if payload is not None:
            payload.close()
        return False
def _toPDF(fis):
    """Convert an XHTML input stream to PDF bytes via iText XMLWorker."""
    pdfBytes = ByteArrayOutputStream()
    # Fresh document wired to an in-memory writer
    document = Document()
    pdfWriter = PdfWriter.getInstance(document, pdfBytes)
    # Document header/metadata attributes
    document.addAuthor("betterThanZero")
    document.addCreationDate()
    document.addProducer()
    document.addCreator("MySampleCode.com")
    document.addTitle("Demo for iText XMLWorker")
    document.setPageSize(PageSize.LETTER)
    document.open()
    # Parse the XHTML stream straight into the open PDF document
    worker = XMLWorkerHelper.getInstance()
    worker.parseXHtml(pdfWriter, document, fis)
    document.close()
    pdfWriter.close()
    return pdfBytes.toByteArray()
def __getSolrData(self): prefix = self.getSearchTerms() if prefix: query = '%(prefix)s OR %(prefix)s*' % { "prefix" : prefix } else: query = "*:*" level = self.getFormData("level", None) if level is not None: if level=="top": #query += " AND skos_hasTopConcept:http*" query += ' AND dc_identifier:"http://purl.org/anzsrc/seo/#division"' else: query += ' AND skos_broader:"%s"' % level req = SearchRequest(query) req.addParam("fq", 'item_type:"object"') req.addParam("fq", 'repository_type:"SEO"') req.setParam("fl", "score") req.setParam("sort", "score desc") req.setParam("start", self.getStartIndex()) req.setParam("rows", self.getItemsPerPage()) try: out = ByteArrayOutputStream() indexer = self.services.getIndexer() indexer.search(req, out) return JsonConfigHelper(ByteArrayInputStream(out.toByteArray())) except Exception, e: self.log.error("Failed to lookup '{}': {}", prefix, str(e))
def showImgWithFullWindow(width=None, height=None):
    """ This function shows the image from current IDV window while in
    GUI mode. optional arguments are width and height in pixels, they
    currently default to 600 and 400"""
    from java.util import Base64  ##only in java8
    from javax.imageio import ImageIO
    from java.io import ByteArrayOutputStream
    from ucar.unidata.ui.ImageUtils import resize, toBufferedImage
    import java
    import java.awt.Robot as Robot
    import java.awt.Rectangle as Rectangle
    import java.awt.Toolkit as Toolkit
    from ucar.unidata.util import Misc
    viewManager = idv.getViewManager()
    frame = viewManager.getDisplayWindow().getComponent()
    robot = Robot(frame.getGraphicsConfiguration().getDevice())
    viewManager.toFront()
    #robotx.delay(250)
    Misc.sleep(350)
    pause()
    # Capture the whole display window area from the screen
    capture = robot.createScreenCapture(Rectangle(
        frame.getX(), frame.getY(), frame.getWidth(), frame.getHeight()))
    if width is not None and height is not None:
        capture = toBufferedImage(resize(capture, width, height))
    encoded = ByteArrayOutputStream()
    # Stream the PNG through a base64 encoder directly
    ImageIO.write(capture, "png", Base64.getEncoder().wrap(encoded))
    data = encoded.toString("UTF-8")
    return {"display": "image", "data": data}
def showImgWithFullWindow(width=None, height=None):
    """ This function shows the image from current IDV window while in GUI mode.
    optional arguments are width and height in pixels, they currently
    default to 600 and 400"""
    from java.util import Base64  ##only in java8
    from javax.imageio import ImageIO
    from java.io import ByteArrayOutputStream
    from ucar.unidata.ui.ImageUtils import resize, toBufferedImage
    import java
    import java.awt.Robot as Robot
    import java.awt.Rectangle as Rectangle
    import java.awt.Toolkit as Toolkit
    from ucar.unidata.util import Misc
    VM = idv.getViewManager()
    myframe = VM.getDisplayWindow().getComponent()
    robotx = Robot(myframe.getGraphicsConfiguration().getDevice())
    # Bring the window forward and give it time to paint before capturing
    VM.toFront()
    #robotx.delay(250)
    Misc.sleep(350)
    pause()
    # Screen-capture the whole display window area
    img = robotx.createScreenCapture(
        Rectangle(myframe.getX(), myframe.getY(),
                  myframe.getWidth(), myframe.getHeight()))
    if width != None and height != None:
        img = toBufferedImage(resize(img, width, height))
    bos = ByteArrayOutputStream()
    # PNG bytes are streamed through a base64 encoder directly
    ImageIO.write(img, "png", Base64.getEncoder().wrap(bos))
    data = bos.toString("UTF-8")
    return {"display": "image", "data": data}
def export(self, exportType):
    """Export every record matching the current facet selection.

    Runs a zero-row count query first to learn how many records match,
    then asks Solr to serialise all of them in the requested format (via
    'wt') and streams the output to the response as a file attachment.
    On query failure, sets self.errorMsg, logs, and returns early.
    """
    exportQuery = "%s:%s" % (self.facetField, self.facetFieldValue)
    # Bug fix: the original interpolated the builtin 'type' (rendering as
    # "<type 'type'>") instead of the requested export format.
    outputType = "text/%s; charset=UTF-8" % exportType
    responseHeader = "attachment; filename=%s.%s" % (self.facetFieldValue, exportType)
    try:
        out = ByteArrayOutputStream()
        recnumreq = SearchRequest(exportQuery)
        recnumreq.setParam("fl", "create_timestamp")
        recnumreq.setParam("rows", "0")
        self.indexer.search(recnumreq, out)
        recnumres = SolrResult(ByteArrayInputStream(out.toByteArray()))
        self.__rowsFoundSolr = "%s" % recnumres.getNumFound()
    except Exception:
        # Narrowed from a bare 'except:'
        self.errorMsg = "Export query failure. The issue has been logged (%s - %s)." % (sys.exc_info()[0], sys.exc_info()[1])
        self.log.error("Export query threw an exception (package type was %s): %s - %s" % (self.facetFieldValue, sys.exc_info()[0], sys.exc_info()[1]))
        return
    out = ByteArrayOutputStream()
    req = SearchRequest(exportQuery)
    req.setParam("wt", exportType)
    req.setParam("rows", self.__rowsFoundSolr)
    self.indexer.search(req, out)
    self.response.setHeader("Content-Disposition", responseHeader)
    writer = self.response.getPrintWriter(outputType)
    writer.println(out.toString("UTF-8"))
    writer.close()
def parseFFmpeg(self, parent):
    """Read and parse the 'ffmpeg.info' payload from the parent's object.

    On success populates self.__ffmpegRaw and self.__ffmpegData (the
    '/outputs' map) and returns True; returns a falsy value when the
    parent/object/payload is missing or parsing fails.
    """
    if parent is None:
        return False
    object = parent.getObject()
    if object is None:
        return False
    payload = None
    try:
        payload = object.getPayload("ffmpeg.info")
        # Stream the content out to string
        out = ByteArrayOutputStream()
        IOUtils.copy(payload.open(), out)
        # Bug fix: payload.close() was called twice in the original
        payload.close()
        self.__ffmpegRaw = out.toString("UTF-8")
        out.close()
        # And parse it
        jsonData = JsonConfigHelper(self.__ffmpegRaw)
        if jsonData is None:
            return False
        self.__ffmpegData = jsonData.getJsonMap("/outputs")
        return True
    except Exception:
        # Narrowed from a bare 'except'
        if payload is not None:
            payload.close()
        return False
def handleQuery(self, query, fieldName, formatStr):
    """Search dataset records with 'query', collect the distinct values of
    'fieldName' (filtered by self.term when set), and write them out as a
    JSON array using 'formatStr' for each entry."""
    outStream = ByteArrayOutputStream()
    request = SearchRequest(query)
    request.setParam("fq", 'item_type:"object"')
    request.setParam("fq", 'workflow_id:"dataset"')
    request.setParam("rows", "1000")
    self.indexer.search(request, outStream)
    solrData = SolrResult(ByteArrayInputStream(outStream.toByteArray()))
    values = HashSet()
    if solrData.getNumFound() > 0:
        for record in solrData.getResults():
            fieldValues = record.getList(fieldName)
            if not fieldValues.isEmpty():
                for value in fieldValues:
                    # Keep only values containing the search term, if any
                    if self.term is None or value.find(self.term) != -1:
                        values.add(value)
        self.writer.print("[")
        first = True
        for value in values:
            if first:
                self.writer.print(formatStr % {"hit": value})
                first = False
            else:
                self.writer.print("," + formatStr % {"hit": value})
        self.writer.print("]")
    else:
        self.writer.println("[\"\"]")
    self.writer.close()
def search_solr(self):
    """Fetch every annotation for the current root URI(s) from Solr.

    Builds a rootUri query (optionally restricted by annotation type),
    runs it against the annotation index, and dispatches the raw result
    list to the tag or generic response processor.
    """
    query = "(rootUri:"
    if self.rootUriList:
        query += "(" + " OR ".join(self.rootUriList) + ")"
    else:
        query += "\"" + self.rootUri + "\""
    if self.type:
        query += " AND type:\"" + self.type + "\""
    query += ")"
    #print "**********", query
    req = SearchRequest(query)
    req.setParam("facet", "false")
    # Effectively unbounded: return every matching annotation
    req.setParam("rows", str(99999))
    req.setParam("sort", "dateCreated asc")
    req.setParam("start", str(0))
    #security_roles = page.authentication.get_roles_list();
    #security_query = 'security_filter:("' + '" OR "'.join(security_roles) + '")'
    #req.addParam("fq", security_query)
    out = ByteArrayOutputStream()
    Services.indexer.annotateSearch(req, out)
    result = SolrResult(ByteArrayInputStream(
        out.toByteArray())).getResults()
    # Every annotation for this URI
    if self.type == "http://www.purl.org/anotar/ns/type/0.1#Tag":
        return self.process_tags(result)
    else:
        return self.process_response(result)
def __search(self):
    """Run the faceted search backing the OAI feed.

    Uses the module-level globals (portalId, formData, Services) to query
    the portal's records; the result is stored as a JsonConfigHelper on
    self.__result.
    """
    self.__result = JsonConfigHelper()
    portal = Services.getPortalManager().get(portalId)
    recordsPerPage = portal.recordsPerPage
    query = formData.get("query")
    if query is None or query == "":
        query = "*:*"
    req = SearchRequest(query)
    req.setParam("facet", "true")
    req.setParam("rows", str(recordsPerPage))
    req.setParam("facet.field", portal.facetFieldList)
    req.setParam("facet.limit", str(portal.facetCount))
    req.setParam("sort", "f_dc_title asc")
    portalQuery = portal.query
    print " * portalQuery=%s" % portalQuery
    if portalQuery:
        req.addParam("fq", portalQuery)
    # TODO resumptionToken
    #req.setParam("start", str((self.__pageNum - 1) * recordsPerPage))
    print " * oai.py:", req.toString()
    out = ByteArrayOutputStream()
    Services.indexer.search(req, out)
    self.__result = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))
def handleQuery(self, query, fieldName, formatStr):
    """Search dataset records with 'query', collect the distinct values of
    'fieldName' (filtered by self.term when set), and write them out as a
    JSON array using 'formatStr' for each entry.
    """
    out = ByteArrayOutputStream()
    req = SearchRequest(query)
    req.setParam("fq", 'item_type:"object"')
    req.setParam("fq", 'workflow_id:"dataset"')
    req.setParam("rows", "1000")
    self.indexer.search(req, out)
    res = SolrResult(ByteArrayInputStream(out.toByteArray()))
    hits = HashSet()
    if (res.getNumFound() > 0):
        results = res.getResults()
        for searchRes in results:
            searchResList = searchRes.getList(fieldName)
            if (searchResList.isEmpty()==False):
                for hit in searchResList:
                    # Keep only values containing the search term, if any
                    if self.term is not None:
                        if hit.find(self.term) != -1:
                            hits.add(hit)
                    else:
                        hits.add(hit)
        self.writer.print("[")
        hitnum = 0
        for hit in hits:
            # Comma-separate all entries after the first
            if (hitnum > 0):
                self.writer.print(","+formatStr % {"hit":hit})
            else:
                self.writer.print(formatStr % {"hit":hit})
            hitnum += 1
        self.writer.print("]")
    else:
        self.writer.println("[\"\"]")
    self.writer.close()
def __feed(self):
    """Build the Solr result backing the portal feed page.

    Reads the portal, session and form data through the velocity-context
    accessor self.vc(); the (escaped) user query is faceted, scoped by
    the portal query (or the session 'fq' fallback) and paged by the
    session page number. The SolrResult is stored on self.__result.
    """
    self.portal = self.services.getPortalManager().get(self.vc("portalId"))
    recordsPerPage = self.portal.recordsPerPage
    pageNum = self.vc("sessionState").get("pageNum", 1)
    query = "*:*"
    if self.vc("formData").get("query"):
        query = self.vc("formData").get("query")
        # Escape user-supplied Solr metacharacters
        query = self.__escapeQuery(query)
    req = SearchRequest(query)
    req.setParam("facet", "true")
    req.setParam("rows", str(recordsPerPage))
    req.setParam("facet.field", self.portal.facetFieldList)
    req.setParam("facet.sort", "true")
    req.setParam("facet.limit", str(self.portal.facetCount))
    req.setParam("sort", "f_dc_title asc")
    portalQuery = self.portal.query
    if portalQuery:
        req.addParam("fq", portalQuery)
    else:
        # No portal query: fall back to the filter stored in the session
        fq = self.vc("sessionState").get("fq")
        if fq is not None:
            req.setParam("fq", fq)
    req.setParam("start", str((pageNum - 1) * recordsPerPage))
    self.log.debug(" * Query: '{}'", query)
    self.log.debug(" * portalQuery: '{}'", portalQuery)
    self.log.debug(" * feed.py: '{}'", req)
    out = ByteArrayOutputStream()
    self.services.indexer.search(req, out)
    self.__result = SolrResult(ByteArrayInputStream(out.toByteArray()))
def output_serialized_model(key, model):
    """Java-serialize 'model' and return the raw serialized bytes.

    'key' is accepted for interface compatibility but is not used here.
    """
    sink = ByteArrayOutputStream()
    objectWriter = ObjectOutputStream(sink)
    objectWriter.writeObject(model)
    objectWriter.flush()
    objectWriter.close()
    return sink.toByteArray()
def showImgWithLegend(width=None, height=None):
    """ This function shows the image and legend from current IDV window
    while in GUI mode. Optional arguments are width and height in pixels,
    they currently default to 600 and 400"""
    from java.util import Base64  ##only in java8
    from javax.imageio import ImageIO
    from java.io import ByteArrayOutputStream
    from ucar.unidata.ui.ImageUtils import resize, toBufferedImage
    import java
    import java.awt.Robot as Robot
    import java.awt.Rectangle as Rectangle
    import java.awt.Toolkit as Toolkit
    from ucar.unidata.util import Misc
    manager = idv.getViewManager()
    windowContents = manager.getContents()
    # Component 1 is the view plus legend; 0 is the perspective controls
    viewPane = windowContents.getComponent(1)
    paneSize = viewPane.getSize()
    paneOrigin = viewPane.getLocationOnScreen()
    config = viewPane.getGraphicsConfiguration()
    # Translate to coordinates relative to the capture device
    paneOrigin.x -= config.getBounds().x
    paneOrigin.y -= config.getBounds().y
    screenRobot = Robot(config.getDevice())
    manager.toFront()
    Misc.sleep(250)
    screenshot = screenRobot.createScreenCapture(
        Rectangle(paneOrigin.x, paneOrigin.y, paneSize.width, paneSize.height))
    if width is not None and height is not None:
        screenshot = toBufferedImage(resize(screenshot, width, height))
    pngBuffer = ByteArrayOutputStream()
    # Stream the PNG through a base64 encoder directly
    ImageIO.write(screenshot, "png", Base64.getEncoder().wrap(pngBuffer))
    data = pngBuffer.toString("UTF-8")
    return {"display": "image", "data": data}
def __getSolrData(self): prefix = self.getSearchTerms() if prefix != "": query = 'dc_title:"%(prefix)s" OR dc_title:"%(prefix)s*"' % { "prefix" : prefix } else: query = "*:*" portal = self.services.portalManager.get(self.portalId) if portal.searchQuery != "*:*": query = query + " AND " + portal.searchQuery req = SearchRequest(query) req.setParam("fq", 'item_type:"object"') if portal.query: req.addParam("fq", portal.query) req.setParam("fl", "score") req.setParam("sort", "score desc, f_dc_title asc") req.setParam("start", self.getStartIndex()) req.setParam("rows", self.getItemsPerPage()) try: out = ByteArrayOutputStream() indexer = self.services.getIndexer() indexer.search(req, out) return JsonConfigHelper(ByteArrayInputStream(out.toByteArray())) except Exception, e: self.log.error("Failed to lookup '{}': {}", prefix, str(e))
def __getStorageId(self, oid):
    """Resolve an object id to its storage_id via a Solr lookup."""
    request = SearchRequest('id:"%s"' % oid)
    request.addParam("fl", "storage_id")
    response = ByteArrayOutputStream()
    Services.indexer.search(request, response)
    docs = JsonConfigHelper(ByteArrayInputStream(response.toByteArray()))
    # Exactly one document is expected for a unique id
    return docs.getList("response/docs").get(0).get("storage_id")
def write(self, f, encoding=None, method="xml", pretty_print=False,
          xml_declaration=None, with_tail=True, standalone=None,
          compression=0, exclusive=False, with_comments=True,
          inclusive_ns_prefixes=None):
    """Write the document to 'f' (path or file object), optionally
    deflate-compressed.

    Only the default values are supported for the lxml-compatibility
    keyword arguments; anything else raises NotImplementedError.
    """
    if encoding is not None or method != "xml" or pretty_print is not False or xml_declaration is not None or with_tail is not True or standalone is not None or exclusive is not False or with_comments is not True or inclusive_ns_prefixes is not None:
        raise NotImplementedError
    if compression == 0 and isinstance(f, (basestring, file, File)):
        # direct: stream the DOM straight to the target
        source = DOMSource(self._document.getDocumentElement())
        result = StreamResult(f)
        identityTransformation.transform(source, result)
    else:
        # first to a BAOS, then optionally deflate before writing out
        f2 = ByteArrayOutputStream()
        source = DOMSource(self._document.getDocumentElement())
        result = StreamResult(f2)
        identityTransformation.transform(source, result)
        if compression > 0:
            bytes = f2.toByteArray()
            deflater = Deflater(compression)
            deflater.setInput(bytes)
            deflater.finish()
            # 2x headroom: deflate can expand incompressible input
            output = jarray.zeros(2 * len(bytes), "b")
            length = deflater.deflate(output)
            output = output[:length]
        else:
            output = f2.toByteArray()
        if isinstance(f, basestring):
            # NOTE(review): the file handle is never closed explicitly —
            # relies on GC; consider a with-statement.
            open(f, "wb").write(output.tostring())
        else:
            f.write(output.tostring())
def search_solr(self):
    """Query the annotation index for this URI (or list of URIs) and
    dispatch the documents to the matching response processor."""
    if self.rootUriList:
        target = "(" + " OR ".join(self.rootUriList) + ")"
    else:
        target = '"' + self.rootUri + '"'
    query = "(rootUri:" + target
    if self.type:
        query += ' AND type:"' + self.type + '"'
    query += ")"
    request = SearchRequest(query)
    request.setParam("facet", "false")
    request.setParam("rows", "99999")
    request.setParam("sort", "dateCreated asc")
    request.setParam("start", "0")
    raw = ByteArrayOutputStream()
    Services.indexer.annotateSearch(request, raw)
    response = JsonConfigHelper(ByteArrayInputStream(raw.toByteArray()))
    # Every annotation for this URI
    docs = response.getJsonList("response/docs")
    # Tags get their own processing path; everything else is generic.
    if self.type == "http://www.purl.org/anotar/ns/type/0.1#Tag":
        return self.process_tags(docs)
    return self.process_response(docs)
def __search(self):
    """Run a facet-only Solr search combining the submitted query, the
    session's stored search/filter queries, and the current user's
    security roles.  Returns a FacetList for the requested facet field."""
    query = self.formData.get("query")
    if not query:
        query = "*:*"
    searchQuery = self.sessionState.get("searchQuery")
    if searchQuery:
        if query == "*:*":
            query = searchQuery
        else:
            query = query + " AND " + searchQuery
    facetField = self.formData.get("facet.field")
    request = SearchRequest(query)
    request.setParam("facet", "true")
    request.setParam("fl", "id")
    request.setParam("rows", "0")
    request.setParam("facet.limit", "-1")
    request.setParam("facet.field", facetField)
    # Make sure 'fq' has already been set in the session
    fq = self.sessionState.get("fq")
    if fq is not None:
        request.setParam("fq", fq)
    request.addParam("fq", 'item_type:"object"')
    # Restrict results to records the current user's roles may see.
    roles = self.auth.get_roles_list()
    request.addParam("fq", 'security_filter:("' + '" OR "'.join(roles) + '")')
    raw = ByteArrayOutputStream()
    self.services.indexer.search(request, raw)
    result = SolrResult(ByteArrayInputStream(raw.toByteArray()))
    return FacetList(facetField, result)
def getProperBase64EncodingOfFloatArr(cur_scan_arr):
    """Return the base64 encoding of *cur_scan_arr* serialised as
    consecutive 4-byte floats (DataOutputStream writes big-endian)."""
    byte_sink = ByteArrayOutputStream()
    data_out = DataOutputStream(byte_sink)
    for value in cur_scan_arr:
        data_out.writeFloat(value)
    data_out.flush()
    return base64.b64encode(byte_sink.toByteArray())
def _addPropertyValueToTFMeta(self, object, tfMetaPropertyValue):
    """Set the 'copyTFPackage' property on the object's metadata and
    persist the updated metadata back as the TF-OBJ-META payload."""
    metadata = object.getMetadata()
    metadata.setProperty("copyTFPackage", tfMetaPropertyValue)
    serialised = ByteArrayOutputStream()
    metadata.store(serialised, None)
    payload = ByteArrayInputStream(serialised.toByteArray())
    StorageUtils.createOrUpdatePayload(object, "TF-OBJ-META", payload)
def __searchExecute(self, search, count): try: search.setParam("start", str(count)) out = ByteArrayOutputStream() self.services.indexer.search(search, out) return SolrResult(ByteArrayInputStream(out.toByteArray())) except Exception, e: self.log.error("Error during search: ", e) return None
def BufferedImgToNotebook(img):
    """Render *img* as a base64-encoded PNG for inline notebook display.

    Returns a {"display": "image", "data": <base64 PNG string>} dict.
    """
    from java.io import ByteArrayOutputStream
    from java.util import Base64  ##only in java8
    from javax.imageio import ImageIO
    bos = ByteArrayOutputStream()
    b64stream = Base64.getEncoder().wrap(bos)
    ImageIO.write(img, "png", b64stream)
    # BUG FIX: the wrapping stream must be closed so the encoder flushes
    # its final block and padding into 'bos'; without this the base64
    # output could be truncated or left unpadded.
    b64stream.close()
    data = bos.toString("UTF-8")
    return {"display": "image", "data": data}
def getAttachments(self):
    """Return the Solr documents for every review attachment of this
    record (attachment_type 'review-attachments')."""
    kind = "review-attachments"
    request = SearchRequest("attached_to:%s AND attachment_type:%s" % (self.oid, kind))
    request.setParam("rows", "1000")
    raw = ByteArrayOutputStream()
    self.Services.indexer.search(request, raw)
    return SolrResult(ByteArrayInputStream(raw.toByteArray())).getResults()
def getFileBytes(path):
    """Read the file at *path* and return its contents as a Java byte
    array.

    Reads in 1 KB chunks; the input stream is closed even if a read
    fails (previously the FileInputStream was never closed, leaking the
    file handle).
    """
    baos = ByteArrayOutputStream()
    fis = FileInputStream(File(path))
    try:
        chunk = jarray.zeros(1024, 'b')
        while True:
            readSize = fis.read(chunk)
            if readSize == -1:
                break
            baos.write(chunk, 0, readSize)
    finally:
        fis.close()
    return baos.toByteArray()
def test_serialization(self):
    """Round-trip a Python set through Java object serialisation and
    check equality survives."""
    original = set(range(5, 10))
    sink = ByteArrayOutputStream()
    writer = ObjectOutputStream(sink)
    writer.writeObject(original)
    writer.close()
    source = ByteArrayInputStream(sink.toByteArray())
    reader = ObjectInputStream(source)
    self.assertEqual(original, reader.readObject())
def serialize(o, special=False):
    """Write *o* through an ObjectOutputStream and read it straight back.

    *special* selects PythonObjectInputStream for deserialisation
    instead of the plain ObjectInputStream.
    """
    sink = ByteArrayOutputStream()
    writer = ObjectOutputStream(sink)
    writer.writeObject(o)
    reader_cls = PythonObjectInputStream if special else ObjectInputStream
    reader = reader_cls(ByteArrayInputStream(sink.toByteArray()))
    return reader.readObject()
def findPackagesToTransition(self, fromWorkflowId, fromWorkflowStage):
    """Find every indexed package in the given workflow id and step that
    has both an owner and a security filter set.

    Returns the SolrResult document list.
    """
    req = SearchRequest("workflow_id:" + fromWorkflowId
                        + " AND _query_:\"workflow_step:" + fromWorkflowStage + "\"")
    req.setParam("fq", "owner:[* TO *]")
    # BUG FIX: this was setParam(), which replaced the owner filter set
    # above instead of adding a second filter query (the file's
    # convention elsewhere is setParam for the first fq, addParam after).
    req.addParam("fq", "security_filter:[* TO *]")
    out = ByteArrayOutputStream()
    self.indexer.search(req, out)
    solrResult = SolrResult(ByteArrayInputStream(out.toByteArray()))
    return solrResult.getResults()
def __init__(self, stdout=True):
    """Redirect System.out (or System.err when *stdout* is False) into
    an in-memory UTF-8 buffer, remembering the original stream so it
    can be restored later."""
    if stdout:
        self._original = System.out
        self._set_stream = System.setOut
    else:
        self._original = System.err
        self._set_stream = System.setErr
    self._bytes = ByteArrayOutputStream()
    # Non-autoflushing PrintStream; everything lands in self._bytes.
    self._stream = PrintStream(self._bytes, False, 'UTF-8')
    self._set_stream(self._stream)
def buildRequest(self, payload):
    """Build a new request with *payload* spliced into this insertion
    point.

    This is the main method through which an extension interacts with a
    IScannerInsertionPoint instance: the extension provides the payload
    and we replace the insertion-point span of the original request with
    it, encoding the payload to suit the parameter type (JSON, XML,
    header, or URL).

    Args:
        payload: the active scanner payload provided by the extension.
    """
    start = self.start
    end = self.end
    if self.type == IScannerInsertionPoint.INS_PARAM_JSON:
        start, end, payload = self.encodeJson(start, end, payload)
    elif self.type in (IScannerInsertionPoint.INS_PARAM_XML,
                       IScannerInsertionPoint.INS_PARAM_XML_ATTR):
        start, end, payload = self.encodeXml(start, end, payload)
    elif self.type != IScannerInsertionPoint.INS_HEADER:
        # Headers are inserted verbatim; every other type is URL-encoded.
        start, end, payload = self.encodeUrl(start, end, payload)
    spliced = ByteArrayOutputStream()
    spliced.write(self.request[0:start])
    spliced.write(payload)
    spliced.write(self.request[end:])
    return self.updateContentLength(spliced.toByteArray())
def __loadSolrData(self, oid):
    """Fetch the Solr record for *oid*, constrained by the current
    portal's search query and filter query."""
    portal = self.vc("page").getPortal()
    query = 'id:"%s"' % oid
    searchQuery = portal.getSearchQuery()
    if searchQuery:
        query = query + " AND " + searchQuery
    request = SearchRequest(query)
    request.addParam("fq", 'item_type:"object"')
    request.addParam("fq", portal.getQuery())
    raw = ByteArrayOutputStream()
    self.vc("Services").getIndexer().search(request, raw)
    return SolrResult(ByteArrayInputStream(raw.toByteArray()))
def outputImage(self, bufferedImage, filename):
    """Write *bufferedImage* to *filename*, appending the first
    supported extension when the name has no recognised one."""
    if not filename.endswith(SUPPORTED_FORMATS):
        filename += SUPPORTED_FORMATS[0]
    # NOTE(review): assumes every supported extension is exactly three
    # characters ("png", "jpg", ...) -- confirm against SUPPORTED_FORMATS.
    imageType = filename[-3:]
    from java.io import ByteArrayOutputStream, File
    from javax.imageio import ImageIO
    encoded = ByteArrayOutputStream()
    ImageIO.write(bufferedImage, imageType, encoded)
    from com.raytheon.uf.common.util import FileUtil
    FileUtil.bytes2File(encoded.toByteArray(), File(filename))
def testEncryptedPassword(self): credential_string = 'AES}0vlIcO+I+VWV9aQ1wzQUa1qtByh4D9d0I1dJHa7HsdE=' try: bos = ByteArrayOutputStream() dos = DataOutputStream(bos) dos.writeBytes(credential_string) byte_array = bos.toByteArray() dos.close() except IOException, ioe: self.fail('Unexpected exception writing out credential : ', str(ioe))