def __search(self):
    """Run two Solr searches for the current portal: the ten most recently
    modified objects (stored in self.__latest) and an overall object count
    (stored in self.__result).

    Relies on script-scope globals supplied by the framework:
    Services, portalId, sessionState.
    """
    indexer = Services.getIndexer()
    portalQuery = Services.getPortalManager().get(portalId).getQuery()

    # Query 1: anything modified in the last month, newest first.
    req = SearchRequest("last_modified:[NOW-1MONTH TO *]")
    req.setParam("fq", 'item_type:"object"')
    if portalQuery:
        req.addParam("fq", portalQuery)
    req.setParam("rows", "10")
    req.setParam("sort", "last_modified desc, f_dc_title asc")
    out = ByteArrayOutputStream()
    indexer.search(req, out)
    self.__latest = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))

    # Query 2: rows=0, so only the overall numFound is of interest.
    req = SearchRequest("*:*")
    req.setParam("fq", 'item_type:"object"')
    if portalQuery:
        req.addParam("fq", portalQuery)
    # BUG FIX: removed req.addParam("fq", "") — an empty filter query is
    # at best a no-op and at worst an invalid Solr filter.
    req.setParam("rows", "0")
    out = ByteArrayOutputStream()
    indexer.search(req, out)
    sessionState.set("fq", 'item_type:"object"')
    self.__result = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))
def write(self, f, encoding=None, method="xml", pretty_print=False,
          xml_declaration=None, with_tail=True, standalone=None,
          compression=0, exclusive=False, with_comments=True,
          inclusive_ns_prefixes=None):
    """Serialize the document to `f` (a filename, file object, or
    java.io.File), optionally zlib-deflating at level `compression`.

    Only the default serialization options are supported; any non-default
    value raises NotImplementedError (lxml-compatible signature without
    lxml's features).
    """
    if (encoding is not None or method != "xml" or pretty_print is not False
            or xml_declaration is not None or with_tail is not True
            or standalone is not None or exclusive is not False
            or with_comments is not True or inclusive_ns_prefixes is not None):
        raise NotImplementedError
    if compression == 0 and isinstance(f, (basestring, file, File)):
        # Uncompressed with a direct sink: stream straight to the target.
        source = DOMSource(self._document.getDocumentElement())
        result = StreamResult(f)
        identityTransformation.transform(source, result)
    else:
        # Serialize into a byte buffer first.
        buf = ByteArrayOutputStream()
        source = DOMSource(self._document.getDocumentElement())
        result = StreamResult(buf)
        identityTransformation.transform(source, result)
        if compression > 0:
            bytes = buf.toByteArray()
            deflater = Deflater(compression)
            deflater.setInput(bytes)
            deflater.finish()
            # 2x head-room for the deflated output; presumably enough even
            # for incompressible input, but not guaranteed by Deflater —
            # TODO confirm.
            output = jarray.zeros(2 * len(bytes), "b")
            length = deflater.deflate(output)
            output = output[:length]
        else:
            output = buf.toByteArray()
        if isinstance(f, basestring):
            # BUG FIX: the original left the file handle open
            # (open(f, "wb").write(...)); close it deterministically.
            fh = open(f, "wb")
            try:
                fh.write(output.tostring())
            finally:
                fh.close()
        else:
            f.write(output.tostring())
def playWithByteStream(): baos = ByteArrayOutputStream() ds = DataOutputStream(baos) ds.write(1) ds.flush() ds.write(2) ds.flush() print baos.toByteArray() print "type(baos.toByteArray()): " + str(type(baos.toByteArray())) print "base 64 encoded: " print base64.b64encode(baos.toByteArray())
def __search(self):
    """Run two Solr searches for the current portal: the ten most recently
    modified objects (stored in self.__latest) and a workflow_step facet
    over all objects (stored in self.__result). Non-admin users only see
    records allowed by their roles, security exceptions, or ownership."""
    indexer = Services.getIndexer()
    portalQuery = Services.getPortalManager().get(self.vc("portalId")).getQuery()
    portalSearchQuery = Services.getPortalManager().get(self.vc("portalId")).getSearchQuery()
    # Security prep work
    current_user = self.vc("page").authentication.get_username()
    security_roles = self.vc("page").authentication.get_roles_list()
    security_filter = 'security_filter:("' + '" OR "'.join(security_roles) + '")'
    security_exceptions = 'security_exception:"' + current_user + '"'
    owner_query = 'owner:"' + current_user + '"'
    # Visible if: a role matches, an explicit exception names the user, or
    # the user owns the record.
    security_query = "(" + security_filter + ") OR (" + security_exceptions + ") OR (" + owner_query + ")"
    isAdmin = self.vc("page").authentication.is_admin()
    # Query 1: latest items modified in the last month, newest first.
    req = SearchRequest("last_modified:[NOW-1MONTH TO *]")
    req.setParam("fq", 'item_type:"object"')
    req.addParam("fq", "workflow_modified:false")
    if portalQuery:
        req.addParam("fq", portalQuery)
    if portalSearchQuery:
        req.addParam("fq", portalSearchQuery)
    req.setParam("rows", "10")
    req.setParam("sort", "last_modified desc, f_dc_title asc")
    if not isAdmin:
        req.addParam("fq", security_query)
    out = ByteArrayOutputStream()
    indexer.search(req, out)
    self.__latest = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))
    # Query 2: workflow_step facet counts over everything visible.
    req = SearchRequest("*:*")
    req.setParam("fq", 'item_type:"object"')
    if portalQuery:
        req.addParam("fq", portalQuery)
    if portalSearchQuery:
        req.addParam("fq", portalSearchQuery)
    req.setParam("facet", "true")
    req.setParam("facet.field", "workflow_step")
    req.setParam("fl", "id")
    req.setParam("rows", "1")
    if not isAdmin:
        req.addParam("fq", security_query)
    out = ByteArrayOutputStream()
    indexer.search(req, out)
    # self.vc("sessionState").set("fq", 'item_type:"object"')
    # sessionState.set("query", portalQuery.replace("\"", "'"))
    self.__result = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))
def search_solr(self):
    """Fetch every annotation attached to this object's root URI (or URI
    list), optionally filtered by annotation type, then dispatch to tag or
    generic response processing."""
    if self.rootUriList:
        target = "(" + " OR ".join(self.rootUriList) + ")"
    else:
        target = "\"" + self.rootUri + "\""
    solrQuery = "(rootUri:" + target
    if self.type:
        solrQuery += " AND type:\"" + self.type + "\""
    solrQuery += ")"

    request = SearchRequest(solrQuery)
    request.setParam("facet", "false")
    request.setParam("rows", str(99999))
    request.setParam("sort", "dateCreated asc")
    request.setParam("start", str(0))

    buffer = ByteArrayOutputStream()
    Services.indexer.annotateSearch(request, buffer)
    result = SolrResult(ByteArrayInputStream(buffer.toByteArray())).getResults()

    # Tags have a dedicated processing path; everything else is generic.
    if self.type == "http://www.purl.org/anotar/ns/type/0.1#Tag":
        return self.process_tags(result)
    return self.process_response(result)
def __isLinked(self, ids, map):
    """Flag documents in `map` as linked ("linked"="true") when their id
    appears as a package_node_id on any indexed master-record object."""
    query = 'package_node_id:("' + '" OR "'.join(ids) + '")'
    req = SearchRequest(query)
    req.setParam("fq", 'recordtype:"master"')
    req.addParam("fq", 'item_type:"object"')
    req.setParam("rows", "9999")
    out = ByteArrayOutputStream()
    self.__indexer.search(req, out)
    result = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))
    # Every node id currently referenced by some master package.
    currentList = []
    for doc in result.getJsonList("response/docs"):
        currentList.extend(doc.getList("package_node_id"))
    # `map` may be a Java LinkedHashMap or a Python mapping.
    if type(map).__name__ == "LinkedHashMap":
        for author in map.keySet():
            authorDocs = map.get(author)
            for doc in authorDocs:
                if doc.get("id") in currentList:
                    doc.set("linked", "true")
    else:
        # NOTE(review): authorList is iterated and then indexed by its own
        # elements (doc = authorList[count]) — presumably authorList maps
        # counts to docs rather than being a plain list; verify against
        # the caller.
        for author in map.keys():
            authorList = map[author]
            for count in authorList:
                doc = authorList[count]
                if doc.get("id") in currentList:
                    doc.set("linked", "true")
def numberOfModifiedRecord(self):
    """Return the number of records flagged modified:true visible to the
    current user in this portal, as the numFound string from Solr."""
    indexer = self.services.getIndexer()
    portalQuery = self.services.getPortalManager().get(self.portal.getName()).getQuery()
    portalSearchQuery = self.services.getPortalManager().get(self.portal.getName()).getSearchQuery()

    # Security prep work: non-admins only see records whose security
    # filter matches one of their roles, records with an explicit
    # exception naming them, or records they own.
    current_user = self.page.authentication.get_username()
    security_roles = self.page.authentication.get_roles_list()
    security_filter = 'security_filter:("' + '" OR "'.join(security_roles) + '")'
    security_exceptions = 'security_exception:"' + current_user + '"'
    owner_query = 'owner:"' + current_user + '"'
    security_query = "(" + security_filter + ") OR (" + security_exceptions + ") OR (" + owner_query + ")"

    req = SearchRequest("modified:true")
    req.setParam("fq", 'item_type:"object"')
    if portalQuery:
        req.addParam("fq", portalQuery)
    if portalSearchQuery:
        req.addParam("fq", portalSearchQuery)
    # BUG FIX: removed req.addParam("fq", "") — an empty filter query is
    # at best ignored and at worst an invalid Solr filter.
    req.setParam("rows", "0")
    if not self.page.authentication.is_admin():
        req.addParam("fq", security_query)
    out = ByteArrayOutputStream()
    indexer.search(req, out)
    self.__result = JsonSimpleConfig(ByteArrayInputStream(out.toByteArray()))
    return self.__result.getString(None, "response", "numFound")
def handleQuery(self, query, fieldName, formatStr):
    """Search the dataset index with `query`, collect the distinct values
    of `fieldName` (optionally filtered to those containing self.term),
    and write them to self.writer as a JSON array rendered via
    `formatStr` (which receives a {"hit": value} mapping)."""
    out = ByteArrayOutputStream()
    req = SearchRequest(query)
    req.setParam("fq", 'item_type:"object"')
    req.setParam("fq", 'workflow_id:"dataset"')
    req.setParam("rows", "1000")
    self.indexer.search(req, out)
    res = SolrResult(ByteArrayInputStream(out.toByteArray()))
    hits = HashSet()
    if res.getNumFound() > 0:
        for searchRes in res.getResults():
            searchResList = searchRes.getList(fieldName)
            if not searchResList.isEmpty():
                for hit in searchResList:
                    # Optional substring filter on the collected values.
                    if self.term is None or hit.find(self.term) != -1:
                        hits.add(hit)
        # BUG FIX: 'print' is a Python 2 keyword, so self.writer.print(...)
        # is a syntax error in Jython; PrintWriter.write(String) emits the
        # same characters.
        self.writer.write("[")
        hitnum = 0
        for hit in hits:
            if hitnum > 0:
                self.writer.write("," + formatStr % {"hit": hit})
            else:
                self.writer.write(formatStr % {"hit": hit})
            hitnum += 1
        self.writer.write("]")
    else:
        self.writer.println("[\"\"]")
    self.writer.close()
def numberOfModifiedRecord(self):
    """Return the number of records flagged modified:true visible to the
    current user in this portal (Solr's response/numFound)."""
    indexer = self.services.getIndexer()
    portalQuery = self.services.getPortalManager().get(self.portal.getName()).getQuery()
    portalSearchQuery = self.services.getPortalManager().get(self.portal.getName()).getSearchQuery()

    # Security prep work: non-admins only see records whose security
    # filter matches one of their roles, records with an explicit
    # exception naming them, or records they own.
    current_user = self.page.authentication.get_username()
    security_roles = self.page.authentication.get_roles_list()
    security_filter = 'security_filter:("' + '" OR "'.join(security_roles) + '")'
    security_exceptions = 'security_exception:"' + current_user + '"'
    owner_query = 'owner:"' + current_user + '"'
    security_query = "(" + security_filter + ") OR (" + security_exceptions + ") OR (" + owner_query + ")"

    req = SearchRequest("modified:true")
    req.setParam("fq", 'item_type:"object"')
    if portalQuery:
        req.addParam("fq", portalQuery)
    if portalSearchQuery:
        req.addParam("fq", portalSearchQuery)
    # BUG FIX: removed req.addParam("fq", "") — an empty filter query is
    # at best ignored and at worst an invalid Solr filter.
    req.setParam("rows", "0")
    if not self.page.authentication.is_admin():
        req.addParam("fq", security_query)
    out = ByteArrayOutputStream()
    indexer.search(req, out)
    self.__result = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))
    return self.__result.get("response/numFound")
def __getUsers(self, oid):
    """Return a JSON string {"owner": ..., "viewers": [...]} describing
    the owner and security exceptions of object `oid`; on failure logs
    the error and returns None implicitly."""
    indexer = Services.getIndexer()
    req = SearchRequest("id:" + oid)
    # Only the two fields we report on.
    req.setParam("fl", "security_exception,owner")
    out = ByteArrayOutputStream()
    indexer.search(req, out)
    rtJson = ""  # NOTE(review): unused; left as-is
    try:
        qresult = SolrResult(ByteArrayInputStream(
            out.toByteArray())).getResults().get(0)
        owner = qresult.getString(None, 'owner')
        secException = qresult.getArray('security_exception')
        if secException is None:
            secException = JSONArray()
        self.log.debug("Owner of object: " + owner)
        self.log.debug("Viewer(s) of object: " + secException.toString())
        # The owner is implicitly a viewer; don't report them twice.
        if secException.contains(owner):
            secException.remove(owner)
        return '{"owner":"' + owner + '", "viewers": ' + secException.toString() + '}'
    except Exception, e:
        self.log.error("Error during query/package ownership data" + str(e))
def _toPDF(fis):
    """Render the XHTML read from `fis` into a PDF via iText XMLWorker
    and return the PDF content as a byte array."""
    buffer = ByteArrayOutputStream()
    document = Document()
    pdfWriter = PdfWriter.getInstance(document, buffer)
    # Document header attributes.
    document.addAuthor("betterThanZero")
    document.addCreationDate()
    document.addProducer()
    document.addCreator("MySampleCode.com")
    document.addTitle("Demo for iText XMLWorker")
    document.setPageSize(PageSize.LETTER)
    document.open()
    # Parse the XHTML input straight into the open PDF document.
    XMLWorkerHelper.getInstance().parseXHtml(pdfWriter, document, fis)
    document.close()
    pdfWriter.close()
    return buffer.toByteArray()
def search_solr(self):
    """Fetch every annotation for this object's root URI (or URI list),
    optionally filtered by annotation type, and hand the response docs to
    the tag or generic processor."""
    if self.rootUriList:
        target = "(" + " OR ".join(self.rootUriList) + ")"
    else:
        target = "\"" + self.rootUri + "\""
    solrQuery = "(rootUri:" + target
    if self.type:
        solrQuery += " AND type:\"" + self.type + "\""
    solrQuery += ")"

    request = SearchRequest(solrQuery)
    request.setParam("facet", "false")
    request.setParam("rows", str(99999))
    request.setParam("sort", "dateCreated asc")
    request.setParam("start", str(0))

    buffer = ByteArrayOutputStream()
    Services.indexer.annotateSearch(request, buffer)
    parsed = JsonConfigHelper(ByteArrayInputStream(buffer.toByteArray()))
    docs = parsed.getJsonList("response/docs")

    # Tags have a dedicated processing path; everything else is generic.
    if self.type == "http://www.purl.org/anotar/ns/type/0.1#Tag":
        return self.process_tags(docs)
    return self.process_response(docs)
def _encode(self, img, g, size, **opts):
    """Encode `img` in self.format: write to opts['file'] when given,
    otherwise return the encoded bytes (as a Python string when
    opts['strencode'] is present, via the same java.lang.String
    round-trip as before)."""
    target = opts.get('file')
    if target:
        # Write directly to the named file.
        ImageIO.write(img, self.format, util.toFile(target))
    else:
        # Encode into an in-memory buffer.
        stream = ByteArrayOutputStream()
        ImageIO.write(img, self.format, stream)
        data = stream.toByteArray()
        if opts.has_key('strencode'):
            return str(String(data, 0, 0, len(data)))
        return stream.toByteArray()
def handleWorkflowStep(self):
    """Write a JSON array of {value,label} pairs for every distinct
    workflow_step_label on dataset objects, or [\"\"] when none match."""
    buffer = ByteArrayOutputStream()
    request = SearchRequest("workflow_step_label:[* TO *]")
    request.setParam("fq", 'item_type:"object"')
    request.setParam("fq", 'workflow_id:"dataset"')
    request.setParam("rows", "1000")
    self.indexer.search(request, buffer)
    result = SolrResult(ByteArrayInputStream(buffer.toByteArray()))
    labels = HashSet()
    if result.getNumFound() > 0:
        # Collect the distinct labels across all matching records.
        for record in result.getResults():
            values = record.getList("workflow_step_label")
            if not values.isEmpty():
                for value in values:
                    labels.add(value)
        self.writer.println("[")
        index = 0
        for hit in labels:
            if index > 0:
                self.writer.println(",{\"value\": \"%s\",\n\"label\": \"%s\"}" % (hit, hit))
            else:
                self.writer.println("{\"value\": \"%s\",\n\"label\": \"%s\"}" % (hit, hit))
            index += 1
        self.writer.println("]")
    else:
        self.writer.println("[\"\"]")
    self.writer.close()
def handleQuery(self, query, fieldName, formatStr):
    """Search the dataset index with `query`, collect the distinct values
    of `fieldName` (optionally filtered to those containing self.term),
    and write them to self.writer as a JSON array rendered via
    `formatStr` (which receives a {"hit": value} mapping)."""
    out = ByteArrayOutputStream()
    req = SearchRequest(query)
    req.setParam("fq", 'item_type:"object"')
    req.setParam("fq", 'workflow_id:"dataset"')
    req.setParam("rows", "1000")
    self.indexer.search(req, out)
    res = SolrResult(ByteArrayInputStream(out.toByteArray()))
    hits = HashSet()
    if res.getNumFound() > 0:
        for searchRes in res.getResults():
            searchResList = searchRes.getList(fieldName)
            if not searchResList.isEmpty():
                for hit in searchResList:
                    # Optional substring filter on the collected values.
                    if self.term is None or hit.find(self.term) != -1:
                        hits.add(hit)
        # BUG FIX: 'print' is a Python 2 keyword, so self.writer.print(...)
        # is a syntax error in Jython; PrintWriter.write(String) emits the
        # same characters.
        self.writer.write("[")
        hitnum = 0
        for hit in hits:
            if hitnum > 0:
                self.writer.write("," + formatStr % {"hit": hit})
            else:
                self.writer.write(formatStr % {"hit": hit})
            hitnum += 1
        self.writer.write("]")
    else:
        self.writer.println("[\"\"]")
    self.writer.close()
def __getSolrData(self): prefix = self.getSearchTerms() if prefix != "": query = 'dc_title:"%(prefix)s" OR dc_title:"%(prefix)s*"' % { "prefix" : prefix } else: query = "*:*" portal = self.services.portalManager.get(self.portalId) if portal.searchQuery != "*:*": query = query + " AND " + portal.searchQuery req = SearchRequest(query) req.setParam("fq", 'item_type:"object"') if portal.query: req.addParam("fq", portal.query) req.setParam("fl", "score") req.setParam("sort", "score desc, f_dc_title asc") req.setParam("start", self.getStartIndex()) req.setParam("rows", self.getItemsPerPage()) try: out = ByteArrayOutputStream() indexer = self.services.getIndexer() indexer.search(req, out) return JsonConfigHelper(ByteArrayInputStream(out.toByteArray())) except Exception, e: self.log.error("Failed to lookup '{}': {}", prefix, str(e))
def output_serialized_model(key, model):
    """Java-serialize `model` and return the raw bytes.

    `key` is unused but retained for interface compatibility.
    """
    byteSink = ByteArrayOutputStream()
    objectStream = ObjectOutputStream(byteSink)
    objectStream.writeObject(model)
    objectStream.flush()
    objectStream.close()
    return byteSink.toByteArray()
def __getSolrData(self): prefix = self.getSearchTerms() if prefix != "": terms = prefix.split(" ") if len(terms)>1: termsQuery = " OR %s" ' OR '.join(terms) else: termsQuery = "" queryValue = "%(prefix)s OR %(prefix)s*%(terms)s" % { "prefix": prefix, "terms": termsQuery } query = 'dc_title:(%(qv)s)^2 OR dc_identifier:(%(qv)s)^0.5' % { "qv": queryValue } else: query = "*:*" portal = self.services.portalManager.get(self.portalId) sq = portal.searchQuery if sq not in ["", "*:*"]: query = query + " AND " + portal.searchQuery req = SearchRequest(query) req.setParam("fq", 'item_type:"object"') if portal.query: req.addParam("fq", portal.query) req.setParam("fl", "score") req.setParam("sort", "score desc, f_dc_title asc") req.setParam("start", self.getStartIndex()) req.setParam("rows", self.getItemsPerPage()) try: out = ByteArrayOutputStream() indexer = self.services.getIndexer() indexer.search(req, out) return SolrResult(ByteArrayInputStream(out.toByteArray())) except Exception, e: self.log.error("Failed to lookup '{}': {}", prefix, e.getMessage())
def _encode(self, img, g, size, **opts):
    """Encode `img` in self.format: write to opts['file'] when given,
    otherwise return the encoded bytes (as a Python string when
    opts['strencode'] is present, via the same java.lang.String
    round-trip as before)."""
    destination = opts.get('file')
    if destination:
        # Write directly to the named file.
        ImageIO.write(img, self.format, util.toFile(destination))
    else:
        # Encode into an in-memory buffer.
        sink = ByteArrayOutputStream()
        ImageIO.write(img, self.format, sink)
        raw = sink.toByteArray()
        if opts.has_key('strencode'):
            return str(String(raw, 0, 0, len(raw)))
        return sink.toByteArray()
def __getStorageId(self, oid):
    """Look up and return the storage_id indexed for object `oid`."""
    request = SearchRequest('id:"%s"' % oid)
    request.addParam("fl", "storage_id")
    buffer = ByteArrayOutputStream()
    Services.indexer.search(request, buffer)
    data = JsonConfigHelper(ByteArrayInputStream(buffer.toByteArray()))
    firstDoc = data.getList("response/docs").get(0)
    return firstDoc.get("storage_id")
def image_byte_array(self):
    """Encode self.image() as PNG and return the raw bytes."""
    stream = ByteArrayOutputStream()
    ImageIO.write(self.image(), "png", stream)
    stream.flush()
    data = stream.toByteArray()
    stream.close()
    return data
def __search(self): self.__result = JsonConfigHelper() portal = Services.getPortalManager().get(portalId) recordsPerPage = portal.recordsPerPage query = formData.get("query") if query is None or query == "": query = "*:*" req = SearchRequest(query) req.setParam("facet", "true") req.setParam("rows", str(recordsPerPage)) req.setParam("facet.field", portal.facetFieldList) req.setParam("facet.limit", str(portal.facetCount)) req.setParam("sort", "f_dc_title asc") portalQuery = portal.query print " * portalQuery=%s" % portalQuery if portalQuery: req.addParam("fq", portalQuery) # TODO resumptionToken #req.setParam("start", str((self.__pageNum - 1) * recordsPerPage)) print " * oai.py:", req.toString() out = ByteArrayOutputStream() Services.indexer.search(req, out) self.__result = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))
def __getSolrData(self): prefix = self.getSearchTerms() if prefix: query = '%(prefix)s OR %(prefix)s*' % { "prefix" : prefix } else: query = "*:*" level = self.getFormData("level", None) if level is not None: if level=="top": #query += " AND skos_hasTopConcept:http*" query += ' AND dc_identifier:"http://purl.org/anzsrc/seo/#division"' else: query += ' AND skos_broader:"%s"' % level req = SearchRequest(query) req.addParam("fq", 'item_type:"object"') req.addParam("fq", 'repository_type:"SEO"') req.setParam("fl", "score") req.setParam("sort", "score desc") req.setParam("start", self.getStartIndex()) req.setParam("rows", self.getItemsPerPage()) try: out = ByteArrayOutputStream() indexer = self.services.getIndexer() indexer.search(req, out) return JsonConfigHelper(ByteArrayInputStream(out.toByteArray())) except Exception, e: self.log.error("Failed to lookup '{}': {}", prefix, str(e))
def checkAprovedRequests(self, provisioned=0, startPage=1):
    """ A customised query for package type of arms at workflow_step of arms-approved
    Get a list of requests filtered by provisioning_checklist
    """
    # NOTE(review): 'Aproved' typo is part of the public name and callers
    # depend on it, so it is left unchanged. `startPage` appears unused.
    workflowStep = "arms-approved"
    if self.packageType:
        req = SearchRequest(self.packageType)
    else:
        req = SearchRequest("packageType:arms")
    req.addParam("fq", 'workflow_step:' + workflowStep)
    # provisioning_checklist.4 present/absent distinguishes provisioned
    # requests from pending ones.
    if provisioned:
        req.addParam("fq", '-provisioning_checklist.4:null')
    else:
        req.addParam("fq", 'provisioning_checklist.4:null')
    req.setParam("sort", "date_object_modified desc, f_dc_title asc")
    req.setParam("fl", "id,dc_title,date-provisioned")
    out = ByteArrayOutputStream()
    self.indexer.search(req, out)
    solrResults = SolrResult(ByteArrayInputStream(out.toByteArray()))
    if solrResults:
        results = solrResults.getResults()
        if results:
            # Fold in workflow event history and record paging info.
            results = self.mergeEvents(results, ["arms_draft", "arms_redraft", "arms_review", "arms_approved", "arms_rejected"])
            self._setPaging(results.size())
            return results
        else:
            # NOTE(review): empty result set returns an empty list, but a
            # falsy solrResults falls through and returns None implicitly
            # — confirm callers handle both.
            return ArrayList()
def image_byte_array(self):
    """Encode self.image() as PNG and return the raw bytes."""
    pngBuffer = ByteArrayOutputStream()
    ImageIO.write(self.image(), "png", pngBuffer)
    pngBuffer.flush()
    encoded = pngBuffer.toByteArray()
    pngBuffer.close()
    return encoded
def __search(self):
    """Build a facet-only search (rows=0) for the requested facet.field,
    combining the user query with any session search query and session
    filter queries, plus a role-based security filter; stores the result
    in self.__facetList.

    Relies on script-scope globals: formData, sessionState, Services.
    """
    userQuery = formData.get("query")
    savedQuery = sessionState.get("searchQuery")
    if userQuery is None or userQuery == "":
        userQuery = "*:*"
    if savedQuery and userQuery == "*:*":
        userQuery = savedQuery
    elif savedQuery:
        userQuery += " AND " + savedQuery
    facetField = formData.get("facet.field")
    req = SearchRequest(userQuery)
    req.setParam("facet", "true")
    req.setParam("fl", "id")
    req.setParam("rows", "0")
    req.setParam("facet.limit", "-1")
    req.setParam("facet.field", facetField)
    # Carry over filter queries already stored in the session, then pin
    # the object type.
    sessionFq = sessionState.get("fq")
    if sessionFq is not None:
        req.setParam("fq", sessionFq)
    req.addParam("fq", 'item_type:"object"')
    # Restrict results to those visible to the caller's roles.
    roles = self.authentication.get_roles_list()
    req.addParam("fq", 'security_filter:("' + '" OR "'.join(roles) + '")')
    out = ByteArrayOutputStream()
    Services.getIndexer().search(req, out)
    result = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))
    self.__facetList = FacetList(facetField, result)
def __search(self):
    """Facet-only search (rows=0) combining the user query with any
    session search query and filter queries, plus a role-based security
    filter; returns a FacetList for the requested facet.field."""
    userQuery = self.formData.get("query")
    savedQuery = self.sessionState.get("searchQuery")
    if userQuery is None or userQuery == "":
        userQuery = "*:*"
    if savedQuery and userQuery == "*:*":
        userQuery = savedQuery
    elif savedQuery:
        userQuery += " AND " + savedQuery
    facetField = self.formData.get("facet.field")
    request = SearchRequest(userQuery)
    request.setParam("facet", "true")
    request.setParam("fl", "id")
    request.setParam("rows", "0")
    request.setParam("facet.limit", "-1")
    request.setParam("facet.field", facetField)
    # Carry over filter queries already stored in the session, then pin
    # the object type.
    sessionFq = self.sessionState.get("fq")
    if sessionFq is not None:
        request.setParam("fq", sessionFq)
    request.addParam("fq", 'item_type:"object"')
    # Restrict results to those visible to the caller's roles.
    roles = self.auth.get_roles_list()
    request.addParam("fq", 'security_filter:("' + '" OR "'.join(roles) + '")')
    buffer = ByteArrayOutputStream()
    self.services.indexer.search(request, buffer)
    solr = SolrResult(ByteArrayInputStream(buffer.toByteArray()))
    return FacetList(facetField, solr)
def export(self, exportType):
    """Stream all records matching this handler's facet value to the HTTP
    response in the requested Solr writer format (`exportType`, e.g. csv
    or json), delivered as a file-download attachment.

    Two passes: a rows=0 query to learn the total count, then the full
    export with that count.
    """
    exportQuery = "%s:%s" % (self.facetField, self.facetFieldValue)
    # BUG FIX: the content type was formatted with the builtin `type`
    # instead of the exportType parameter, producing a header like
    # "text/<type 'type'>; charset=UTF-8".
    outputType = "text/%s; charset=UTF-8" % exportType
    responseHeader = "attachment; filename=%s.%s" % (self.facetFieldValue, exportType)
    try:
        out = ByteArrayOutputStream()
        recnumreq = SearchRequest(exportQuery)
        recnumreq.setParam("fl", "create_timestamp")
        recnumreq.setParam("rows", "0")
        self.indexer.search(recnumreq, out)
        recnumres = SolrResult(ByteArrayInputStream(out.toByteArray()))
        self.__rowsFoundSolr = "%s" % recnumres.getNumFound()
    except:
        # Bare except kept deliberately: in Jython, Java exceptions are
        # not caught by a plain `except Exception`.
        self.errorMsg = "Export query failure. The issue has been logged (%s - %s)." % (sys.exc_info()[0], sys.exc_info()[1])
        self.log.error("Export query threw an exception (package type was %s): %s - %s" % (self.facetFieldValue, sys.exc_info()[0], sys.exc_info()[1]))
        return
    out = ByteArrayOutputStream()
    req = SearchRequest(exportQuery)
    req.setParam("wt", exportType)
    req.setParam("rows", self.__rowsFoundSolr)
    self.indexer.search(req, out)
    self.response.setHeader("Content-Disposition", responseHeader)
    writer = self.response.getPrintWriter(outputType)
    writer.println(out.toString("UTF-8"))
    writer.close()
def buildRequest(self, payload):
    """ This is the main method through which an extension interacts with a
    IScannerInsertionPoint instance. They provide the payload through the payload
    parameter and we replace it in our request. If the parameter type is something
    that could be handled by Burp's helpers we update it in that way, otherwise we
    do it by modifying the byte arrays directly.

    Args:
        payload: the active scanner payload provided by the extension.
    """
    # Copy the insertion-point bounds locally; the encoders may adjust
    # them along with the payload.
    start = self.start
    end = self.end
    # Encode the payload to suit where it is being inserted.
    if self.type == IScannerInsertionPoint.INS_PARAM_JSON:
        start, end, payload = self.encodeJson(start, end, payload)
    elif self.type == IScannerInsertionPoint.INS_HEADER:
        # Headers take the payload verbatim.
        pass
    elif self.type in [IScannerInsertionPoint.INS_PARAM_XML, IScannerInsertionPoint.INS_PARAM_XML_ATTR]:
        start, end, payload = self.encodeXml(start, end, payload)
    else:
        start, end, payload = self.encodeUrl(start, end, payload)
    # Splice the request bytes: prefix + payload + suffix.
    stream = ByteArrayOutputStream()
    stream.write(self.request[0:start])
    stream.write(payload)
    stream.write(self.request[end:])
    # The payload length may differ from the original value's, so the
    # Content-Length header must be recomputed.
    newRequestBytes = self.updateContentLength(stream.toByteArray())
    return newRequestBytes
def findPublishedRecords(self):
    """Return indexed records for publication.

    NOTE(review): the real query (published:"true", left commented out
    below) has been replaced by a hard-coded storage_id — presumably a
    debugging leftover; confirm before relying on this in production.
    """
    #req = SearchRequest("published:\"true\"")
    req = SearchRequest("storage_id:\"c6a214670dc644e5ebdaede4a2243f67\"")
    out = ByteArrayOutputStream()
    self.indexer.search(req, out)
    solrResult = SolrResult(ByteArrayInputStream(out.toByteArray()))
    return solrResult.getResults()
def __feed(self): portal = Services.getPortalManager().get(portalId) recordsPerPage = portal.recordsPerPage pageNum = sessionState.get("pageNum", 1) query = "*:*" req = SearchRequest(query) req.setParam("facet", "true") req.setParam("rows", str(recordsPerPage)) req.setParam("facet.field", portal.facetFieldList) req.setParam("facet.sort", "true") req.setParam("facet.limit", str(portal.facetCount)) req.setParam("sort", "f_dc_title asc") portalQuery = portal.query if portalQuery: req.addParam("fq", portalQuery) else: fq = sessionState.get("fq") req.setParam("fq", fq) req.setParam("start", str((pageNum - 1) * recordsPerPage)) print " * query: ", query print " * portalQuery='%s'" % portalQuery print " * feed.py:", req.toString() out = ByteArrayOutputStream() Services.indexer.search(req, out) self.__result = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))
def checkEventLogForEdits(self, oid):
    """Return True when the event log records at least one workflow Save
    event for object `oid`."""
    request = SearchRequest("oid:" + oid + " AND context:Workflow AND eventType:Save")
    buffer = ByteArrayOutputStream()
    self.indexer.searchByIndex(request, buffer, "eventLog")
    rows = SolrResult(ByteArrayInputStream(buffer.toByteArray())).getRows()
    return rows > 0
def findPublishedRecords(self):
    """Return indexed records for publication.

    NOTE(review): the real query (published:"true", left commented out
    below) has been replaced by a hard-coded storage_id — presumably a
    debugging leftover; confirm before relying on this in production.
    """
    #req = SearchRequest("published:\"true\"")
    req = SearchRequest("storage_id:\"c6a214670dc644e5ebdaede4a2243f67\"")
    out = ByteArrayOutputStream()
    self.indexer.search(req, out)
    solrResult = SolrResult(ByteArrayInputStream(out.toByteArray()))
    return solrResult.getResults()
def __feed(self):
    """Populate self.__result (a SolrResult) with a faceted feed search
    for the current portal page, driven by the velocity context."""
    self.portal = self.services.getPortalManager().get(self.vc("portalId"))
    recordsPerPage = self.portal.recordsPerPage
    pageNum = self.vc("sessionState").get("pageNum", 1)
    # The user-supplied query (defaulting to match-all) is always escaped
    # before reaching Solr.
    rawQuery = self.vc("formData").get("query")
    if not rawQuery:
        rawQuery = "*:*"
    query = self.__escapeQuery(rawQuery)
    req = SearchRequest(query)
    req.setParam("facet", "true")
    req.setParam("rows", str(recordsPerPage))
    req.setParam("facet.field", self.portal.facetFieldList)
    req.setParam("facet.sort", "true")
    req.setParam("facet.limit", str(self.portal.facetCount))
    req.setParam("sort", "f_dc_title asc")
    # Prefer the portal's own query; fall back to session filter queries.
    portalQuery = self.portal.query
    if portalQuery:
        req.addParam("fq", portalQuery)
    else:
        sessionFq = self.vc("sessionState").get("fq")
        if sessionFq is not None:
            req.setParam("fq", sessionFq)
    req.setParam("start", str((pageNum - 1) * recordsPerPage))
    self.log.debug(" * Query: '{}'", query)
    self.log.debug(" * portalQuery: '{}'", portalQuery)
    self.log.debug(" * feed.py: '{}'", req)
    out = ByteArrayOutputStream()
    self.services.indexer.search(req, out)
    self.__result = SolrResult(ByteArrayInputStream(out.toByteArray()))
def handleGrantNumber(self):
    """Write a JSON array of the distinct grant_numbers values matching
    the prefix in self.term, or [\"\"] when none match."""
    out = ByteArrayOutputStream()
    req = SearchRequest("grant_numbers:%s*" % self.term)
    req.setParam("fq", 'item_type:"object"')
    req.setParam("fq", 'workflow_id:"dataset"')
    req.setParam("rows", "1000")
    self.indexer.search(req, out)
    res = SolrResult(ByteArrayInputStream(out.toByteArray()))
    hits = HashSet()
    if res.getNumFound() > 0:
        for creatorRes in res.getResults():
            creatorList = creatorRes.getList("grant_numbers")
            if not creatorList.isEmpty():
                for hit in creatorList:
                    hits.add(hit)
        # BUG FIX: 'print' is a Python 2 keyword, so self.writer.print(...)
        # is a syntax error in Jython; PrintWriter.write(String) emits the
        # same characters.
        self.writer.write("[")
        hitnum = 0
        for hit in hits:
            if hitnum > 0:
                self.writer.write(",\"%s\"" % hit)
            else:
                self.writer.write("\"%s\"" % hit)
            hitnum += 1
        self.writer.write("]")
    else:
        self.writer.println("[\"\"]")
    self.writer.close()
def __call__(self, _input, profile_run=False, **kw):
    """Apply this XSLT transform to `_input` (an _ElementTree or
    _Element). Returns an _ElementTree for xml/html output methods, or an
    _XSLTResultTree carrying decoded text for other output methods."""
    nsmap = dict(xsl='http://www.w3.org/1999/XSL/Transform')
    # Defaults when the stylesheet declares no xsl:output hints.
    output_method = 'xml'
    output_encoding = 'utf-8'
    nodes = self.xsl_tree.xpath('xsl:output/@method', namespaces=nsmap)
    if len(nodes) > 0:
        output_method = nodes[0]
    nodes = self.xsl_tree.xpath('xsl:output/@encoding', namespaces=nsmap)
    if len(nodes) > 0:
        output_encoding = nodes[0]
    #print tostring(_input)
    if isinstance(_input, _ElementTree):
        doc_source = DOMSource(_input._dom_doc)
    elif isinstance(_input, _Element):
        # Xalan-J 2.7.1 does not support a DOMSource from an Element
        # so we build new document
        dom_doc = builder.newDocument()
        dom_root = dom_doc.importNode(_input._dom_element, True)
        dom_doc.appendChild(dom_root)
        doc_source = DOMSource(dom_doc)
    else:
        raise NotImplementedError()
    if output_method in ('xml', 'html'):
        # TODO: for testing
        # Serialize through a byte buffer, then re-parse so the result is
        # backed by a fresh DOM document.
        outputstream = ByteArrayOutputStream()
        result = StreamResult(outputstream)
        self.transformer.transform(doc_source, result)
        bytes = outputstream.toByteArray()
        inputstream = ByteArrayInputStream(bytes)
        try:
            dom_doc = builder.parse(inputstream)
        except:
            # Dump the unparseable transformer output for diagnosis.
            import sys
            sys.stderr.write(bytes.tostring())
            raise
        result_tree = _ElementTree(dom_doc)
        return result_tree
        # NOTE(review): the remainder of this branch is unreachable dead
        # code (a direct-DOMResult variant left behind the return above).
        result = DOMResult()
        self.transformer.transform(doc_source, result)
        dom_doc = result.getNode()
        result_tree = _ElementTree(dom_doc)
        #print tostring(result_tree)
        return result_tree
    else:
        # Non-XML output (e.g. text): capture the bytes and decode them
        # with the stylesheet's declared encoding.
        outputstream = ByteArrayOutputStream()
        result = StreamResult(outputstream)
        self.transformer.transform(doc_source, result)
        resultdoc = builder.newDocument()
        resulttree = _XSLTResultTree(resultdoc)
        resulttree._text = outputstream.toString(output_encoding)
        return resulttree
def __call__(self, _input, profile_run=False, **kw):
    """Apply this XSLT transform to `_input` (an _ElementTree or
    _Element). Returns an _ElementTree for xml/html output methods, or an
    _XSLTResultTree carrying decoded text for other output methods."""
    nsmap = dict(xsl='http://www.w3.org/1999/XSL/Transform')
    # Defaults when the stylesheet declares no xsl:output hints.
    output_method = 'xml'
    output_encoding = 'utf-8'
    nodes = self.xsl_tree.xpath('xsl:output/@method', namespaces=nsmap)
    if len(nodes) > 0:
        output_method = nodes[0]
    nodes = self.xsl_tree.xpath('xsl:output/@encoding', namespaces=nsmap)
    if len(nodes) > 0:
        output_encoding = nodes[0]
    #print tostring(_input)
    if isinstance(_input, _ElementTree):
        doc_source = DOMSource(_input._dom_doc)
    elif isinstance(_input, _Element):
        # Xalan-J 2.7.1 does not support a DOMSource from an Element
        # so we build new document
        dom_doc = builder.newDocument()
        dom_root = dom_doc.importNode(_input._dom_element, True)
        dom_doc.appendChild(dom_root)
        doc_source = DOMSource(dom_doc)
    else:
        raise NotImplementedError()
    if output_method in ('xml', 'html'):
        # TODO: for testing
        # Serialize through a byte buffer, then re-parse so the result is
        # backed by a fresh DOM document.
        outputstream = ByteArrayOutputStream()
        result = StreamResult(outputstream)
        self.transformer.transform(doc_source, result)
        bytes = outputstream.toByteArray()
        inputstream = ByteArrayInputStream(bytes)
        try:
            dom_doc = builder.parse(inputstream)
        except:
            # Dump the unparseable transformer output for diagnosis.
            import sys
            sys.stderr.write(bytes.tostring())
            raise
        result_tree = _ElementTree(dom_doc)
        return result_tree
        # NOTE(review): the remainder of this branch is unreachable dead
        # code (a direct-DOMResult variant left behind the return above).
        result = DOMResult()
        self.transformer.transform(doc_source, result)
        dom_doc = result.getNode()
        result_tree = _ElementTree(dom_doc)
        #print tostring(result_tree)
        return result_tree
    else:
        # Non-XML output (e.g. text): capture the bytes and decode them
        # with the stylesheet's declared encoding.
        outputstream = ByteArrayOutputStream()
        result = StreamResult(outputstream)
        self.transformer.transform(doc_source, result)
        resultdoc = builder.newDocument()
        resulttree = _XSLTResultTree(resultdoc)
        resulttree._text = outputstream.toString(output_encoding)
        return resulttree
def findPackagesToTransition(self, fromWorkflowId, fromWorkflowStage):
    """Return indexed packages at the given workflow id/step that have
    both an owner and a security filter set."""
    req = SearchRequest("workflow_id:" + fromWorkflowId + " AND _query_:\"workflow_step:" + fromWorkflowStage + "\"")
    req.setParam("fq", "owner:[* TO *]")
    # BUG FIX: the second filter used setParam, which replaced the owner
    # filter instead of adding a second one; addParam keeps both.
    req.addParam("fq", "security_filter:[* TO *]")
    out = ByteArrayOutputStream()
    self.indexer.search(req, out)
    solrResult = SolrResult(ByteArrayInputStream(out.toByteArray()))
    return solrResult.getResults()
def __isIndexed(self, oid):
    """Return True when object `oid` is present in the Solr index."""
    request = SearchRequest('id:"%s"' % oid)
    request.addParam("fq", 'item_type:"object"')
    buffer = ByteArrayOutputStream()
    self.Services.indexer.search(request, buffer)
    found = SolrResult(ByteArrayInputStream(buffer.toByteArray())).getNumFound()
    return found != 0
def _addPropertyValueToTFMeta(self, object, tfMetaPropertyValue):
    """Set copyTFPackage=`tfMetaPropertyValue` in the object's metadata
    and persist it into the TF-OBJ-META payload."""
    props = object.getMetadata()
    props.setProperty("copyTFPackage", tfMetaPropertyValue)
    # Round-trip the properties through a byte buffer to (re)write the
    # payload in the store.
    sink = ByteArrayOutputStream()
    props.store(sink, None)
    source = ByteArrayInputStream(sink.toByteArray())
    StorageUtils.createOrUpdatePayload(object, "TF-OBJ-META", source)
def _addPropertyValueToTFMeta(self, object, tfMetaPropertyValue):
    """Set the "copyTFPackage" metadata property and write the stored
    properties back to the object's TF-OBJ-META payload."""
    metadata = object.getMetadata()
    metadata.setProperty("copyTFPackage", tfMetaPropertyValue)
    serialized = ByteArrayOutputStream()
    metadata.store(serialized, None)
    StorageUtils.createOrUpdatePayload(object, "TF-OBJ-META", ByteArrayInputStream(serialized.toByteArray()))
def getProperBase64EncodingOfFloatArr(cur_scan_arr):
    # Serialise each value via DataOutputStream.writeFloat (big-endian
    # IEEE-754, Java's wire format) and base64-encode the raw bytes.
    byteSink = ByteArrayOutputStream()
    dataOut = DataOutputStream(byteSink)
    for value in cur_scan_arr:
        dataOut.writeFloat(value)
    dataOut.flush()
    return base64.b64encode(byteSink.toByteArray())
def getSuggestedNames(self):
    """Search the index for author records whose titles resemble the
    name held in this package's metadata.

    Returns a LinkedHashMap of author title -> ArrayList of matching
    Solr documents, ordered by relevance (boosted exact-form matches
    first). Also caches the maximum relevance score on self.__maxScore.
    """
    # search common forms
    lookupNames = []
    # assumes surname/firstName lists are non-empty — get(0) raises
    # otherwise; TODO confirm callers guarantee this.
    surname = self.__metadata.getList("surname").get(0)
    firstName = self.__metadata.getList("firstName").get(0)
    firstInitial = firstName[0].upper()
    secondName = self.__metadata.getList("secondName")
    if not secondName.isEmpty():
        secondName = secondName.get(0)
    if secondName and secondName != "":
        # Forms that include the middle name / initial.
        secondInitial = secondName[0].upper()
        lookupNames.append("%s, %s. %s." % (surname, firstInitial, secondInitial))
        lookupNames.append("%s, %s %s." % (surname, firstName, secondInitial))
        lookupNames.append("%s, %s %s" % (surname, firstName, secondName))
        lookupNames.append("%s %s %s" % (firstName, secondName, surname))
    # Forms built from first name and surname only.
    lookupNames.append("%s, %s." % (surname, firstInitial))
    lookupNames.append("%s, %s" % (surname, firstName))
    lookupNames.append("%s %s" % (firstName, surname))
    # Joined below inside '(dc_title:"...")', so the separator supplies
    # the closing/opening quotes between alternatives.
    query = '" OR dc_title:"'.join(lookupNames)
    # general word search from each part of the name
    parts = [p for p in self.getPackageTitle().split(" ") if len(p) > 0]
    query2 = " OR dc_title:".join(parts)
    # Exact name forms are boosted (^2.5) over loose word matches (^0.5).
    req = SearchRequest('(dc_title:"%s")^2.5 OR (dc_title:%s)^0.5' % (query, query2))
    self.log.info("suggestedNames query={}", req.query)
    req.setParam("fq", 'recordtype:"author"')
    req.addParam("fq", 'item_type:"object"')
    req.setParam("rows", "9999")
    req.setParam("fl", "score")
    req.setParam("sort", "score desc")
    # Make sure 'fq' has already been set in the session
    ##security_roles = self.authentication.get_roles_list();
    ##security_query = 'security_filter:("' + '" OR "'.join(security_roles) + '")'
    ##req.addParam("fq", security_query)
    out = ByteArrayOutputStream()
    indexer = self.services.getIndexer()
    indexer.search(req, out)
    result = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))
    # self.log.info("result={}", result.toString())
    docs = result.getJsonList("response/docs")
    # Group matching documents by their author title, preserving the
    # relevance order in which Solr returned them.
    map = LinkedHashMap()
    for doc in docs:
        authorName = doc.getList("dc_title").get(0)
        if map.containsKey(authorName):
            authorDocs = map.get(authorName)
        else:
            authorDocs = ArrayList()
        # Re-putting an existing key leaves insertion order unchanged.
        map.put(authorName, authorDocs)
        authorDocs.add(doc)
    # Floor of 1.0 so later score normalisation never divides by < 1.
    self.__maxScore = max(1.0, float(result.get("response/maxScore")))
    return map
def __search(self):
    """Run the OAI-PMH list query for the current portal, paging via
    resumption tokens stored in the session.

    Side effects: sets self.__result, self.__token, and persists
    self.__resumptionTokenList into the session.
    """
    self.__result = JsonConfigHelper()
    portal = self.services.getPortalManager().get(self.vc("portalId"))
    recordsPerPage = portal.recordsPerPage
    query = self.vc("formData").get("query")
    if query is None or query == "":
        query = "*:*"
    req = SearchRequest(query)
    req.setParam("facet", "true")
    req.setParam("rows", str(recordsPerPage))
    req.setParam("facet.field", portal.facetFieldList)
    req.setParam("facet.limit", str(portal.facetCount))
    req.setParam("sort", "f_dc_title asc")
    portalQuery = portal.query
    print " * portalQuery=%s" % portalQuery
    if portalQuery:
        # NOTE(review): "&" is not a boolean operator inside a Solr fq
        # value — presumably " AND " (or a second addParam) was intended;
        # confirm against the indexer before changing.
        portalQuery += "&item_type:object"
    else:
        # NOTE(review): += on a falsy value only works here if
        # portal.query is "" rather than None — verify.
        portalQuery += "item_type:object"
    req.addParam("fq", portalQuery)
    #Check if there's resumption token exist in the formData
    if self.__currentToken:
        # Continuing an existing harvest: resume from the stored offset.
        start = self.__currentToken.getStart()
        totalFound = self.__currentToken.getTotalFound()
        nextTokenStart = start+recordsPerPage
        if nextTokenStart < totalFound:
            self.__token = ResumptionToken(start=nextTokenStart, metadataPrefix=self.__metadataPrefix, sessionExpiry=self.__sessionExpiry)
    else:
        # Fresh harvest: first page, token points at the second page.
        start = 0
        # NOTE(review): this local is never used; self.__metadataPrefix
        # is used below instead.
        metadataPrefix = self.vc("formData").get("metadataPrefix")
        self.__token = ResumptionToken(start=recordsPerPage, metadataPrefix=self.__metadataPrefix, sessionExpiry=self.__sessionExpiry)
    req.setParam("start", str(start))
    print " * oai.py:", req.toString()
    out = ByteArrayOutputStream()
    self.services.indexer.search(req, out)
    self.__result = JsonConfigHelper(ByteArrayInputStream(out.toByteArray()))
    totalFound = int(self.__result.get("response/numFound"))
    # Drop the token when there is nothing (or nothing further) to page.
    if totalFound == 0:
        self.__token = None
    elif self.__token:
        if self.__token.getStart() < totalFound:
            self.__token.setTotalFound(totalFound)
        else:
            self.__token = None
    #Storing the resumptionToken to session
    if self.__token:
        self.__resumptionTokenList[self.__token.getToken()] = self.__token
        #(totalFound, self.__token.getConstructedToken())
        #Need to know how long the server need to store this token
        self.sessionState.set("resumptionTokenList", self.__resumptionTokenList)
def __searchExecute(self, search, count): try: search.setParam("start", str(count)) out = ByteArrayOutputStream() self.services.indexer.search(search, out) return SolrResult(ByteArrayInputStream(out.toByteArray())) except Exception, e: self.log.error("Error during search: ", e) return None
def test_serialization(self):
    # Round-trip a Python set through Java object serialization and
    # verify it compares equal to the original.
    original = set(range(5, 10))
    sink = ByteArrayOutputStream()
    writer = ObjectOutputStream(sink)
    writer.writeObject(original)
    writer.close()
    source = ByteArrayInputStream(sink.toByteArray())
    reader = ObjectInputStream(source)
    self.assertEqual(original, reader.readObject())
def getAttachments(self):
    """Return the Solr documents for every review attachment indexed
    against the current object (up to 1000)."""
    query = "attached_to:%s AND attachment_type:%s" % (self.oid, "review-attachments")
    request = SearchRequest(query)
    request.setParam("rows", "1000")
    responseBuffer = ByteArrayOutputStream()
    self.Services.indexer.search(request, responseBuffer)
    response = SolrResult(ByteArrayInputStream(responseBuffer.toByteArray()))
    return response.getResults()
def serialize(o, special=False):
    """Round-trip `o` through Java object serialization and return the
    deserialized copy.

    When `special` is true, PythonObjectInputStream is used for
    deserialization instead of the plain ObjectInputStream."""
    sink = ByteArrayOutputStream()
    writer = ObjectOutputStream(sink)
    writer.writeObject(o)
    if special:
        readerFactory = PythonObjectInputStream
    else:
        readerFactory = ObjectInputStream
    reader = readerFactory(ByteArrayInputStream(sink.toByteArray()))
    return reader.readObject()
def getFileBytes(path):
    """Read the file at `path` in 1 KB chunks and return its contents
    as a Java byte array.

    Raises whatever FileInputStream raises for a missing/unreadable
    file; the stream is always closed.
    """
    baos = ByteArrayOutputStream()
    fis = FileInputStream(File(path))
    try:
        chunk = jarray.zeros(1024, 'b')
        while True:
            readSize = fis.read(chunk)
            # read() returns -1 at end of stream.
            if readSize == -1:
                break
            baos.write(chunk, 0, readSize)
    finally:
        # Fix: the original never closed the stream, leaking the file
        # handle on every call.
        fis.close()
    return baos.toByteArray()
def findPackagesToTransition(self, fromWorkflowId, fromWorkflowStage):
    """Return the indexed records sitting at fromWorkflowStage of the
    given workflow, restricted to records with an owner and a security
    filter set.

    Returns the result list from the SolrResult (empty when nothing
    matches).
    """
    req = SearchRequest("workflow_id:" + fromWorkflowId + " AND _query_:\"workflow_step:" + fromWorkflowStage + "\"")
    req.setParam("fq", "owner:[* TO *]")
    # Fix: the second filter must be *added*; a second setParam("fq", ...)
    # replaces the owner filter instead of combining with it (see the
    # setParam/addParam pattern used elsewhere in this file).
    req.addParam("fq", "security_filter:[* TO *]")
    out = ByteArrayOutputStream()
    self.indexer.search(req, out)
    solrResult = SolrResult(ByteArrayInputStream(out.toByteArray()))
    return solrResult.getResults()
def findPackagesToPurge(self, packageType):
    """Return records of the given display type created more than seven
    days ago, restricted to records with an owner and a security filter.

    Only storage_id and the created/modified dates are fetched, since
    the caller presumably just needs identities for purging.
    """
    req = SearchRequest("display_type:" + packageType + " AND date_object_created:[* TO NOW-7DAY]")
    req.setParam("fq", "owner:[* TO *]")
    # Fix: the second filter must be *added*; a second setParam("fq", ...)
    # replaces the owner filter instead of combining with it (see the
    # setParam/addParam pattern used elsewhere in this file).
    req.addParam("fq", "security_filter:[* TO *]")
    req.setParam("fl", "storage_id,date_object_created,date_object_modified")
    out = ByteArrayOutputStream()
    self.indexer.search(req, out)
    solrResult = SolrResult(ByteArrayInputStream(out.toByteArray()))
    return solrResult.getResults()
def outputImage(self, bufferedImage, filename):
    # Write bufferedImage to disk, defaulting the extension to the first
    # supported format when the name has none.
    # NOTE(review): the format name is taken from the last three
    # characters, which assumes every SUPPORTED_FORMATS entry is a
    # three-letter extension — confirm.
    if not filename.endswith(SUPPORTED_FORMATS):
        filename += SUPPORTED_FORMATS[0]
    imageType = filename[-3:]
    from java.io import ByteArrayOutputStream, File
    from javax.imageio import ImageIO
    from com.raytheon.uf.common.util import FileUtil
    encoded = ByteArrayOutputStream()
    ImageIO.write(bufferedImage, imageType, encoded)
    FileUtil.bytes2File(encoded.toByteArray(), File(filename))
def testEncryptedPassword(self): credential_string = 'AES}0vlIcO+I+VWV9aQ1wzQUa1qtByh4D9d0I1dJHa7HsdE=' try: bos = ByteArrayOutputStream() dos = DataOutputStream(bos) dos.writeBytes(credential_string) byte_array = bos.toByteArray() dos.close() except IOException, ioe: self.fail('Unexpected exception writing out credential : ', str(ioe))
def __loadSolrData(self, oid):
    """Fetch the Solr record for `oid`, restricted to objects visible
    through the current portal's search query and filter query."""
    portal = self.vc("page").getPortal()
    query = 'id:"%s"' % oid
    if portal.getSearchQuery():
        query = query + " AND " + portal.getSearchQuery()
    request = SearchRequest(query)
    request.addParam("fq", 'item_type:"object"')
    request.addParam("fq", portal.getQuery())
    responseBuffer = ByteArrayOutputStream()
    self.vc("Services").getIndexer().search(request, responseBuffer)
    return SolrResult(ByteArrayInputStream(responseBuffer.toByteArray()))
class StringOutputStream(OutputStream):
    """A java.io.OutputStream backed by an in-memory byte buffer, whose
    contents can be retrieved once as a java.lang.String."""

    def __init__(self):
        self.stream = ByteArrayOutputStream()

    def write(self, b, off, len):
        # Delegate the three-argument write overload straight to the
        # byte buffer.
        self.stream.write(b, off, len)

    def get_string(self):
        # Decode everything written so far, then close the buffer; the
        # stream is not meant to be written to after this call.
        buffered = self.stream
        result = String(buffered.toByteArray())
        if buffered is not None:
            buffered.close()
        return result