def request(self, host, handler, request, verbose=False):
    """POST *request* to ``host + handler`` through self.urlopen.

    When *verbose* is true, an in-memory stream captures the transport's
    debug output and it is echoed after the call completes.

    :returns: whatever self.urlopen returns (the response as a string)
    """
    if verbose:
        debug_stream = cStringIO.StringIO()
    else:
        debug_stream = None
    response = self.urlopen(host + handler, 'POST', payload=request,
                            to_string=True, debug_stream=debug_stream)
    if debug_stream is not None:
        # rewind before reading: urlopen wrote into the stream
        debug_stream.seek(0)
        print_(debug_stream.read())
    return response
def to_xml(cls, value, tns, parent_elt, name='retval'):
    '''
    Serialize an attachment as a base64-encoded child element.

    Appends a ``{tns}name`` SubElement to *parent_elt* whose text is the
    base64 (MIME, line-wrapped) encoding of the attachment payload.  If
    ``value.data`` is set it is encoded directly; otherwise the file
    named by ``value.file_name`` is read and encoded.

    :raises Exception: if neither data nor a file_name is available

    Uses ``base64.encodebytes``: ``encodestring`` was deprecated and
    removed in Python 3.9.
    '''
    assert isinstance(value, cls)
    element = etree.ElementTree.SubElement(parent_elt, '{%s}%s' % (tns, name))
    if value.data:
        # the data has already been loaded: encode it in memory
        payload = value.data
        if not isinstance(payload, bytes):
            # tolerate text payloads by encoding them first
            payload = payload.encode('utf-8')
        element.text = base64.encodebytes(payload).decode('ascii')
    elif value.file_name:
        # the data hasn't been loaded, but a file has been specified;
        # 'with' guarantees the handle is closed even on read errors
        # (the original leaked it on failure and shadowed builtin `file`)
        with open(value.file_name, 'rb') as source:
            element.text = base64.encodebytes(source.read()).decode('ascii')
    else:
        raise Exception("Neither data nor a file_name has been specified")
def main():
    """Initialize the Samsung SPF-87H photo frame and push one picture.

    Switches the frame from Mass Storage to Mini Monitor mode, then
    resizes mypicture.jpg to the panel resolution and sends it as JPEG.
    """
    global dev, known_devices_list
    known_devices_list = get_known_devices()
    # define which frame to use, here use Samsung SPF-87H:
    # entry [0] is Mini Monitor mode, entry [1] is Mass Storage mode
    mini_monitor = known_devices_list[0]
    mass_storage = known_devices_list[1]
    dev = init_device(mini_monitor, mass_storage)
    print( "Frame is in Mini Monitor mode and initialized. Sending pictures now")

    # further manipulations to consider: convert / thumbnail / rotate / crop
    frame_image = Image.open("mypicture.jpg")
    frame_image = frame_image.resize((800, 480))  # panel is 800x480

    # serialize to an in-memory JPEG before sending to the device
    jpeg_buffer = StringIO.StringIO()
    frame_image.save(jpeg_buffer, "JPEG", quality=94)
    pic = jpeg_buffer.getvalue()
    jpeg_buffer.close()
    write_jpg2frame(dev, pic)
def runSingleTest(args):
    """Run one classification task, either in-process or as a subprocess.

    *args* is the tuple (className, outfilename, trainingparam, dsname,
    gtname, evalconfig) that ClassificationTask.run expects.
    """
    CLUSTER_MODE = True
    if not CLUSTER_MODE:
        # run directly in this process
        from .classificationtask import ClassificationTask
        task = ClassificationTask()
        try:
            task.run(*args)
        except Exception as e:
            log.error('Running task failed: %s' % e)
        return

    # spawn classificationtask.py and feed it the pickled arguments on stdin;
    # stdout/stderr are deliberately NOT captured so that logging messages
    # appear in "real-time"
    singleTaskFile = join(split(__file__)[0], 'classificationtask.py')
    proc = subprocess.Popen(['python', singleTaskFile],
                            stdin=subprocess.PIPE)
    payload = cStringIO.StringIO()
    cPickle.dump(args, payload)
    stdout, stderr = proc.communicate(payload.getvalue())
    # TODO: how do we check for errors here?
    # we can't use stderr, because that's where we log the messages...
def tempnam():
    ''' Return a temporary file name with a "tess_" prefix.

    Replaces os.tempnam (deprecated, and removed in Python 3) with
    tempfile.mktemp, which also emits no RuntimeWarning -- so the old
    trick of temporarily swapping sys.stderr is no longer needed.

    NOTE(review): like os.tempnam, this returns only a *name*; the file
    is not created, so a race exists before the caller opens it.  Callers
    that can accept a file object should prefer tempfile.NamedTemporaryFile.
    '''
    import tempfile
    return tempfile.mktemp(prefix='tess_')
def check(file_staged_for_commit, options):
    """Validate that a staged INI file parses cleanly.

    Files whose basename does not match ``options.ini_files`` are passed
    through unchecked.  Matching files are parsed with RawConfigParser;
    a parse error is printed and vetoes the commit.

    :param file_staged_for_commit: object with .path and .contents
    :param options: object whose .ini_files is an fnmatch pattern
    :returns: True if the file is acceptable, False otherwise
    """
    import io
    basename = os.path.basename(file_staged_for_commit.path)
    if not fnmatch.fnmatch(basename, options.ini_files):
        # not an INI file per the configured pattern: nothing to check
        return True
    # io.StringIO replaces cStringIO, which does not exist on Python 3
    contents = io.StringIO(file_staged_for_commit.contents)
    parser = configparser.RawConfigParser()
    try:
        # read_file replaces readfp, deprecated and removed in Python 3.12
        parser.read_file(contents, file_staged_for_commit.path)
    except configparser.Error as e:
        # report the parse problem and veto the commit
        print(e)
        return False
    else:
        return True
def format_lp(nodes, constraints_x, qa, constraints_y, qb): """ Maximize 4 x1 + 2 x2 + 3 x3 + x4 Subject To x1 + x2 <= 1 End """ lp_handle = cStringIO.StringIO() lp_handle.write("Maximize\n ") records = 0 for i, score in nodes: lp_handle.write("+ %d x%d " % (score, i)) # SCIP does not like really long string per row records += 1 if records % 10 == 0: lp_handle.write("\n") lp_handle.write("\n") num_of_constraints = 0 lp_handle.write("Subject To\n") for c in constraints_x: additions = " + ".join("x%d" % (x + 1) for x in c) lp_handle.write(" %s <= %d\n" % (additions, qa)) num_of_constraints += len(constraints_x) # non-self if not (constraints_x is constraints_y): for c in constraints_y: additions = " + ".join("x%d" % (x + 1) for x in c) lp_handle.write(" %s <= %d\n" % (additions, qb)) num_of_constraints += len(constraints_y) print("number of variables (%d), number of constraints (%d)" % (len(nodes), num_of_constraints), file=sys.stderr) lp_handle.write("Binary\n") for i, score in nodes: lp_handle.write(" x%d\n" % i) lp_handle.write("End\n") lp_data = lp_handle.getvalue() lp_handle.close() return lp_data
def csv(self, filename=None, **format_params):
    """Render the result set as comma-separated values.

    When *filename* is given, write there and return a
    CsvResultDescriptor; otherwise build the CSV in memory and return it
    as a string.  Any extra keyword arguments are forwarded to the
    underlying csv writer.  Returns None when there are no results.
    """
    if not self.pretty:
        return None  # no results
    outfile = open(filename, 'w') if filename else cStringIO.StringIO()
    writer = UnicodeWriter(outfile, **format_params)
    # header row first, then every data row
    writer.writerow(self.field_names)
    for row in self:
        writer.writerow(row)
    if filename:
        outfile.close()
        return CsvResultDescriptor(filename)
    return outfile.getvalue()
def __init__(self, fp, splitchap=False, toctree=None, deflang=None, labelprefix=''): self.splitchap = splitchap # split output at chapters? if splitchap: self.fp = StringIO.StringIO() # dummy one self.chapters = [self.fp] else: self.fp = fp # file pointer self.toctree = toctree # entries for the TOC tree self.deflang = deflang # default highlighting language self.labelprefix = labelprefix # prefix for all label names # indentation tools self.indentation = '' # current indentation string self.indentfirstline = True # indent the first line of next paragraph? self.indented = Indenter(self) # convenience context manager # paragraph flushing tools self.flush_cb = None # callback run on next paragraph flush, used # for properly separating field lists from # the following paragraph self.no_flushing = 0 # raise an error on paragraph flush? self.noflush = NoFlush(self) # convenience context manager # collected items to output later self.curpar = [] # text in current paragraph self.comments = [] # comments to be output after flushing self.indexentries = [] # indexentries to be output before flushing self.footnotes = [] # footnotes to be output at document end self.warnings = [] # warnings while writing # specials self.sectionlabel = '' # most recent \label command self.thisclass = '' # most recent classdesc name self.sectionmeta = None # current section metadata self.noescape = 0 # don't escape text nodes self.indexsubitem = '' # current \withsubitem text
def ini_parse(lines):
    """Parse a configuration string and return its 'default' section.

    :param lines: the configuration file contents
    :type lines: str
    :return: dict of key/value pairs from the [default] section

    Falls back to a regex scan (deprecated) when the text is not valid INI.
    """
    # io.StringIO replaces cStringIO, which does not exist on Python 3
    import io
    try:
        fd = io.StringIO(lines)
        parser = configparser.ConfigParser()
        # read_file replaces readfp, deprecated and removed in Python 3.12
        parser.read_file(fd)
        return dict(parser.items('default'))
    except Exception as e:
        try:
            # TODO: Remove the parsing of ini-like file via regex
            conf = find_all(INI, lines)
            logging.warning("Using non-INI files for database configuration "
                            "file is deprecated. Falling back to Regex.")
            logging.warning("INI parser error was: {}".format(str(e)))
            return conf
        except Exception:
            logging.warning("Couldn't parse non-INI config file using Regex")
            raise
def add_file(filename, contents):
    """Append an in-memory file to the enclosing `tar` archive.

    :param filename: archive member name
    :param contents: member payload; bytes, or str (encoded as UTF-8)

    tarfile.addfile reads raw bytes from fileobj, so the buffer must be
    a BytesIO (cStringIO does not exist on Python 3 and a text buffer
    would break the copy).  The size recorded in the header is computed
    from the encoded payload so str input cannot truncate the member.
    """
    import io
    data = contents if isinstance(contents, bytes) else contents.encode('utf-8')
    info = tarfile.TarInfo(filename)
    info.size = len(data)
    tar.addfile(tarinfo=info, fileobj=io.BytesIO(data))
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
    """Wrap stream *f* with a CSV writer emitting *encoding*-encoded rows.

    Python 2 ``UnicodeWriter`` recipe: rows are first rendered by
    ``csv.writer`` into an in-memory queue; presumably ``writerow``
    (defined elsewhere, not visible here) re-encodes the queued text and
    copies it onto the real stream -- verify against the full class.

    :param f: target file-like object
    :param dialect: csv dialect forwarded to csv.writer
    :param encoding: output character encoding
    :param kwds: extra keyword arguments forwarded to csv.writer
    """
    # Redirect output to a queue
    self.queue = cStringIO.StringIO()
    self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
    self.stream = f
    # incremental encoder keeps codec state across rows
    self.encoder = codecs.getincrementalencoder(encoding)()
def analyze(rpmfile, show_errors=False, opformat="json"):
    """Analyse a single RPM file for security-relevant properties.

    Walks the package payload (via libarchive) and the RPM header to
    record: setuid/setgid files and directories, POSIX file capabilities,
    polkit policy files, whether the package ships a daemon (SysV init
    script or systemd unit), and per-ELF-binary output from
    process_file().

    :param rpmfile: path to the .rpm file to inspect
    :param show_errors: when True, report non-ELF payload members on stderr
    :param opformat: "json" returns a JSON document; anything else returns
                     the accumulated CSV-style lines
    :returns: str (JSON or CSV text), or None when the file is missing,
              not an .rpm, or cannot be opened/parsed

    NOTE(review): Python 2 code (`print >>` syntax, cStringIO).
    """
    if not os.path.exists(rpmfile):
        print >> sys.stderr, "%s doesn't exists!" % rpmfile
        return
    if not rpmfile.endswith(".rpm"):
        # silently skip non-RPM inputs
        # print >> sys.stderr, "skipping %s" % os.path.basename(rpmfile)
        return
    try:
        # payload (file list and contents) comes from libarchive
        a = libarchive.Archive(rpmfile)
    except Exception as exc:
        print >> sys.stderr, rpmfile, str(exc)
        return
    try:
        # metadata (ownership, caps, NVR) comes from the RPM header;
        # signature verification is disabled so unsigned packages still load
        ts = rpm.TransactionSet()
        ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
        fd = os.open(rpmfile, os.O_RDONLY)
        h = ts.hdrFromFdno(fd)
        os.close(fd)
    except Exception as exc:
        print >> sys.stderr, rpmfile, str(exc)
        return

    # create lookup dictionary: file name -> [(user, group), ...]
    # print dir(h)
    # print dir(rpm)
    nvr = h[rpm.RPMTAG_NVR]
    package = h[rpm.RPMTAG_NAME]
    group = h[rpm.RPMTAG_GROUP]
    caps = h[rpm.RPMTAG_FILECAPS]
    names = h['FILENAMES']
    groups = h[rpm.RPMTAG_FILEGROUPNAME]
    users = h[rpm.RPMTAG_FILEUSERNAME]
    lookup = defaultdict(list)
    for n, u, g in zip(names, users, groups):
        lookup[n].append((u, g))

    # collect files that carry POSIX file capabilities
    filecaps = []
    for i, cap in enumerate(caps):
        if cap:
            filecaps.append([names[i], cap])

    pols = []
    lines = ""
    output = {}
    output["package"] = package
    output["group"] = group
    output["build"] = os.path.basename(rpmfile)
    output["files"] = []
    output["daemon"] = False
    output["nvr"] = nvr
    output["filecaps"] = filecaps
    output["polkit"] = False
    output["caps"] = False
    output["pols"] = pols
    if filecaps:
        output["caps"] = True

    # flag: set once any setuid/setgid file or directory has been seen
    flag = False
    for entry in a:
        directory = False
        size = entry.size
        # polkit checks, "startswith" is better but ...
        if "/etc/polkit" in entry.pathname or \
           "/usr/share/PolicyKit" in entry.pathname or \
           "/usr/share/polkit-1" in entry.pathname:
            pols.append(entry.pathname)
            output["polkit"] = True
        # check if package is a daemon (init script or systemd unit)
        if "/etc/rc.d/init.d" in entry.pathname or \
           "/lib/systemd" in entry.pathname:
            output["daemon"] = True
        # skip 0 byte files only
        # NOTE: size can be 0 due to compression also!
        if size == 0 and not stat.S_ISDIR(entry.mode):
            continue
        # we are only interested in particular kind of directories:
        # those carrying the setuid or setgid bit
        if stat.S_ISDIR(entry.mode):
            if not ((entry.mode & stat.S_ISUID) or (stat.S_ISGID & entry.mode)):
                continue
            else:
                flag = True
                directory = True
        # check for executable flag
        # if not (entry.mode & 0111):
        #     continue
        # always report setxid files
        if ((entry.mode & stat.S_ISUID) or (stat.S_ISGID & entry.mode)):
            flag = True
        # skip library files
        filename = entry.pathname.lstrip(".")
        # if not flag and (("lib" in filename and ".so" in filename) or \
        #         filename.endswith(".so")):
        #     continue
        try:
            contents = a.read(size)
        except Exception:
            # unreadable payload member: move on
            continue
        # invoke checksec only on files
        returncode = -1
        if not directory:
            try:
                fh = cStringIO.StringIO(contents)
                elf = Elf(fh)
                if opformat == "json":
                    out = process_file(elf, deps=True)
                    # polkit check 2: binary links against polkit
                    if "polkit" in out:
                        output["polkit"] = True
                else:
                    out = process_file(elf)
                dataline = "%s,%s,%s,mode=%s,%s" % (
                    package, os.path.basename(rpmfile), filename,
                    oct(entry.mode), out)
                returncode = 0
            except ELFError as exc:
                if show_errors:
                    print >> sys.stderr, "%s,%s,Not an ELF binary" % \
                        (filename, str(exc))
                continue
            except IOError as exc:
                if show_errors:
                    print >> sys.stderr, "%s,%s,Not an ELF binary" % \
                        (filename, str(exc))
                continue
        if flag or returncode == 0:
            # populate fileinfo object for setxid entries and ELF binaries
            fileinfo = {}
            fileinfo["name"] = filename
            fileinfo["size"] = entry.size
            fileinfo["mode"] = entry.mode
            fileinfo["user"], fileinfo["group"] = lookup[filename][0]
            if directory:
                fileinfo["directory"] = directory
            output["files"].append(fileinfo)
            if returncode == 0 and opformat == "csv":
                lines = lines + dataline + "\n"
            else:
                # print >> sys.stderr, dataline
                pass
            if returncode == 0 and opformat == "json":
                # fold "key=value" pairs from process_file() into fileinfo;
                # best effort: malformed output is silently ignored
                try:
                    for kvp in out.split(","):
                        key, value = kvp.split("=")
                        fileinfo[key] = value
                except Exception:
                    pass
    a.close()
    if opformat == "json":
        return json.dumps(output)
    else:
        return lines.rstrip()
def new_chapter(self):
    """Start a new chapter buffer.

    Called only when self.splitchap is True: allocate a fresh in-memory
    buffer, record it in self.chapters, and make it the active output
    stream (self.fp).
    """
    chapter_fp = StringIO.StringIO()
    self.chapters.append(chapter_fp)
    self.fp = chapter_fp