# Successive revisions of run() and create_filesystem() from dumbo's
# streaming backend. The helpers they call live elsewhere in the package;
# the imports below are assumptions based on usage (Iteration and
# StreamingFileSystem are defined alongside these methods in the backend
# module).
import os
import re
import sys

from dumbo.util import (configopts, envdef, execute, findhadoop, findjar,
                        getopt, getopts)


def create_filesystem(self, opts):
    hadoopopt = getopt(opts, 'hadoop', delete=False)
    hadoopshort = hadoopopt[0]
    hadoopdir = findhadoop(hadoopopt[0])
    allopts = configopts('streaming')
    allopts += configopts('streaming_' + hadoopshort)
    streamingjar = getopt(allopts, 'streamingjar')
    if streamingjar:
        streamingjar = streamingjar[0]
    return StreamingFileSystem(hadoopdir, streamingjar)
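# create_filesystem() above resolves a streaming jar from dumbo's
# configuration before building the filesystem wrapper. A sketch of the
# kind of INI section it would read; the section and key names come from
# the configopts('streaming'...) and getopt(..., 'streamingjar') calls
# above, while the file location (~/.dumborc or /etc/dumbo.conf) and jar
# paths are illustrative assumptions:
#
#   [streaming]
#   streamingjar: /usr/lib/hadoop/contrib/streaming/hadoop-streaming.jar
#
#   [streaming_0.20]
#   streamingjar: /usr/lib/hadoop-0.20/contrib/streaming/hadoop-streaming-0.20.jar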
def run(self):
    retval = Iteration.run(self)
    if retval != 0:
        return retval
    if os.path.exists(self.prog):
        self.opts.append(("file", self.prog))
    addedopts = getopts(self.opts, [
        "hadoop", "name", "delinputs", "libegg", "libjar", "inputformat",
        "outputformat", "nummaptasks", "numreducetasks", "priority", "queue",
        "cachefile", "cachearchive", "file", "codewritable", "addpath",
        "getpath", "python", "streamoutput", "pypath",
    ])
    hadoop = findhadoop(addedopts["hadoop"][0])
    streamingjar = findjar(hadoop, "streaming")
    if not streamingjar:
        print >> sys.stderr, "ERROR: Streaming jar not found"
        return 1
    try:
        import typedbytes
    except ImportError:
        print >> sys.stderr, 'ERROR: "typedbytes" module not found'
        return 1
    modpath = re.sub(r"\.egg.*$", ".egg", typedbytes.__file__)
    if modpath.endswith(".egg"):
        addedopts["libegg"].append(modpath)
    else:
        self.opts.append(("file", modpath))
    self.opts.append(("jobconf", "stream.map.input=typedbytes"))
    self.opts.append(("jobconf", "stream.reduce.input=typedbytes"))
    if addedopts["numreducetasks"] and addedopts["numreducetasks"][0] == "0":
        self.opts.append(("jobconf", "stream.reduce.output=typedbytes"))
        if addedopts["streamoutput"]:
            id_ = addedopts["streamoutput"][0]
            self.opts.append(("jobconf", "stream.map.output=" + id_))
        else:
            self.opts.append(("jobconf", "stream.map.output=typedbytes"))
    else:
        self.opts.append(("jobconf", "stream.map.output=typedbytes"))
        if addedopts["streamoutput"]:
            id_ = addedopts["streamoutput"][0]
            self.opts.append(("jobconf", "stream.reduce.output=" + id_))
        else:
            self.opts.append(("jobconf", "stream.reduce.output=typedbytes"))
    if not addedopts["name"]:
        self.opts.append(("jobconf", "mapred.job.name=" + self.prog.split("/")[-1]))
    else:
        self.opts.append(("jobconf", "mapred.job.name=%s" % addedopts["name"][0]))
    if addedopts["nummaptasks"]:
        self.opts.append(("jobconf", "mapred.map.tasks=%s" % addedopts["nummaptasks"][0]))
    if addedopts["numreducetasks"]:
        numreducetasks = int(addedopts["numreducetasks"][0])
        self.opts.append(("numReduceTasks", str(numreducetasks)))
    if addedopts["priority"]:
        self.opts.append(("jobconf", "mapred.job.priority=%s" % addedopts["priority"][0]))
    if addedopts["queue"]:
        self.opts.append(("jobconf", "mapred.job.queue.name=%s" % addedopts["queue"][0]))
    if addedopts["cachefile"]:
        for cachefile in addedopts["cachefile"]:
            self.opts.append(("cacheFile", cachefile))
    if addedopts["cachearchive"]:
        for cachearchive in addedopts["cachearchive"]:
            self.opts.append(("cacheArchive", cachearchive))
    if addedopts["file"]:
        for file in addedopts["file"]:
            if "://" not in file:
                if not os.path.exists(file):
                    raise ValueError('file "' + file + '" does not exist')
                file = "file://" + os.path.abspath(file)
            self.opts.append(("file", file))
    if not addedopts["inputformat"]:
        addedopts["inputformat"] = ["auto"]
    inputformat_shortcuts = {
        "code": "org.apache.hadoop.streaming.AutoInputFormat",
        "text": "org.apache.hadoop.mapred.TextInputFormat",
        "sequencefile": "org.apache.hadoop.streaming.AutoInputFormat",
        "auto": "org.apache.hadoop.streaming.AutoInputFormat",
    }
    inputformat_shortcuts.update(configopts("inputformats", self.prog))
    inputformat = addedopts["inputformat"][0]
    if inputformat.lower() in inputformat_shortcuts:
        inputformat = inputformat_shortcuts[inputformat.lower()]
    self.opts.append(("inputformat", inputformat))
    if not addedopts["outputformat"]:
        addedopts["outputformat"] = ["sequencefile"]
    # check the option's first value (the original compared the whole list)
    if addedopts["getpath"] and addedopts["getpath"][0] != "no":
        outputformat_shortcuts = {
            "code": "fm.last.feathers.output.MultipleSequenceFiles",
            "text": "fm.last.feathers.output.MultipleTextFiles",
            "raw": "fm.last.feathers.output.MultipleRawFileOutputFormat",
            "sequencefile": "fm.last.feathers.output.MultipleSequenceFiles",
        }
    else:
        outputformat_shortcuts = {
            "code": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
            "text": "org.apache.hadoop.mapred.TextOutputFormat",
            "raw": "fm.last.feathers.output.RawFileOutputFormat",
            "sequencefile": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
        }
    outputformat_shortcuts.update(configopts("outputformats", self.prog))
    outputformat = addedopts["outputformat"][0]
    if outputformat.lower() in outputformat_shortcuts:
        outputformat = outputformat_shortcuts[outputformat.lower()]
    self.opts.append(("outputformat", outputformat))
    if addedopts["addpath"] and addedopts["addpath"][0] != "no":
        self.opts.append(("cmdenv", "dumbo_addpath=true"))
    pyenv = envdef("PYTHONPATH", addedopts["libegg"], "file", self.opts,
                   shortcuts=dict(configopts("eggs", self.prog)),
                   quote=False, trim=True, extrapaths=addedopts["pypath"])
    if pyenv:
        self.opts.append(("cmdenv", pyenv))
    hadenv = envdef("HADOOP_CLASSPATH", addedopts["libjar"], "libjar",
                    self.opts, shortcuts=dict(configopts("jars", self.prog)))
    fileopt = getopt(self.opts, "file")
    if fileopt:
        tmpfiles = []
        for file in fileopt:
            if file.startswith("file://"):
                self.opts.append(("file", file[7:]))
            else:
                tmpfiles.append(file)
        if tmpfiles:
            self.opts.append(("jobconf", "tmpfiles=" + ",".join(tmpfiles)))
    libjaropt = getopt(self.opts, "libjar")
    if libjaropt:
        tmpjars = []
        for jar in libjaropt:
            if jar.startswith("file://"):
                self.opts.append(("file", jar[7:]))
            else:
                tmpjars.append(jar)
        if tmpjars:
            self.opts.append(("jobconf", "tmpjars=" + ",".join(tmpjars)))
    cmd = hadoop + "/bin/hadoop jar " + streamingjar
    retval = execute(cmd, self.opts, hadenv)
    if addedopts["delinputs"] and addedopts["delinputs"][0] == "yes":
        for (key, value) in self.opts:
            if key == "input":
                if os.path.exists(hadoop + "/bin/hdfs"):
                    hdfs = hadoop + "/bin/hdfs"
                else:
                    hdfs = hadoop + "/bin/hadoop"
                execute("%s dfs -rmr '%s'" % (hdfs, value))
    return retval
def create_filesystem(self, opts):
    hadoopopt = getopt(opts, "hadoop", delete=False)
    return StreamingFileSystem(findhadoop(hadoopopt[0]))
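# run() above (and the variants that follow) treat self.opts as a flat list
# of (key, value) tuples and lean on dumbo's getopt/getopts helpers to pull
# options out of it. A minimal sketch of the behaviour those calls imply;
# these are hypothetical stand-ins, not the real dumbo.util implementations:

def _getopt_sketch(opts, key, delete=True):
    """Return every value stored under key; drop the pairs unless delete=False."""
    values = [v for k, v in opts if k == key]
    if delete:
        opts[:] = [(k, v) for k, v in opts if k != key]
    return values

def _getopts_sketch(opts, keys):
    """Map each requested key to its (possibly empty) list of values."""
    return dict((key, _getopt_sketch(opts, key)) for key in keys)

# For example:
#   opts = [('hadoop', '/usr/lib/hadoop'), ('input', 'data.txt')]
#   _getopts_sketch(opts, ['hadoop', 'name'])
#   -> {'hadoop': ['/usr/lib/hadoop'], 'name': []}
#   and opts is left holding only the ('input', 'data.txt') pair.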
def run(self):
    retval = Iteration.run(self)
    if retval != 0:
        return retval
    if os.path.exists(self.prog):
        self.opts.append(('file', self.prog))
    addedopts = getopts(self.opts, ['hadoop', 'name', 'delinputs', 'libegg',
                                    'libjar', 'libjarstreaming', 'inputformat',
                                    'outputformat', 'nummaptasks',
                                    'numreducetasks', 'priority', 'queue',
                                    'cachefile', 'cachearchive', 'file',
                                    'codewritable', 'addpath', 'getpath',
                                    'python', 'streamoutput', 'pypath'])
    hadoop = findhadoop(addedopts['hadoop'][0])
    streamingjar = getopt(self.opts, 'streamingjar')
    if streamingjar is None or len(streamingjar) == 0:
        streamingjar = findjar(hadoop, 'streaming')
    else:
        streamingjar = streamingjar[0]
    if not streamingjar:
        print >> sys.stderr, 'ERROR: Streaming jar not found'
        return 1
    # add typedbytes to path
    try:
        import typedbytes
    except ImportError:
        print >> sys.stderr, 'ERROR: "typedbytes" module not found'
        return 1
    modpath = re.sub(r'\.egg.*$', '.egg', typedbytes.__file__)
    if modpath.endswith('.egg'):
        addedopts['libegg'].append(modpath)
    else:
        self.opts.append(('file', modpath))
    # add ctypedbytes to job
    try:
        import ctypedbytes
        print >> sys.stderr, 'INFO: "ctypedbytes" found!'
        modpath = re.sub(r'\.egg.*$', '.egg', ctypedbytes.__file__)
        if modpath.endswith('.egg'):
            addedopts['libegg'].append(modpath)
    except ImportError:
        pass
    self.opts.append(('jobconf', 'stream.map.input=typedbytes'))
    self.opts.append(('jobconf', 'stream.reduce.input=typedbytes'))
    if addedopts['numreducetasks'] and addedopts['numreducetasks'][0] == '0':
        self.opts.append(('jobconf', 'stream.reduce.output=typedbytes'))
        if addedopts['streamoutput']:
            id_ = addedopts['streamoutput'][0]
            self.opts.append(('jobconf', 'stream.map.output=' + id_))
        else:
            self.opts.append(('jobconf', 'stream.map.output=typedbytes'))
    else:
        self.opts.append(('jobconf', 'stream.map.output=typedbytes'))
        if addedopts['streamoutput']:
            id_ = addedopts['streamoutput'][0]
            self.opts.append(('jobconf', 'stream.reduce.output=' + id_))
        else:
            self.opts.append(('jobconf', 'stream.reduce.output=typedbytes'))
    if not addedopts['name']:
        self.opts.append(('jobconf', 'mapred.job.name=' + self.prog.split('/')[-1]))
    else:
        self.opts.append(('jobconf', 'mapred.job.name=%s' % addedopts['name'][0]))
    if addedopts['nummaptasks']:
        self.opts.append(('jobconf', 'mapred.map.tasks=%s' % addedopts['nummaptasks'][0]))
    if addedopts['numreducetasks']:
        numreducetasks = int(addedopts['numreducetasks'][0])
        self.opts.append(('numReduceTasks', str(numreducetasks)))
    if addedopts['priority']:
        self.opts.append(('jobconf', 'mapred.job.priority=%s' % addedopts['priority'][0]))
    if addedopts['queue']:
        self.opts.append(('jobconf', 'mapred.job.queue.name=%s' % addedopts['queue'][0]))
    if addedopts['cachefile']:
        for cachefile in addedopts['cachefile']:
            self.opts.append(('cacheFile', cachefile))
    if addedopts['cachearchive']:
        for cachearchive in addedopts['cachearchive']:
            self.opts.append(('cacheArchive', cachearchive))
    if addedopts['file']:
        for file in addedopts['file']:
            if '://' not in file:
                if not os.path.exists(file):
                    raise ValueError('file "' + file + '" does not exist')
                file = 'file://' + os.path.abspath(file)
            self.opts.append(('file', file))
    if not addedopts['inputformat']:
        addedopts['inputformat'] = ['auto']
    inputformat_shortcuts = \
        {'code': 'org.apache.hadoop.streaming.AutoInputFormat',
         'text': 'org.apache.hadoop.mapred.TextInputFormat',
         'sequencefile': 'org.apache.hadoop.streaming.AutoInputFormat',
         'auto': 'org.apache.hadoop.streaming.AutoInputFormat'}
    inputformat_shortcuts.update(configopts('inputformats', self.prog))
    inputformat = addedopts['inputformat'][0]
    if inputformat.lower() in inputformat_shortcuts:
        inputformat = inputformat_shortcuts[inputformat.lower()]
    self.opts.append(('inputformat', inputformat))
    if not addedopts['outputformat']:
        addedopts['outputformat'] = ['sequencefile']
    # check the option's first value (the original compared the whole list)
    if addedopts['getpath'] and addedopts['getpath'][0] != 'no':
        outputformat_shortcuts = \
            {'code': 'fm.last.feathers.output.MultipleSequenceFiles',
             'text': 'fm.last.feathers.output.MultipleTextFiles',
             'raw': 'fm.last.feathers.output.MultipleRawFileOutputFormat',
             'sequencefile': 'fm.last.feathers.output.MultipleSequenceFiles'}
    else:
        outputformat_shortcuts = \
            {'code': 'org.apache.hadoop.mapred.SequenceFileOutputFormat',
             'text': 'org.apache.hadoop.mapred.TextOutputFormat',
             'raw': 'fm.last.feathers.output.RawFileOutputFormat',
             'sequencefile': 'org.apache.hadoop.mapred.SequenceFileOutputFormat'}
    outputformat_shortcuts.update(configopts('outputformats', self.prog))
    outputformat = addedopts['outputformat'][0]
    if outputformat.lower() in outputformat_shortcuts:
        outputformat = outputformat_shortcuts[outputformat.lower()]
    self.opts.append(('outputformat', outputformat))
    if addedopts['addpath'] and addedopts['addpath'][0] != 'no':
        self.opts.append(('cmdenv', 'dumbo_addpath=true'))
    pyenv = envdef('PYTHONPATH', addedopts['libegg'], 'file', self.opts,
                   shortcuts=dict(configopts('eggs', self.prog)),
                   quote=False, trim=True, extrapaths=addedopts['pypath'])
    if pyenv:
        self.opts.append(('cmdenv', pyenv))
    if addedopts['libjarstreaming'] and addedopts['libjarstreaming'][0] != 'no':
        addedopts['libjar'].append(streamingjar)
    hadenv = envdef('HADOOP_CLASSPATH', addedopts['libjar'], 'libjar',
                    self.opts, shortcuts=dict(configopts('jars', self.prog)))
    fileopt = getopt(self.opts, 'file')
    if fileopt:
        tmpfiles = []
        for file in fileopt:
            if file.startswith('file://'):
                self.opts.append(('file', file[7:]))
            else:
                tmpfiles.append(file)
        if tmpfiles:
            self.opts.append(('jobconf', 'tmpfiles=' + ','.join(tmpfiles)))
    libjaropt = getopt(self.opts, 'libjar')
    if libjaropt:
        tmpjars = []
        for jar in libjaropt:
            if jar.startswith('file://'):
                self.opts.append(('file', jar[7:]))
            else:
                tmpjars.append(jar)
        if tmpjars:
            self.opts.append(('jobconf', 'tmpjars=' + ','.join(tmpjars)))
    cmd = hadoop + '/bin/hadoop jar ' + streamingjar
    retval = execute(cmd, self.opts, hadenv)
    if addedopts['delinputs'] and addedopts['delinputs'][0] == 'yes':
        for (key, value) in self.opts:
            if key == 'input':
                if os.path.exists(hadoop + "/bin/hdfs"):
                    hdfs = hadoop + "/bin/hdfs"
                else:
                    hdfs = hadoop + "/bin/hadoop"
                execute("%s dfs -rmr '%s'" % (hdfs, value))
    return retval
def run(self):
    retval = Iteration.run(self)
    if retval != 0:
        return retval
    opts = self.opts
    if os.path.exists(self.prog):
        opts.add('file', self.prog)
    keys = ['hadoop', 'name', 'delinputs', 'libegg', 'libjar', 'inputformat',
            'outputformat', 'nummaptasks', 'numreducetasks', 'priority',
            'queue', 'cachefile', 'cachearchive', 'file', 'codewritable',
            'addpath', 'getpath', 'python', 'streamoutput', 'pypath',
            'hadooplib']
    addedopts = opts.filter(keys)
    opts.remove(*keys)
    hadoop = findhadoop(addedopts['hadoop'][0])
    streamingjar = findjar(hadoop, 'streaming', addedopts['hadooplib'])
    if not streamingjar:
        print >> sys.stderr, 'ERROR: Streaming jar not found'
        return 1
    try:
        import typedbytes
    except ImportError:
        print >> sys.stderr, 'ERROR: "typedbytes" module not found'
        return 1
    modpath = re.sub(r'\.egg.*$', '.egg', typedbytes.__file__)
    if modpath.endswith('.egg'):
        addedopts.add('libegg', modpath)
    else:
        opts.add('file', modpath)
    opts.add('jobconf', 'stream.map.input=typedbytes')
    opts.add('jobconf', 'stream.reduce.input=typedbytes')
    if addedopts['numreducetasks'] and addedopts['numreducetasks'][0] == '0':
        opts.add('jobconf', 'stream.reduce.output=typedbytes')
        if addedopts['streamoutput']:
            id_ = addedopts['streamoutput'][0]
            opts.add('jobconf', 'stream.map.output=' + id_)
        else:
            opts.add('jobconf', 'stream.map.output=typedbytes')
    else:
        opts.add('jobconf', 'stream.map.output=typedbytes')
        if addedopts['streamoutput']:
            id_ = addedopts['streamoutput'][0]
            opts.add('jobconf', 'stream.reduce.output=' + id_)
        else:
            opts.add('jobconf', 'stream.reduce.output=typedbytes')
    progname = self.prog.split('/')[-1] if not addedopts['name'] \
               else addedopts['name'][0]
    opts.add('jobconf', 'mapred.job.name=%s' % progname)
    nummaptasks = addedopts['nummaptasks']
    numreducetasks = addedopts['numreducetasks']
    if nummaptasks:
        opts.add('jobconf', 'mapred.map.tasks=%s' % nummaptasks[0])
    if numreducetasks:
        opts.add('numReduceTasks', numreducetasks[0])
    if addedopts['priority']:
        opts.add('jobconf', 'mapred.job.priority=%s' % addedopts['priority'][0])
    if addedopts['queue']:
        opts.add('jobconf', 'mapred.job.queue.name=%s' % addedopts['queue'][0])
    for cachefile in addedopts['cachefile']:
        opts.add('cacheFile', cachefile)
    for cachearchive in addedopts['cachearchive']:
        opts.add('cacheArchive', cachearchive)
    for _file in addedopts['file']:
        if '://' not in _file:
            if not os.path.exists(_file):
                raise ValueError('file "%s" does not exist' % _file)
            _file = 'file://%s' % os.path.abspath(_file)
        opts.add('file', _file)
    if not addedopts['inputformat']:
        addedopts.add('inputformat', 'auto')
    inputformat_shortcuts = {
        'code': 'org.apache.hadoop.streaming.AutoInputFormat',
        'text': 'org.apache.hadoop.mapred.TextInputFormat',
        'sequencefile': 'org.apache.hadoop.streaming.AutoInputFormat',
        'auto': 'org.apache.hadoop.streaming.AutoInputFormat'
    }
    inputformat_shortcuts.update(configopts('inputformats', self.prog))
    inputformat = addedopts['inputformat'][0]
    if inputformat.lower() in inputformat_shortcuts:
        inputformat = inputformat_shortcuts[inputformat.lower()]
    opts.add('inputformat', inputformat)
    if not addedopts['outputformat']:
        addedopts.add('outputformat', 'sequencefile')
    if addedopts['getpath'] and 'no' not in addedopts['getpath']:
        outputformat_shortcuts = {
            'code': 'fm.last.feathers.output.MultipleSequenceFiles',
            'text': 'fm.last.feathers.output.MultipleTextFiles',
            'raw': 'fm.last.feathers.output.MultipleRawFileOutputFormat',
            'sequencefile': 'fm.last.feathers.output.MultipleSequenceFiles'
        }
    else:
        outputformat_shortcuts = {
            'code': 'org.apache.hadoop.mapred.SequenceFileOutputFormat',
            'text': 'org.apache.hadoop.mapred.TextOutputFormat',
            'raw': 'fm.last.feathers.output.RawFileOutputFormat',
            'sequencefile': 'org.apache.hadoop.mapred.SequenceFileOutputFormat'
        }
    outputformat_shortcuts.update(configopts('outputformats', self.prog))
    outputformat = addedopts['outputformat'][0]
    if outputformat.lower() in outputformat_shortcuts:
        outputformat = outputformat_shortcuts[outputformat.lower()]
    opts.add('outputformat', outputformat)
    if addedopts['addpath'] and 'no' not in addedopts['addpath']:
        opts.add('cmdenv', 'dumbo_addpath=true')
    pyenv = envdef('PYTHONPATH', addedopts['libegg'], 'file', self.opts,
                   shortcuts=dict(configopts('eggs', self.prog)),
                   quote=False, trim=True, extrapaths=addedopts['pypath'])
    if pyenv:
        opts.add('cmdenv', pyenv)
    hadenv = envdef('HADOOP_CLASSPATH', addedopts['libjar'], 'libjar',
                    self.opts, shortcuts=dict(configopts('jars', self.prog)))
    tmpfiles = []
    for _file in opts.pop('file'):
        if _file.startswith('file://'):
            opts.add('file', _file[7:])
        else:
            tmpfiles.append(_file)
    if tmpfiles:
        opts.add('jobconf', 'tmpfiles=%s' % ','.join(tmpfiles))
    tmpjars = []
    for jar in opts.pop('libjar'):
        if jar.startswith('file://'):
            opts.add('file', jar[7:])
        else:
            tmpjars.append(jar)
    if tmpjars:
        opts.add('jobconf', 'tmpjars=%s' % ','.join(tmpjars))
    cmd = hadoop + '/bin/hadoop jar ' + streamingjar
    retval = execute(cmd, opts, hadenv)
    if 'yes' in addedopts['delinputs']:
        inputs = opts['input']
        for path in inputs:
            execute("%s/bin/hadoop fs -rmr '%s'" % (hadoop, path))
    return retval
def create_filesystem(self, opts):
    return StreamingFileSystem(findhadoop(opts['hadoop'][0]))
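# The final run()/create_filesystem() pair stops treating self.opts as a
# plain list: it assumes an options container with add/filter/remove/pop
# and dict-style lookup. A minimal sketch of the interface implied by those
# calls; names and semantics are inferred from usage, not taken from
# dumbo's actual Options class:

class _OptionsSketch(object):
    """Hypothetical stand-in for the container the last run() expects."""

    def __init__(self, pairs=None):
        self._pairs = list(pairs or [])

    def add(self, key, value):
        self._pairs.append((key, value))

    def __getitem__(self, key):
        return [v for k, v in self._pairs if k == key]

    def filter(self, keys):
        """Return a new container holding only the pairs for the given keys."""
        keyset = set(keys)
        return _OptionsSketch((k, v) for k, v in self._pairs if k in keyset)

    def remove(self, *keys):
        keyset = set(keys)
        self._pairs = [(k, v) for k, v in self._pairs if k not in keyset]

    def pop(self, key):
        """Remove and return every value stored under key."""
        values = self[key]
        self.remove(key)
        return values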