def publish_torrent(self, torrent, publish_path):
    """Generator-style deferred task: fetch the metainfo for *torrent*
    and create a new torrent for it saved at *publish_path*.

    Yields each deferred so the caller's scheduler can resume this
    generator once the result is available.
    """
    df = self.open_torrent_arg(torrent)
    yield df
    try:
        metainfo = df.getResult()
    except GetTorrent.GetTorrentException:
        # could not obtain or parse the torrent; log and abort the task.
        self.logger.exception("publish_torrent failed")
        return
    # publish_path is passed twice: create_torrent takes two path
    # arguments and both are the publish location here.
    df = self.multitorrent.create_torrent(metainfo, efs2(publish_path),
                                          efs2(publish_path))
    yield df
    df.getResult()  # re-raises any failure from create_torrent
def determine_filename(self, infohash): # THIS FUNCTION IS PARTICULARLY CONVOLUTED. BLECH! --Dave path, metainfo = self.torrent_cache[infohash] #path = efs2(path) name = metainfo.name_fs savein = efs2(self.config['save_in']) isdir = metainfo.is_batch style = self.config['saveas_style'] if style == 4: torrentname = os.path.split(path[:-8])[1] suggestedname = name if torrentname == suggestedname: style = 1 else: style = 3 if style == 1 or style == 3: if savein: file = os.path.basename(path) saveas= \ os.path.join(savein, file[:-8]) #strip '.torrent' else: saveas = path[:-8] # strip '.torrent' if style == 3 and not isdir: saveas = os.path.join(saveas, name) else: if savein: saveas = os.path.join(savein, name) else: saveas = os.path.join(os.path.split(path)[0], name) return saveas
def get_temp_subdir():
    """Creates a unique subdirectory of the platform temp directory.

       This revolves between MAX_DIR directory names deleting the oldest
       whenever MAX_DIR exist.  Upon return the number of temporary
       subdirectories should never exceed MAX_DIR-1.  If one has already
       been created for this execution, this returns that subdirectory.

       @return the absolute path of the created temporary directory.
    """
    global _tmp_subdir
    # memoized: at most one temp subdir is created per process execution.
    if _tmp_subdir is not None:
        return _tmp_subdir
    tmp = get_temp_dir()
    target = None  # holds the name of the directory that will be made.
    # find the first unused slot in the rotation; 'i' deliberately leaks
    # from the loop so the slot after it can be deleted below.
    for i in xrange(MAX_DIR):
        subdir = efs2(u"BitTorrentTemp%d" % i)
        path = os.path.join(tmp, subdir)
        if not os.path.exists(path):
            target = path
            break
    # subdir should not in normal behavior be None.  It can occur if something
    # prevented a directory from being removed on a previous call or if MAX_DIR
    # is changed.
    if target is None:
        subdir = efs2(u"BitTorrentTemp0")
        path = os.path.join(tmp, subdir)
        shutil.rmtree( path, ignore_errors = True )
        target = path
        i = 0
    # create the temp dir.
    os.mkdir(target)
    # delete the oldest directory (the next slot in the rotation).
    oldest_i = ( i + 1 ) % MAX_DIR
    oldest_subdir = efs2(u"BitTorrentTemp%d" % oldest_i)
    oldest_path = os.path.join(tmp, oldest_subdir)
    if os.path.exists( oldest_path ):
        shutil.rmtree( oldest_path, ignore_errors = True )
    _tmp_subdir = target
    return target
def get_save_dir():
    """Return the default download directory: an '<app_name> Downloads'
    folder under the user's documents directory on Windows (falling back
    to the desktop), or under the desktop elsewhere."""
    folder = efs2(u'%s Downloads' % unicode(app_name))
    if os.name == 'nt':
        base = get_shell_dir(shellcon.CSIDL_PERSONAL)
        if base is None:
            base = desktop
    else:
        base = desktop
    return os.path.join(base, folder)
def get_temp_subdir():
    """Creates a unique subdirectory of the platform temp directory.

       This revolves between MAX_DIR directory names deleting the oldest
       whenever MAX_DIR exist.  Upon return the number of temporary
       subdirectories should never exceed MAX_DIR-1.  If one has already
       been created for this execution, this returns that subdirectory.

       @return the absolute path of the created temporary directory.
    """
    global _tmp_subdir
    # memoized: at most one temp subdir is created per process execution.
    if _tmp_subdir is not None:
        return _tmp_subdir
    tmp = get_temp_dir()
    target = None  # holds the name of the directory that will be made.
    # find the first unused slot in the rotation; 'i' deliberately leaks
    # from the loop so the slot after it can be deleted below.
    for i in xrange(MAX_DIR):
        subdir = efs2(u"BitTorrentTemp%d" % i)
        path = os.path.join(tmp, subdir)
        if not os.path.exists(path):
            target = path
            break
    # subdir should not in normal behavior be None.  It can occur if something
    # prevented a directory from being removed on a previous call or if MAX_DIR
    # is changed.
    if target is None:
        subdir = efs2(u"BitTorrentTemp0")
        path = os.path.join(tmp, subdir)
        shutil.rmtree(path, ignore_errors=True)
        target = path
        i = 0
    # create the temp dir.
    os.mkdir(target)
    # delete the oldest directory (the next slot in the rotation).
    oldest_i = (i + 1) % MAX_DIR
    oldest_subdir = efs2(u"BitTorrentTemp%d" % oldest_i)
    oldest_path = os.path.join(tmp, oldest_subdir)
    if os.path.exists(oldest_path):
        shutil.rmtree(oldest_path, ignore_errors=True)
    _tmp_subdir = target
    return target
def write_pid_file(fname, errorfunc = None):
    """Creates a pid file on platforms that typically create such files;
    otherwise, this returns without doing anything.  The fname should
    not include a path.  The file will be placed in the appropriate
    platform-specific directory (/var/run in linux).

    @param fname: bare file name (no path) for the pid file.
    @param errorfunc: optional callable invoked with a message when the
        pid file cannot be written; when None, failure is silent.
    """
    assert type(fname) == str
    assert errorfunc == None or callable(errorfunc)
    if os.name == 'nt': return
    pid = str(os.getpid())
    try:
        pid_fname = os.path.join(efs2(u'/var/run'), fname)
        file(pid_fname, 'w').write(pid)
    except:
        # BUG FIX: the fallback previously computed the /etc/tmp path but
        # never wrote the pid file; now it actually attempts the write.
        try:
            pid_fname = os.path.join(efs2(u'/etc/tmp'), fname)
            file(pid_fname, 'w').write(pid)
        except:
            if errorfunc:
                errorfunc("Couldn't open pid file. Continuing without one.")
            else:
                pass  # just continue without reporting warning.
def write_pid_file(fname, errorfunc=None):
    """Creates a pid file on platforms that typically create such files;
    otherwise, this returns without doing anything.  The fname should
    not include a path.  The file will be placed in the appropriate
    platform-specific directory (/var/run in linux).

    @param fname: bare file name (no path) for the pid file.
    @param errorfunc: optional callable invoked with a message when the
        pid file cannot be written; when None, failure is silent.
    """
    assert type(fname) == str
    assert errorfunc == None or callable(errorfunc)
    if os.name == 'nt': return
    pid = str(os.getpid())
    try:
        pid_fname = os.path.join(efs2(u'/var/run'), fname)
        file(pid_fname, 'w').write(pid)
    except:
        # BUG FIX: the fallback previously computed the /etc/tmp path but
        # never wrote the pid file; now it actually attempts the write.
        try:
            pid_fname = os.path.join(efs2(u'/etc/tmp'), fname)
            file(pid_fname, 'w').write(pid)
        except:
            if errorfunc:
                errorfunc("Couldn't open pid file. Continuing without one.")
            else:
                pass  # just continue without reporting warning.
def getmtime(path):
    """Return the last-modification time of *path* (a unicode path),
    encoding it to the filesystem encoding first."""
    return os.path.getmtime(efs2(path))
def getsize(path):
    """Return the size in bytes of *path* (a unicode path), encoding it
    to the filesystem encoding first."""
    return os.path.getsize(efs2(path))
def isfile(path):
    """Return True if *path* (a unicode path) names a regular file."""
    return os.path.isfile(efs2(path))
def get_old_incomplete_data_dir():
    """Return the legacy location of the 'incomplete' downloads
    directory, under the old dot directory."""
    return os.path.join(get_old_dot_dir(), efs2(u'incomplete'))
def get_torrents_dir():
    """Return the directory where cached .torrent files are kept."""
    torrents = efs2(u'torrents')
    return os.path.join(get_dot_dir(), torrents)
def get_incomplete_data_dir():
    """Return the directory for partially-downloaded data.

    'incomplete' is a directory name and should not be localized.
    """
    return os.path.join(get_local_data_dir(), efs2(u'incomplete'))
def calc_unix_dirs():
    """Return the (pixmaps, doc, locale) install subpaths for a
    conventional Unix 'share' layout."""
    appdir = '%s-%s' % (app_name, version)
    share = efs2(u'share')
    image_path = os.path.join(share, efs2(u'pixmaps'), appdir)
    doc_path = os.path.join(share, efs2(u'doc'), appdir)
    locale_path = os.path.join(share, efs2(u'locale'))
    return image_path, doc_path, locale_path
def basename(path):
    """Return the final component of *path*, decoded back to unicode
    from the filesystem encoding."""
    encoded = efs2(path)
    return decode_from_filesystem(os.path.basename(encoded))
def isdir(path):
    """Return True if *path* (a unicode path) names a directory."""
    return os.path.isdir(efs2(path))
def exists(path):
    """Return True if *path* (a unicode path) exists on disk."""
    return os.path.exists(efs2(path))
def join(*args):
    """Join path components after encoding each to the filesystem
    encoding, and return the result decoded back to unicode.

    BUG FIX: the previous version discarded the joined path (missing
    'return') and always returned None.  The result is decoded with
    decode_from_filesystem for consistency with the sibling wrappers
    (basename, normpath, realpath) in this module.
    """
    fsargs = [efs2(arg) for arg in args]
    return decode_from_filesystem(os.path.join(*fsargs))
def __init__(self, config, rawserver):
    """Initialize the tracker from *config*: load any saved swarm state
    from the dfile, optionally redirect stdout to a logfile, and
    schedule the periodic state-save and peer-expiry tasks on
    *rawserver*.
    """
    self.config = config
    self.response_size = config['response_size']
    self.max_give = config['max_give']
    # dfile persists swarm state across restarts (pickle, or bencode for
    # backwards compatibility -- see the load below).
    self.dfile = efs2(config['dfile'])
    self.natcheck = config['nat_check']
    favicon = config['favicon']
    self.favicon = None
    if favicon:
        try:
            h = open(favicon,'r')
            self.favicon = h.read()
            h.close()
        except:
            errorfunc(logging.WARNING,
                      _("specified favicon file -- %s -- does not exist.") %
                      favicon)
    self.rawserver = rawserver
    self.cached = {}    # format: infohash: [[time1, l1, s1], [time2, l2, s2], [time3, l3, s3]]
    self.cached_t = {}  # format: infohash: [time, cache]
    self.times = {}
    self.state = {}
    self.seedcount = {}

    self.save_pending = False
    self.parse_pending = False

    self.only_local_override_ip = config['only_local_override_ip']
    if self.only_local_override_ip == 2:
        # setting 2 means "auto": honor given_ip overrides only when NAT
        # checking is disabled.
        self.only_local_override_ip = not config['nat_check']

    if os.path.exists(self.dfile):
        # restore previously saved state; any failure resets to empty.
        try:
            h = open(self.dfile, 'rb')
            ds = h.read()
            h.close()
            try:
                tempstate = cPickle.loads(ds)
            except:
                tempstate = bdecode(ds)  # backwards-compatibility.
            if not tempstate.has_key('peers'):
                # very old statefiles were just the bare peers dict.
                tempstate = {'peers': tempstate}
            # statefiletemplate presumably validates the layout; a raise
            # here is caught below and treated as corruption -- confirm.
            statefiletemplate(tempstate)
            self.state = tempstate
        except:
            errorfunc(logging.WARNING,
                      _("statefile %s corrupt; resetting") % self.dfile)
    self.downloads = self.state.setdefault('peers', {})
    self.completed = self.state.setdefault('completed', {})

    self.becache = {}
    # format: infohash: [[l1, s1], [l2, s2], [l3, s3]]
    for infohash, ds in self.downloads.iteritems():
        self.seedcount[infohash] = 0
        for x, y in ds.iteritems():
            if not y.get('nat', -1):
                # peer was reachable (or never NAT-checked); re-register
                # it, preferring an allowed given_ip override.
                ip = y.get('given_ip')
                if not (ip and self.allow_local_override(y['ip'], ip)):
                    ip = y['ip']
                self.natcheckOK(infohash, x, ip, y['port'], y['left'])
            if not y['left']:
                self.seedcount[infohash] += 1

    # reset last-announce times for all restored peers.
    for infohash in self.downloads:
        self.times[infohash] = {}
        for peerid in self.downloads[infohash]:
            self.times[infohash][peerid] = 0

    self.reannounce_interval = config['reannounce_interval']
    self.save_dfile_interval = config['save_dfile_interval']
    self.show_names = config['show_names']
    rawserver.add_task(self.save_dfile_interval, self.save_dfile)
    self.prevtime = time()
    self.timeout_downloaders_interval = config['timeout_downloaders_interval']
    rawserver.add_task(self.timeout_downloaders_interval,
                       self.expire_downloaders)
    self.logfile = None
    self.log = None
    if (config['logfile'] != '') and (config['logfile'] != '-'):
        # redirect stdout into the configured logfile.
        try:
            self.logfile = config['logfile']
            self.log = open(self.logfile, 'a')
            sys.stdout = self.log
            print _("# Log Started: "), isotime()
        except:
            print _("**warning** could not redirect stdout to log file: "), sys.exc_info()[0]

    if config['hupmonitor']:
        # SIGHUP reopens the logfile, supporting external log rotation.
        def huphandler(signum, frame, self = self):
            try:
                self.log.close ()
                self.log = open(self.logfile, 'a')
                sys.stdout = self.log
                print _("# Log reopened: "), isotime()
            except:
                print _("***warning*** could not reopen logfile")

        signal.signal(signal.SIGHUP, huphandler)

    self.allow_get = config['allow_get']

    if config['allowed_dir'] != '':
        # restrict tracking to torrents found under allowed_dir.
        self.allowed_dir = config['allowed_dir']
        self.parse_dir_interval = config['parse_dir_interval']
        self.allowed = self.state.setdefault('allowed', {})
        self.allowed_dir_files = self.state.setdefault('allowed_dir_files', {})
        self.allowed_dir_blocked = {}
        self.parse_allowed()
    else:
        # no allowed_dir: drop any stale restriction state.
        try:
            del self.state['allowed']
        except:
            pass
        try:
            del self.state['allowed_dir_files']
        except:
            pass
        self.allowed = None

    # detect platforms where unquote('+') does not yield a space.
    self.uq_broken = unquote('+') != ' '
    self.keep_dead = config['keep_dead']
if app_dir is not None: dir_root = app_dir if dir_root is None and os.name == 'nt': tmp_dir_root = os.path.split(sys.executable)[0] if os.access(tmp_dir_root, os.R_OK|os.W_OK): dir_root = tmp_dir_root return dir_root # For string literal subdirectories, starting with unicode and then # converting to filesystem encoding may not always be necessary, but it seems # safer to do so. --Dave image_root = os.path.join(app_root, efs2(u'images')) locale_root = os.path.join(get_dot_dir(), efs2(u'locale')) no_really_makedirs(locale_root) plugin_path = [] internal_plugin = os.path.join(app_root, efs2(u'BitTorrent'), efs2(u'Plugins')) local_plugin = os.path.join(get_dot_dir(), efs2(u'Plugins')) if os.access(local_plugin, os.F_OK): plugin_path.append(local_plugin) if os.access(internal_plugin, os.F_OK): plugin_path.append(internal_plugin) if not os.access(image_root, os.F_OK) or not os.access(locale_root, os.F_OK):
def normpath(path):
    """Normalize *path* and return it decoded back to unicode from the
    filesystem encoding."""
    encoded = efs2(path)
    return decode_from_filesystem(os.path.normpath(encoded))
def get_nebula_file():
    """Return the path of the 'nebula' file inside the dot directory."""
    nebula = efs2(u'nebula')
    return os.path.join(get_dot_dir(), nebula)
def realpath(path):
    """Resolve symlinks in *path* and return the canonical path decoded
    back to unicode from the filesystem encoding."""
    encoded = efs2(path)
    return decode_from_filesystem(os.path.realpath(encoded))
def commonprefx(pathlist):
    """Return the longest common prefix of the paths in *pathlist*,
    decoded back to unicode from the filesystem encoding.

    NOTE(review): the name is missing an 'i' (commonprefix); kept as-is
    since external callers may depend on it.
    """
    encoded = [efs2(p) for p in pathlist]
    return decode_from_filesystem(os.path.commonprefix(encoded))
config['start_time'] = time.time() core_doneflag = DeferredEvent() ms = [] try: print args for i in xrange(NUM_PEERS): multitorrent = create_multitorrent(config, rawserver, i) multitorrent._id = i ms.append(multitorrent) for t in args: p = multitorrent.config['data_dir'] p = os.path.join(p, '%s.dat' % i) multitorrent.create_torrent_non_suck(efs2(t), efs2(p)) task.LoopingCall(print_status, ms).start(5) rawserver.listen_forever() except: for m in ms: ddir = m.config['data_dir'] if os.path.exists(ddir): shutil.rmtree(ddir) # oops, we failed. # one message for the log w/ exception info global_logger.exception("BitTorrent core initialization failed!") # one message for the user w/o info
def __init__(self, config, display, configfile_key): """Starts torrents for all .torrent files in a directory tree. All errors are logged using Python logging to 'configfile_key' logger. @param config: Preferences object storing config. @param display: output function for stats. """ # 4.4.x version of LaunchMany output exceptions to a displayer. # This version only outputs stats to the displayer. We do not use # the logger to output stats so that a caller-provided object # can provide stats formatting as opposed to using the # logger Formatter, which is specific to exceptions, warnings, and # info messages. self.logger = logging.getLogger(configfile_key) try: self.multitorrent = None self.rawserver = None self.config = config self.configfile_key = configfile_key self.display = display self.torrent_dir = efs2(config['torrent_dir']) # Ex: torrent_cache = infohash -> (path,metainfo) self.torrent_cache = {} # maps path -> [(modification time, size), infohash] self.file_cache = {} # used as set containing paths of files that do not have separate # entries in torrent_cache either because torrent_cache already # contains the torrent or because the torrent file is corrupt. self.blocked_files = {} #self.torrent_list = [] #self.downloads = {} self.hashcheck_queue = [] #self.hashcheck_store = {} self.hashcheck_current = None self.core_doneflag = DeferredEvent() self.rawserver = RawServer(self.config) try: # set up shut-down procedure before we begin doing things that # can throw exceptions. def shutdown(): self.logger.critical(_("shutting down")) if self.multitorrent: if len(self.multitorrent.get_torrents()) > 0: for t in self.multitorrent.get_torrents(): self.logger.info(_('dropped "%s"') % self.torrent_cache[t.infohash][0]) def after_mt(r): self.logger.critical("multitorrent shutdown completed. 
Calling rawserver.stop") self.rawserver.stop() self.logger.critical( "calling multitorrent shutdown" ) df = self.multitorrent.shutdown() #set_flag = lambda *a : self.rawserver.stop() df.addCallbacks(after_mt, after_mt) else: self.rawserver.stop() ### PROFILER POSTPROCESSING. #self.logger.critical( "Disabling profiles" ) #prof.disable() #self.logger.critical( "Running profiler post-processing" ) #stats = Stats(prof.getstats()) #stats.sort("inlinetime") #self.logger.info( "Calling stats.pprint") #stats.pprint() #self.logger.info( "After stats.pprint") ### PROFILER POSTPROCESSING # It is safe to addCallback here, because there is only one thread, # but even if the code were multi-threaded, core_doneflag has not # been passed to anyone. There is no chance of a race condition # between the DeferredEvent's callback and addCallback. self.core_doneflag.addCallback( lambda r: self.rawserver.external_add_task(0, shutdown)) self.rawserver.install_sigint_handler(self.core_doneflag) data_dir = config['data_dir'] self.multitorrent = MultiTorrent(config, self.rawserver, data_dir, resume_from_torrent_config=False) self.rawserver.add_task(0, self.scan) self.rawserver.add_task(0.5, self.periodic_check_hashcheck_queue) self.rawserver.add_task(self.config['display_interval'], self.periodic_stats) try: import signal def handler(signum, frame): self.rawserver.external_add_task(0, self.read_config) if hasattr(signal, 'SIGHUP'): signal.signal(signal.SIGHUP, handler) except Exception, e: self.logger.error(_("Could not set signal handler: ") + str_exc(e)) self.rawserver.add_task(0, self.core_doneflag.set) except UserFailure, e: self.logger.error(str_exc(e)) self.rawserver.add_task(0, self.core_doneflag.set) except: #data = StringIO() #print_exc(file = data) #self.logger.error(data.getvalue()) self.logger.exception("Exception raised while initializing LaunchMany") self.rawserver.add_task(0, self.core_doneflag.set) # always make sure events get processed even if only for # shutting down. 
self.rawserver.listen_forever() self.logger.info( "After rawserver.listen_forever" )
def __init__(self, config, display, configfile_key): """Starts torrents for all .torrent files in a directory tree. All errors are logged using Python logging to 'configfile_key' logger. @param config: Preferences object storing config. @param display: output function for stats. """ # 4.4.x version of LaunchMany output exceptions to a displayer. # This version only outputs stats to the displayer. We do not use # the logger to output stats so that a caller-provided object # can provide stats formatting as opposed to using the # logger Formatter, which is specific to exceptions, warnings, and # info messages. self.logger = logging.getLogger(configfile_key) try: self.multitorrent = None self.rawserver = None self.config = config self.configfile_key = configfile_key self.display = display self.torrent_dir = efs2(config['torrent_dir']) # Ex: torrent_cache = infohash -> (path,metainfo) self.torrent_cache = {} # maps path -> [(modification time, size), infohash] self.file_cache = {} # used as set containing paths of files that do not have separate # entries in torrent_cache either because torrent_cache already # contains the torrent or because the torrent file is corrupt. self.blocked_files = {} #self.torrent_list = [] #self.downloads = {} self.hashcheck_queue = [] #self.hashcheck_store = {} self.hashcheck_current = None self.core_doneflag = DeferredEvent() self.rawserver = RawServer(self.config) try: # set up shut-down procedure before we begin doing things that # can throw exceptions. def shutdown(): self.logger.critical(_("shutting down")) if self.multitorrent: if len(self.multitorrent.get_torrents()) > 0: for t in self.multitorrent.get_torrents(): self.logger.info( _('dropped "%s"') % self.torrent_cache[t.infohash][0]) def after_mt(r): self.logger.critical( "multitorrent shutdown completed. 
Calling rawserver.stop" ) self.rawserver.stop() self.logger.critical("calling multitorrent shutdown") df = self.multitorrent.shutdown() #set_flag = lambda *a : self.rawserver.stop() df.addCallbacks(after_mt, after_mt) else: self.rawserver.stop() ### PROFILER POSTPROCESSING. #self.logger.critical( "Disabling profiles" ) #prof.disable() #self.logger.critical( "Running profiler post-processing" ) #stats = Stats(prof.getstats()) #stats.sort("inlinetime") #self.logger.info( "Calling stats.pprint") #stats.pprint() #self.logger.info( "After stats.pprint") ### PROFILER POSTPROCESSING # It is safe to addCallback here, because there is only one thread, # but even if the code were multi-threaded, core_doneflag has not # been passed to anyone. There is no chance of a race condition # between the DeferredEvent's callback and addCallback. self.core_doneflag.addCallback( lambda r: self.rawserver.external_add_task(0, shutdown)) self.rawserver.install_sigint_handler(self.core_doneflag) data_dir = config['data_dir'] self.multitorrent = MultiTorrent( config, self.rawserver, data_dir, resume_from_torrent_config=False) self.rawserver.add_task(0, self.scan) self.rawserver.add_task(0.5, self.periodic_check_hashcheck_queue) self.rawserver.add_task(self.config['display_interval'], self.periodic_stats) try: import signal def handler(signum, frame): self.rawserver.external_add_task(0, self.read_config) if hasattr(signal, 'SIGHUP'): signal.signal(signal.SIGHUP, handler) except Exception, e: self.logger.error( _("Could not set signal handler: ") + str_exc(e)) self.rawserver.add_task(0, self.core_doneflag.set) except UserFailure, e: self.logger.error(str_exc(e)) self.rawserver.add_task(0, self.core_doneflag.set) except: #data = StringIO() #print_exc(file = data) #self.logger.error(data.getvalue()) self.logger.exception( "Exception raised while initializing LaunchMany") self.rawserver.add_task(0, self.core_doneflag.set) # always make sure events get processed even if only for # shutting down. 
self.rawserver.listen_forever() self.logger.info("After rawserver.listen_forever")
config, files = parse_configuration_and_args(defaults, 'bittorrent-tracker', args, 0, 0) except ValueError, e: print _("error: ") + str_exc(e) print _("run with -? for parameter explanations") return except BTFailure, e: print _("error: ") + str_exc(e) print _("run with -? for parameter explanations") return if config['dfile'] == "": config['dfile'] = decode_from_filesystem( os.path.join(platform.get_temp_dir(), efs2(u"dfile") + str(os.getpid()))) config = Preferences().initWithDict(config) ef = lambda e: errorfunc(logging.WARNING, e) platform.write_pid_file(config['pid'], ef) t = None try: r = RawServer(config) t = Tracker(config, r) try: #DEBUG print "track: create_serversocket, port=", config['port'] #END s = r.create_serversocket(config['port'], config['bind']) handler = HTTPHandler(t.get,
defaults = get_defaults('bittorrent-tracker') # hard-coded defaults. try: config, files = parse_configuration_and_args(defaults, 'bittorrent-tracker', args, 0, 0 ) except ValueError, e: print _("error: ") + str_exc(e) print _("run with -? for parameter explanations") return except BTFailure, e: print _("error: ") + str_exc(e) print _("run with -? for parameter explanations") return if config['dfile']=="": config['dfile'] = decode_from_filesystem( os.path.join(platform.get_temp_dir(), efs2(u"dfile") + str(os.getpid()))) config = Preferences().initWithDict(config) ef = lambda e: errorfunc(logging.WARNING, e) platform.write_pid_file(config['pid'], ef) t = None try: r = RawServer(config) t = Tracker(config, r) try: #DEBUG print "track: create_serversocket, port=", config['port'] #END s = r.create_serversocket(config['port'], config['bind'])
def get_old_dot_dir():
    """Return the legacy '.bittorrent' configuration directory under the
    platform config directory."""
    dot = efs2(u'.bittorrent')
    return os.path.join(get_config_dir(), dot)
app_dir = get_shell_dir(shellcon.CSIDL_APPDATA) if app_dir is not None: dir_root = app_dir if dir_root is None and os.name == 'nt': tmp_dir_root = os.path.split(sys.executable)[0] if os.access(tmp_dir_root, os.R_OK | os.W_OK): dir_root = tmp_dir_root return dir_root # For string literal subdirectories, starting with unicode and then # converting to filesystem encoding may not always be necessary, but it seems # safer to do so. --Dave image_root = os.path.join(app_root, efs2(u'images')) locale_root = os.path.join(get_dot_dir(), efs2(u'locale')) no_really_makedirs(locale_root) plugin_path = [] internal_plugin = os.path.join(app_root, efs2(u'BitTorrent'), efs2(u'Plugins')) local_plugin = os.path.join(get_dot_dir(), efs2(u'Plugins')) if os.access(local_plugin, os.F_OK): plugin_path.append(local_plugin) if os.access(internal_plugin, os.F_OK): plugin_path.append(internal_plugin) if not os.access(image_root, os.F_OK) or not os.access(locale_root, os.F_OK): # we guess that probably we are installed on *nix in this case
def open(name, mode='r'):
    """Open *name* after encoding it to the filesystem encoding.

    Shadows the builtin open on purpose; delegates to the saved
    old_open.
    """
    fsname = efs2(name)
    return old_open(fsname, mode)