def main(argv=sys.argv[1:]): """ This is the lantorrent daemon program. it mines the db for transfers that it can group together and send. Only one should be running at one time """ pylantorrent.log(logging.INFO, "enter %s" % (sys.argv[0])) # use sqlaclh to make sure the db is there x = LantorrentDB("sqlite:///%s" % pylantorrent.config.dbfile) x.close() con_str = pylantorrent.config.dbfile #con = sqlite3.connect(con_str, isolation_level="EXCLUSIVE") con = sqlite3.connect(con_str, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES) done = False while not done: try: rows = getrows(con) if rows and len(rows) > 0: do_it_live(con, rows) else: time.sleep(5) except Exception, ex: pylantorrent.log(logging.ERROR, "top level error %s" % (str(ex)), traceback) con = sqlite3.connect(con_str, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
def is_done(con, rid):
    error_cnt = 0
    while True:
        try:
            pylantorrent.log(logging.INFO, "checking for done on %s" % (rid))
            done = False
            rc = 0
            s = "select state,message,attempt_count from requests where rid = ?"
            data = (rid,)
            c = con.cursor()
            c.execute(s, data)
            rs = c.fetchone()
            con.commit()
            state = int(rs[0])
            message = rs[1]
            attempt_count = rs[2]
            if state == 1:
                done = True
            elif attempt_count > 2:
                done = True
                rc = 1
                if message == None:
                    message = "too many attempts %d" % (attempt_count)
            con.commit()
            return (done, rc, message)
        except sqlite3.OperationalError, sqlex:
            error_cnt = error_cnt + 1
            if error_cnt >= pylantorrent.config.db_error_max:
                raise sqlex
            time.sleep(random.random() * 2.0)

def _send_footer(self):
    foot = {}
    foot['md5sum'] = self.md5str
    foot_str = json.dumps(foot)
    pylantorrent.log(logging.DEBUG, "sending footer %s" % (foot_str))
    for v_con in self.v_con_array:
        v_con.send(foot_str)

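# Illustrative sketch (standalone, not the LTServer/LTClient classes): the
# footer sent above is a small JSON document carrying the md5sum of the
# streamed payload.  A receiver that has hashed the payload as it arrived can
# verify the transfer like this:
import hashlib
import json

def verify_footer(payload_bytes, footer_str):
    # footer_str is the JSON string produced by _send_footer, e.g. '{"md5sum": "..."}'
    expected = json.loads(footer_str)['md5sum']
    actual = hashlib.md5(payload_bytes).hexdigest()
    return actual == expected

payload = b"example payload"
footer = json.dumps({'md5sum': hashlib.md5(payload).hexdigest()})
print(verify_footer(payload, footer))   # True
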
def store_and_forward(self):
    self._read_header()
    header = self.json_header
    requests_a = header['requests']
    self._open_dest_files(requests_a)
    destinations = header['destinations']
    self._get_valid_vcons(destinations)
    self._process_io()
    # close all open files
    self._close_files()
    # read the footer from the sending machine
    self._read_footer()
    # send the footer to all machines this is streaming to
    self._send_footer()
    # wait for eof and close
    self._close_connections()
    self._rename_files(requests_a)
    pylantorrent.log(logging.DEBUG, "All data sent %s %d" % (self.md5str, len(requests_a)))

    # if we got to here it was successfully written to a file
    # and we can call it success.  Print out a success message for
    # every file written
    vex = LTException(0, "Success", header['host'], int(header['port']), requests_a, md5sum=self.md5str)
    s = vex.get_printable()
    self.print_results(s)
    self.clean_up()

def main(argv=sys.argv[1:]): """ This program allows a file to be requested from the lantorrent system. The file will be sent out of band. When the file has been delived the database entry for this request will be updated. This program will block until that entry is update. As options, the program takes the source file, the target file location, the group_id and the group count. The lantorrent config file must have the ip and port that the requester is using for lantorrent delivery. """ pylantorrent.log(logging.INFO, "enter") random.seed() (o, args, p) = setup_options(argv) # use sqlaclh to make sure the db is there x = LantorrentDB("sqlite:///%s" % pylantorrent.config.dbfile) x.close() con_str = pylantorrent.config.dbfile con = sqlite3.connect(con_str, isolation_level="EXCLUSIVE") rc = 0 sz = -1 done = False message = "" if o.reattach is None: (rid, sz) = request(args, con) try: (done, rc, message) = is_done(con, rid) except: done = False rc = 0 message = "Check on status later, db not ready for polling" else: rid = o.reattach if o.cancel: delete_rid(con, rid) return 0 (done, rc, message) = is_done(con, rid) if not o.nonblock and not done: (rc, message) = wait_until_sent(con, rid) done = True if done: delete_rid(con, rid) msg = "%d,%s,%s" % (rc, str(done), message) print msg # always return 0 if we echo the rc to stdout. this tells the # user to check the output for the real rc return 0
def get_incomplete(self):
    try:
        self.process_incoming_data()
    except Exception, ex:
        pylantorrent.log(logging.ERROR, "An error occurred while trying to process the data received so far: %s" % (str(ex)), traceback)

def _rename_files(self, requests_a):
    for req in requests_a:
        realname = req['filename']
        rn = req['rename']
        if rn:
            tmpname = realname + self.suffix
            pylantorrent.log(logging.DEBUG, "renaming %s -> %s" % (tmpname, realname))
            os.rename(tmpname, realname)
            self.created_files.remove(tmpname)

def request(argv, con):
    if len(argv) < 4:
        raise Exception("You must provide 4 arguments: <src file> <dst file> <a uuid for this request> <the contact string of the receiving node's lt server>")
    src_filename = argv[0]
    dst_filename = argv[1]
    # the user provides the rid.  that way we know they have it to look
    # things up later if needed
    rid = argv[2]

    # get the size of the file and verify that it exists
    sz = os.path.getsize(src_filename)

    hostport = argv[3]
    ha = hostport.split(":")
    host = ha[0]
    if host == "":
        hostport = os.environ['SSH_CLIENT']
        ha2 = hostport.split(" ")
        host = ha2[0]
    if len(ha) > 1:
        port = int(ha[1])
    else:
        port = 2893

    now = datetime.datetime.now()
    i = "insert into requests(src_filename, dst_filename, hostname, port, rid, entry_time, state, attempt_count) values (?, ?, ?, ?, ?, ?, ?, ?)"
    data = (src_filename, dst_filename, host, port, rid, now, 0, 0,)
    error_ctr = 0
    while True:
        try:
            c = con.cursor()
            c.execute(i, data)
            con.commit()
            pylantorrent.log(logging.INFO, "new request %s %d" % (rid, sz))
            return (rid, sz)
        except Exception, ex:
            pylantorrent.log(logging.ERROR, "an error occurred on request %s" % str(ex))
            error_ctr = error_ctr + 1
            if error_ctr >= pylantorrent.config.db_error_max:
                raise ex
            time.sleep(random.random() * 2.0)

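# Illustrative sketch: the SQL in request(), getrows(), is_done() and
# do_it_live() implies a "requests" table shaped roughly like the one below.
# The real schema is created by LantorrentDB (sqlalchemy); the column types
# here are assumptions inferred from the queries, shown only so these snippets
# can be exercised against a scratch database.
import sqlite3

def create_scratch_schema(con):
    con.execute("""
        create table if not exists requests (
            src_filename   text,
            dst_filename   text,
            hostname       text,
            port           integer,
            rid            text,
            entry_time     timestamp,
            state          integer,   -- 0 = pending/retry, 1 = complete (see is_done)
            message        text,
            attempt_count  integer
        )""")
    con.commit()

scratch = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES)
create_scratch_schema(scratch)
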
def do_it_live(con, rows):
    pylantorrent.log(logging.INFO, "lan torrent daemon setting up to send %d in a group" % (len(rows)))
    c = con.cursor()
    dests = []
    last_host = None
    last_port = None
    json_dest = None
    rids_all = []
    for r in rows:
        new_host = r[0]
        new_port = int(r[1])
        dst_filename = r[3]
        src_filename = r[2]
        rid = r[4]
        rids_all.append(rid)
        sz = os.path.getsize(src_filename)

        # if it is the same host just tack on another dest file
        if new_host == last_host and last_port == new_port:
            reqs = json_dest['requests']
            new_req = {"filename": dst_filename, "id": rid, 'rename': True}
            reqs.append(new_req)
            json_dest['requests'] = reqs
        else:
            if json_dest != None:
                dests.append(json_dest)
            last_host = new_host
            last_port = new_port
            json_dest = {}
            json_dest['host'] = new_host
            json_dest['port'] = new_port
            json_dest['requests'] = [{"filename": dst_filename, "id": rid, 'rename': True}]
            json_dest['block_size'] = 128*1024
            json_dest['degree'] = 1
            json_dest['length'] = sz
    if json_dest != None:
        dests.append(json_dest)

    final = {}
    # for the sake of code reuse this will just be piped into an
    # lt daemon processor.  /dev/null is used to suppress a local write
    final['requests'] = [{'filename': "/dev/null", 'id': str(uuid.uuid1()), 'rename': False}]
    final['host'] = "localhost"
    final['port'] = 2893
    final['block_size'] = 131072
    final['degree'] = 1
    final['destinations'] = dests

    pylantorrent.log(logging.INFO, "request send %s" % (json.dumps(final, sort_keys=True, indent=4)))
    pylantorrent.log(logging.INFO, "sending em!")
    client = LTClient(src_filename, final)
    v = LTServer(client, client)
    try:
        v.store_and_forward()
    except Exception, ex:
        pylantorrent.log(logging.ERROR, "an error occurred on store and forward: %s" % (str(ex)), traceback)

    rc = 0
    es = client.get_incomplete()
    bad_rid = []
    for k in es:
        rc = rc + 1
        e = es[k]
        pylantorrent.log(logging.ERROR, "error trying to send %s" % (str(e)))
        rid = e['id']
        bad_rid.append(rid)
        # set to retry
        u = "update requests set state = ?, message = ?, attempt_count = attempt_count + 1 where rid = ?"
        data = (0, str(e), rid,)
        c.execute(u, data)
        rids_all.remove(rid)

    for rid in rids_all:
        # set to complete
        u = "update requests set state = ?, message = ? where rid = ?"
        data = (1, "Success", rid,)
        c.execute(u, data)
    con.commit()

    if len(bad_rid) > 0:
        # wait for something in the system to change
        # obviously we need something more sophisticated than this
        # eventually
        time.sleep(5)
    return rc

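# Illustrative sketch: the "final" dictionary built above is the header
# document that LTServer._read_header() consumes.  Hostnames, paths and ids
# below are placeholders; the key layout mirrors do_it_live() exactly.
import json
import uuid

example_header = {
    "host": "localhost",
    "port": 2893,
    "block_size": 131072,
    "degree": 1,
    # local requests; /dev/null suppresses the local write
    "requests": [{"filename": "/dev/null", "id": str(uuid.uuid1()), "rename": False}],
    # one entry per (host, port) group, each with its own request list
    "destinations": [
        {
            "host": "node1.example.org",
            "port": 2893,
            "block_size": 128 * 1024,
            "degree": 1,
            "length": 1048576,
            "requests": [
                {"filename": "/tmp/image-copy-1", "id": "rid-0001", "rename": True},
            ],
        },
    ],
}
print(json.dumps(example_header, sort_keys=True, indent=4))
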
def clean_up(self, force=False):
    self._close_connections()
    self._close_files()
    pylantorrent.log(logging.DEBUG, "cleaning up")
    for f in self.created_files:
        try:
            pylantorrent.log(logging.DEBUG, "deleting file %s" % (f))
            # don't delete /dev/null (or any other dev really)
            ndx = f.strip().find("/dev")
            if ndx != 0:
                os.remove(f)
        except:
            pass
    self.created_files = []

def _open_dest_files(self, requests_a):
    files_a = []
    for req in requests_a:
        filename = req['filename']
        try:
            rn = req['rename']
            if rn:
                filename = filename + self.suffix
            f = open(filename, "w")
            files_a.append(f)
            self.created_files.append(filename)
        except Exception, ex:
            pylantorrent.log(logging.ERROR, "Failed to open %s" % (filename), traceback)
            raise LTException(503, str(ex), self.json_header['host'], int(self.json_header['port']), reqs=requests_a)
    # assumed: keep the handles where _process_io() expects to find them;
    # as shown in the snippet, files_a was otherwise never used
    self.files_a = files_a

def _process_io(self):
    md5er = hashlib.md5()
    read_count = 0
    bs = self.block_size
    while read_count < self.data_length:
        if bs + read_count > self.data_length:
            bs = self.data_length - read_count
        data = self.source_conn.read_data(bs)
        if data == None:
            raise Exception("Data is None prior to receiving full file %d %d" % (read_count, self.data_length))
        md5er.update(data)
        for v_con in self.v_con_array:
            v_con.send(data)
        for f in self.files_a:
            f.write(data)
        read_count = read_count + len(data)
    self.md5str = str(md5er.hexdigest()).strip()
    pylantorrent.log(logging.DEBUG, "We have received and sent %d bytes.  The md5sum is %s" % (read_count, self.md5str))

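# Illustrative sketch (standalone): the loop above reads the payload in
# block_size chunks, updates an md5 digest, and fans each chunk out to every
# downstream connection and every local file.  The same pattern with plain
# file-like objects:
import hashlib
import io

def fan_out(source, sinks, length, block_size=128 * 1024):
    md5er = hashlib.md5()
    read_count = 0
    while read_count < length:
        bs = min(block_size, length - read_count)
        data = source.read(bs)
        if not data:
            raise Exception("stream ended early at %d of %d bytes" % (read_count, length))
        md5er.update(data)
        for sink in sinks:
            sink.write(data)
        read_count = read_count + len(data)
    return md5er.hexdigest()

payload = b"x" * 300000
out1, out2 = io.BytesIO(), io.BytesIO()
digest = fan_out(io.BytesIO(payload), [out1, out2], len(payload))
print(digest == hashlib.md5(payload).hexdigest())   # True
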
def getrows(con):
    c = con.cursor()
    tm = datetime.datetime.now() - datetime.timedelta(0, pylantorrent.config.insert_delay)
    s = "select distinct src_filename from requests where state = 0 and attempt_count < 3 and entry_time < ? order by entry_time limit 1"
    data = (tm,)
    c.execute(s, data)
    r = c.fetchone()
    if r == None or len(r) < 1:
        return None
    src_file = r[0]
    # do a commit here.  The assumption is that just one daemon is pulling
    # from the db.  better to grab any that came in since the initial
    # select
    con.commit()
    pylantorrent.log(logging.INFO, "selected rows with fname %s" % (src_file))

    # need to find a way to determine that nothing new has been added for this
    # file recently
    #s = 'select max(entry_time) as "e [timestamp]" from requests where src_filename = ? and state = 0 and attempt_count < 3'
    #data = (src_file, )
    #done = False
    #while not done:
    #    c.execute(s, data)
    #    row = c.fetchone()
    #    con.commit()
    #    td = datetime.datetime.now() - datetime.timedelta(0, 2)
    #    if row[0] < td:
    #        done = True
    #    else:
    #        time.sleep(0.1)

    s = "select hostname,port,src_filename,dst_filename,rid from requests where src_filename = ? and state = 0 and attempt_count < 3 order by hostname,port"
    data = (src_file,)
    c.execute(s, data)
    rows = c.fetchall()
    con.commit()
    return rows

def read(self, blocksize=1):
    pylantorrent.log(logging.DEBUG, "begin reading....  pau is %s" % (str(self.pau)))
    if self.pau:
        pylantorrent.log(logging.DEBUG, "is pau")
        return None
    pylantorrent.log(logging.DEBUG, "reading.... ")
    if self.file_data:
        d = self.data_file.read(blocksize)
        if not d:
            pylantorrent.log(logging.DEBUG, "no mo file data")
            self.file_data = False
        else:
            pylantorrent.log(logging.DEBUG, "### data len = %d" % (len(d)))
            self.md5er.update(d)
            return d
    pylantorrent.log(logging.DEBUG, "check footer")
    if not self.file_data:
        pylantorrent.log(logging.DEBUG, "getting footer")
        foot = {}
        self.md5str = str(self.md5er.hexdigest()).strip()
        foot['md5sum'] = self.md5str
        d = json.dumps(foot)
        pylantorrent.log(logging.DEBUG, "getting footer is now %s" % (d))
        self.pau = True
        return d

def print_results(self, s):
    pylantorrent.log(logging.DEBUG, "printing\n--------- \n%s\n---------------" % (s))
    self.outf.write(s)
    self.outf.flush()

def main(argv=sys.argv[1:]):
    pylantorrent.log(logging.INFO, "server starting")
    rc = 1
    v = LTServer(sys.stdin, sys.stdout)
    try:
        v.store_and_forward()
        rc = 0
    except LTException, ve:
        pylantorrent.log(logging.ERROR, "error %s" % (str(ve)), traceback)
        s = ve.get_printable()
        v.print_results(s)
        v.clean_up()
    except Exception, ex:
        pylantorrent.log(logging.ERROR, "error %s" % (str(ex)), traceback)
        vex = LTException(500, str(ex))
        s = vex.get_printable()
        v.print_results(s)
        v.clean_up()
    return rc

if __name__ == "__main__":
    if 'LANTORRENT_HOME' not in os.environ:
        msg = "The env LANTORRENT_HOME must be set"
        print msg
        raise Exception(msg)
    rc = main()
    sys.exit(rc)