Example #1
 def __send_file_chunks( self ):
    """
    Call repeatedly in the ifttransmit main loop to send chunks.
    
    This will be called once prepare_transmit() (and possibly send_job()) have been called.
    
    Return an event to be handled by ifttransmit
    """
    
    if self.ready_to_send == False or self.transmit_state != TRANSMIT_STATE_CHUNKS:
       return (0,E_BAD_STATE)
    
    # can we do anything?
    if self.ift_job == None and self.ready_to_send == True:
       iftlog.log(5, self.name + ": No job to process!  Use my assign_job() method and resume me")
       self.ready_to_send = False
       return (0,E_BAD_STATE)
    
 
    chunk = None
    chunk_id = -1
    rc = 0
    
    chunk, chunk_id, chunk_path, remote_chunk_path = self.__next_chunk()
    
    try:
       rc = self.send_one_chunk( chunk, chunk_id, chunk_path, remote_chunk_path )
    except Exception, inst:
       iftlog.exception( self.name + ": could not send data", inst )
       self.close_connection( TRANSMIT_STATE_FAILURE )
       
       t = time.time()
       iftstats.log_chunk( self.ift_job, self.name, False, t, t, 0 )
       
       return (PROTO_MSG_ERROR_FATAL, E_NO_DATA)
Example #2
def get_reusable_data( proto_name, data_key, do_copy=True ):
   """
   Get a (deep) copy of a piece of reusable transmitter data, given the protocol name.
   
   @arg proto_name
      Name of the protocol
   
   @arg data_key
      either REUSABLE_DATA_SETUP_ARGS or REUSABLE_DATA_RESPAWN
      
   Return the data on success, an empty dict if nothing is stored under proto_name/data_key, or None on error.
   """
   
   if __reusable_data.has_key(proto_name) == False:
      return {}
   
   if __reusable_data[proto_name].has_key(data_key) == False:
      return {}
   
   try:
      if do_copy:
         return copy.deepcopy(__reusable_data[ proto_name ][ data_key ])
      else:
         return __reusable_data[ proto_name ][ data_key ]
   except Exception, inst:
      iftlog.exception( "Could not get re-instantiation data for transmitter " + proto_name, inst )
      return None
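
A minimal usage sketch (the protocol name "http_receiver" is hypothetical; REUSABLE_DATA_SETUP_ARGS is the key named in the docstring above):

# hypothetical usage: restore a transmitter's saved setup arguments
setup_args = get_reusable_data( "http_receiver", REUSABLE_DATA_SETUP_ARGS )
if setup_args == None:
   iftlog.log(5, "could not look up reusable setup args for http_receiver")
elif setup_args == {}:
   iftlog.log(1, "no reusable setup args stored for http_receiver")
else:
   # do_copy defaults to True, so this is a deep copy and is safe to modify
   setup_args["attempt"] = 1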
Example #3
 def recv_files( self, remote_file_paths, local_file_dir ):
    global cache_dir
    
    # get the file from the cache and write it to disk, if possible
    file_fd = cache_get_file( cache_path(self.file_to_recv), self.max_age, self.connect_args, self.job_attrs, self.squid_port, self.http_port )
    
    if file_fd == None:
       iftlog.log(3, self.name + ": could not receive " + self.file_to_recv )
       self.recv_finished( TRANSMIT_STATE_FAILURE )
       return E_NO_DATA      # not in cache ==> protocol failure
    
    data = file_fd.read()
    file_fd.close()
    
    tmp_file_name = ""
    
    
    try:
       tmp_file_name = local_file_dir + "/" + os.path.basename( self.job_attrs.get( iftfile.JOB_ATTR_DEST_NAME ) )
       fd = open( tmp_file_name, "wb" )
       fd.write( data )
       fd.close()
    except Exception, inst:
       iftlog.exception(self.name + ".recv_files: failed to save " + self.file_to_recv + " to " + tmp_file_name, inst)
       return E_IOERROR
Example #4
   def get_unwritten_chunks(self):
      """
      Get a list of the chunks that have not yet been received.
      Note: this list may already be outdated by the time it is returned!
      """
      try:
         # sanity check
         if self.__mode != MODE_WRITE:
            self.__error = E_BAD_MODE
            return E_BAD_MODE
         
         ret = []
         for i in xrange(0, len(self.__chunk_mask)):
            if self.__chunk_reservations[i] >= time.time():
               continue

            try:
               chunk = self.__chunk_mask[i]
               if chunk == False:
                  ret.append( i )
            except:
               break
         
         if len(ret) == 0 and self.known_size == False:
            # if we don't know the size, ask for the next chunk past what we have so far
            ret.append( len(self.__chunk_mask) )
         
         return ret
      except Exception, inst:
         iftlog.exception("iftfile.get_unwritten_chunks", inst)
         return []
Example #5
def make_chunks_dir( filename, filehash ):
   """
   Make a directory from the filename and filehash to store incoming chunks into.
   """
   if filename[0] == "/":
      filename = filename[1:]
   
   file_dir = os.path.basename( filename ) + "." + str(filehash)
      
   if os.path.exists( os.path.join(__file_chunks_dir, file_dir) ):
      iftlog.log(3, "WARNING: " + os.path.join(__file_chunks_dir, file_dir) + " exists!")
      try:
         os.popen("rm -rf " + os.path.join(__file_chunks_dir, file_dir) + "/*").close()
      except:
         pass
         
      return 0    # already done!
   
   chunk_dir = get_chunks_dir( filename, filehash )
   try:
      rc = os.popen("mkdir -p " + chunk_dir ).close()
      if rc != None:
         iftlog.log(5, "iftfile: could not make chunk directory " + chunk_dir )
         return E_IOERROR
      
      return 0
   except Exception, inst:
      iftlog.exception( "iftfile: could not make chunk directory " + chunk_dir )
      return E_IOERROR
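
The directory handling above shells out to rm and mkdir; the same preparation can be done with the standard library, which avoids quoting problems when the path contains spaces. A minimal sketch (not the original code) that clears and recreates the chunk directory, assuming get_chunks_dir() and E_IOERROR behave as above:

import os
import shutil

def make_chunks_dir_stdlib( filename, filehash ):
   """
   Sketch: same job as make_chunks_dir(), but without shelling out.
   """
   chunk_dir = get_chunks_dir( filename, filehash )
   try:
      if os.path.exists( chunk_dir ):
         # clear out any stale chunks from a previous transfer
         shutil.rmtree( chunk_dir, ignore_errors=True )
      os.makedirs( chunk_dir )
      return 0
   except Exception, inst:
      iftlog.exception( "iftfile: could not make chunk directory " + chunk_dir, inst )
      return E_IOERROR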
Example #6
 def read_next_chunk( self ):
    """
    Read the next chunk of this file
    Call fopen() before calling this.
    Return a chunk ID and a chunk with length 0 or more (0 indicates EOF).
    Return (None, negative error code) if the file isn't open or cannot be read.
    """
    
    if self.__mode != MODE_READ:
       # can't read if we aren't in READ mode
       self.__error = E_BAD_MODE
       return (None, E_BAD_MODE)
    
    if not os.path.exists(self.path):
       self.__error = E_IOERROR
       return (None, E_IOERROR)
    
    self.__read_lock.acquire()
    
    fd = None
    bytes = None
    
    try:
       fd = open(self.path, "r")
       fd.seek( self.__next_chunk * self.__chunk_size )
       bytes = fd.read( self.__chunk_size )
       fd.close()
    except Exception, inst:
       iftlog.exception( "iftfile: could not open " + self.path, inst)
       self.__error = E_IOERROR
       self.__read_lock.release()
       return (None, E_IOERROR)
Example #7
   def recv_job( self, job ):
      self.job_attrs = copy.deepcopy( job.attrs )    # keep our own copy so we can check out other data other protocols add 
      
      
      # try to get sender information, such as server version
      try:
         self.remote_host = job.get_attr( iftfile.JOB_ATTR_SRC_HOST ).strip()
         
         # remove a leading http://, https://, or ftp:// prefix
         # (str.lstrip() strips characters, not a prefix, so slice instead)
         for prefix in ("http://", "https://", "ftp://"):
            if self.remote_host.startswith( prefix ):
               self.remote_host = self.remote_host[ len(prefix): ]
               break
         
         self.file_size = -1
         if self.job_attrs and self.job_attrs.has_key( iftfile.JOB_ATTR_FILE_SIZE ):
            self.file_size = self.job_attrs[ iftfile.JOB_ATTR_FILE_SIZE ]
         
         # figure out how big the file is
         server_version = 10
         if self.file_size == -1:
            self.file_size, server_version = self.get_remote_file_attrs( job.get_attr( iftfile.JOB_ATTR_SRC_HOST ), self.connect_args[ iftproto.PROTO_PORTNUM ], job.get_attr( iftfile.JOB_ATTR_SRC_NAME ) )
         else:
            fs, server_version = self.get_remote_file_attrs( job.get_attr( iftfile.JOB_ATTR_SRC_HOST ), self.connect_args[ iftproto.PROTO_PORTNUM ], job.get_attr( iftfile.JOB_ATTR_SRC_NAME ) )
            if int(fs) != int(self.file_size):
               # problem--the file doesn't have the right size!
               iftlog.log(5, self.name + ": ERROR: given file size (" + str(self.file_size) + ") does not match server's file size (" + str(fs) + ").  -1 means got 404")
               return E_INVAL

         self.file_size = int(self.file_size)
         if self.file_size < 0:
            # error--couldn't stat remote file
            iftlog.log(5, self.name + ": WARNING: could not determine remote file size") 
            
         else:
            if not job.get_attr( iftfile.JOB_ATTR_FILE_SIZE ):
               job.set_attr( iftfile.JOB_ATTR_FILE_SIZE, self.file_size )
               self.job_attrs[ iftfile.JOB_ATTR_FILE_SIZE ] = self.file_size
             
            if server_version == 11:
               # we can do chunking internally
               self.set_chunking_mode( iftproto.PROTO_DETERMINISTIC_CHUNKING )
               print self.name + ": deterministic chunking activated"
            else:
               # old HTTP server, no chunking
               self.set_chunking_mode( iftproto.PROTO_NO_CHUNKING )
               print self.name + ": chunking deactivated"
                     
            #if self.http_version == 11:      # supports Range header
            #   self.use_chunking = True
            iftlog.log(1, self.name + ": expected file size of " + str(self.file_size) )
         return 0
         
      except Exception, inst:
         iftlog.exception( "http: could not await_sender", inst)
         return E_UNHANDLED_EXCEPTION
Example #8
 def recv_file( self, remote_chunk_dir, desired_chunks ):
    print "recv_file"
    chunk_dict = {}
    # get each chunk from the remote host
    try:
       
       for chunk in desired_chunks:
          connection = urllib2.Request( "http://" + self.remote_host + ":" + str(self.connect_args[iftproto.PROTO_PORTNUM]) + os.path.join( remote_chunk_dir, str(chunk) ) )
          response = urllib2.urlopen( connection )
          
          if response.code == 200:
             # got chunk
             chunk_dict[chunk] = response.read()
          else:
             iftlog.log(3, self.name + ": WARNING: could not get chunk " + str(chunk) + ", status = " + str(response.code) )
          
          response.close()
       
       if chunk_dict == {}:
          return (E_NO_DATA, None)
       
       return (0, chunk_dict)
    except Exception, inst:
       iftlog.exception(self.name + ": ERROR: could not get all chunks from " + str(self.remote_host) + " in " + str(remote_chunk_dir), inst)
       return (E_NO_CONNECT, None)
Example #9
 def await_sender( self, connection_attrs, timeout ):
    if connection_attrs != None:
       p = connection_attrs.get( PROTO_PORTNUM )
       if p != None:
          try:
             self.port = int(p)
          except:
             pass
    
    # set up a server socket to listen for the remote host
    self.soc = None
    self.connected = False
    
    if connection_attrs != None and connection_attrs.get( IFTSOCKET_TIMEOUT ) != None:
       self.timeout = connection_attrs.get( IFTSOCKET_TIMEOUT )
    else:
       self.timeout = 1
       
    if self.port != None:
       
       try:
          self.soc = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
          self.soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
          self.soc.settimeout( self.timeout )      # use the given timeout
          self.soc.bind( ("localhost", self.port) )
          self.soc.listen(1)      # only one remote host should talk to me
          iftlog.log(1, "iftsocket_receiver: Listening on localhost:" + str(self.port) )
       except Exception, inst:
          iftlog.exception( "iftsocket_receiver: could not set up server socket", str(inst) )
          return E_NO_CONNECT
       
       return 0
Example #10
 def do_GET(self):
    # file request?
    try:
       
       # which pieces were requested?
       # NOTE: the "True or" below bypasses the availability check, so the 404 branch is never taken
       if True or os.path.dirname( self.path.strip() ) in self.server.available_files:
          if self.path.endswith("done"):
             # receiver is done with this file
             #del self.server.available_files[ os.path.dirname( self.path.strip() ) ]
             self.send_response( 200 )
          
          else:
             # file has been made available
             SimpleHTTPRequestHandler.do_GET(self)
          
          return
       
       else:
          self.send_response(404)    # nothing to send
          return
    
    except Exception, inst:
       iftlog.exception( "Could not fully transmit " + self.path, inst)
       self.send_response(404)
       return
Example #11
   def unlock_chunk( self, owner, chunk_id ):
      """
      Unlock a chunk.
      Only valid for MODE_WRITE
      Return 0 on success; negative on failure
      """
      
      try:
         # sanity check
         if self.__mode != MODE_WRITE:
            self.__error = E_BAD_MODE
            return E_BAD_MODE
            
         if chunk_id < 0 or chunk_id >= self.__num_chunks:
            self.__error = E_INVAL
            return E_INVAL
         
         if self.__chunk_owners[ chunk_id ] == owner:
            self.__chunk_reservations[ chunk_id ] = 0
            self.__chunk_owners[ chunk_id ] = None
            self.__chunk_locks[ chunk_id ].release()
            return 0

         return E_INVAL
      except Exception, inst:
         if self.__open == False:
            iftlog.log(5, "iftfile.unlock_chunk: file is no longer open")
            
         iftlog.exception("iftfile.unlock_chunk: could not unlock " + str(chunk_id), inst)
         return E_BAD_STATE
Example #12
 def send_job( self, job ):
    # this file is to be made available
    try:
       self.http_server.available_files.append( job.get_attr( iftfile.JOB_ATTR_SRC_CHUNK_DIR ) )
       return 0
    except Exception, inst:
       iftlog.exception( self.name + ": could not make " + str(job.get_attr( iftfile.JOB_ATTR_SRC_CHUNK_DIR )) + " available", inst)
       return E_UNHANDLED_EXCEPTION
Example #13
def import_package(name):
    try:
        __import__(name)
        iftlog.log(0, "iftloader: Loaded " + name)
        return 0
    except Exception, inst:
        iftlog.exception("iftloader: could not load " + name, inst)
        return -1
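
A short usage sketch (the module name is hypothetical); callers are expected to check the return code rather than catch ImportError themselves:

# hypothetical usage: load an optional protocol module at startup
if import_package( "iftproto_http" ) != 0:
   iftlog.log(5, "iftloader: continuing without iftproto_http support")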
Example #14
  def recv_chunks( self, remote_chunk_dir, desired_chunks ):
     
     # determine what has been received since the last time this was called
     chunk_list = self.get_chunk_list()
     active_set = set( chunk_list )
     
     # wait until we actually receive something
     while len(active_set) - len(self.recv_prev) == 0:
        time.sleep(0.5)
        active_set = active_set | set( self.get_chunk_list() )
        
        if self.torrent_handle.is_seed():
           # we have all chunks
           active_set = set([i for i in xrange(0, self.torrent_handle.status().num_pieces)])
           break
        
        
        
        s = self.torrent_handle.status()
        state_str = ['queued', 'checking', 'downloading metadata', \
               'downloading', 'finished', 'seeding', 'allocating']
        print '%.2f%% complete (down: %.1f kb/s up: %.1f kB/s peers: %d) %s' % \
               (s.progress * 100, s.download_rate / 1000, s.upload_rate / 1000, \
               s.num_peers, state_str[s.state])
 
        
        if not iftapi.is_alive():
           return E_FAILURE
        continue
     
     # indicate what we have received
     new_chunks = active_set - self.recv_prev
     
     iftlog.log(3, self.name + ": received " + str(len(new_chunks)) + " more chunks")
    
     self.recv_prev = active_set
        
     print "have now received " + str(len(self.recv_prev)) + " chunks" 
     # convert to dictionary
     bt_dir = self.torrent_handle.save_path()
     rc = 0
     for chunk_id in new_chunks:
        file_slices = self.torrent_info.map_block( chunk_id, 0, self.chunksize )    # which file(s) did this chunk correspond to?
        for fs in file_slices:
           recv_file = self.torrent_info.file_at( fs.file_index )
           try:
              chunk_fd = open( bt_dir + "/" + recv_file.path, "r")
              chunk_fd.seek( fs.offset )
              chunk_data = chunk_fd.read( self.chunksize )
              if chunk_data:
                 self.add_chunk( chunk_id, chunk_data )
              chunk_fd.close()
              print "copy chunk " + str(chunk_id) + " from " + str(recv_file.path) + " at offset " + str(fs.offset) + ", length " + str(len(chunk_table[chunk_id])) + " (chunksize is " + str(self.chunksize) + ")"
           except Exception, inst:
              iftlog.exception( self.name + ": could not get chunk " + str(chunk_id) + " from " + str(recv_file) + " at offset " + str(fs.offset), inst)
              rc = E_IOERROR
              continue
Example #15
 def __write_file( self, path, bytes ):
    try:
       fd = open( path, "wb" )
       fd.write( bytes )
       fd.close()
       return 0
    except Exception, inst:
       iftlog.exception( self.name + ".__write_file: could not write to " + path, inst )
       return E_IOERROR
Example #16
 def lock_chunk( self, owner, chunk_id, override=False, t=1.0 ):
    """
    Lock a chunk for writing.
    No other threads can access it.
    Only valid for MODE_WRITE
     Blocks until the lock is acquired; returns 0 on success or a negative error code on failure.
    """
    
    try:
       # sanity check
       if self.marked_complete:
          self.__error = E_COMPLETE
          return E_COMPLETE
       
       if self.__mode != MODE_WRITE:
          self.__error = E_BAD_MODE
          return E_BAD_MODE
       
       # if we know how many chunks there are, then lock it as usual
       if self.known_size == True:
          if chunk_id < 0 or chunk_id >= self.__num_chunks:
             self.__error = E_INVAL
             return E_INVAL
       else:
          # make a new entry if we need to and lock it
          if chunk_id < 0:
             self.__error = E_INVAL
             return E_INVAL
      
          self.__grow_metadata( chunk_id )
       
       # can't lock the chunk if it already has data
       if self.__chunk_mask[ chunk_id ]:
          return E_DUPLICATE
       
       self.__chunk_locks[ chunk_id ].acquire()
       
       # we're takin' over
       if override:
          self.__chunk_reservations[ chunk_id ] = time.time() + t
          self.__chunk_owners[ chunk_id ] = owner
       
       # sanity check again (in case this was called after fclose())
       if self.__mode != MODE_WRITE:
          self.__error = E_BAD_MODE
          self.__chunk_locks[ chunk_id ].release()
          return E_BAD_MODE
       
       # when we return, this thread holds the lock
       return 0
    except Exception, inst:
        if self.__open == False:
           iftlog.log(5, "iftfile.lock_chunk: file is no longer open")
           
        # should only happen if the file gets closed by another thread
        iftlog.exception("iftfile.lock_chunk: could not lock chunk " + str(chunk_id), inst)
       return E_BAD_STATE
Example #17
def delete_proto_instance(handle):
    if __loaded_proto_instances.has_key(handle) == False:
        return E_NO_VALUE

    try:
        del __loaded_proto_instances[handle]
        return 0
    except Exception, inst:
        iftlog.exception("iftloader: could not delete protocol instance", inst)
        return E_UNHANDLED_EXCEPTION
Example #18
 def fpurge(self):
    """
    Erase all data we've retained.  Do so atomically.
    """
    self.__expand_lock.acquire()
    self.__open = False
    try:
       os.remove( self.path)
    except Exception, inst:
       iftlog.exception("iftfile: could not fpurge " + self.path, inst)
Example #19
 def setup( self, setup_attrs ):
    try:
       self.port = setup_attrs[ PROTO_PORTNUM ]
       server_addr = ('', self.port)
       self.http_server = ift_http_server( server_addr, ift_http_request_handler )
       
       thread.start_new_thread( self.http_server.serve_forever, () )
       return 0
    except Exception, inst:
       iftlog.exception( "http_sender.setup: could not start HTTP server", inst )
       return E_NO_CONNECT
Example #20
 def set_chunk( self, chunk, chunk_id, trunicate=True, strict=False ):
    """
    Receive a chunk to be written.
     No truncation will happen unless trunicate == True
    Chunk does not need to be exactly the right size unless strict = True
    CALL ONLY FOR LOCKED CHUNKS
    Return 0 on success
    Return E_INVAL if there is already a chunk for this id, or if the id is out of range
    Return E_BAD_MODE if we're in READ mode
    Return E_OVERFLOW if the upper bound of the known file size has been reached
    Return E_UNDERFLOW if the chunk was too small and strict == True
    """
    
    try:
       
       if self.marked_complete:
          self.__error = E_COMPLETE
          return E_COMPLETE
       
       if self.__mode != MODE_WRITE:
          # can't write if we aren't in WRITE mode
          self.__error = E_BAD_MODE
          return E_BAD_MODE
       
       if self.__bytes_max > 0 and self.__bytes_written + len(chunk) > self.__bytes_max:
          # problem--got too much data
          iftlog.log(3, "iftfile: attempted to write " + str(len(chunk)) + " more bytes beyond required maximum of " + str(self.__bytes_max) + ", will not write chunk " + str(chunk_id))
          return E_OVERFLOW
       
        if trunicate and len(chunk) > self.__chunk_size:
           iftlog.log(3, "iftfile: got chunk of length " + str(len(chunk)) + ", truncating to " + str(self.__chunk_size))
           chunk = chunk[0 : self.__chunk_size]
       
       if len(chunk) < self.__chunk_size and strict:
          return E_UNDERFLOW
       
       
       # write to disk
       try:
          fd = open(self.path, "ab")
          fd.seek( self.__chunk_size * chunk_id )
          fd.write( chunk )
          fd.close()
       except Exception, inst:
          iftlog.exception("iftfile.set_chunk: could not write to " + self.path, inst)
          return E_IOERROR
 
       # do not receive this chunk again
       rc = self.mark_chunk( chunk_id, True )
       if rc != 0:
          iftlog.log(3, "iftfile: will not write chunk " + str(chunk_id))
          return rc
       
       return 0
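
set_chunk() must only be called while the chunk lock is held. A minimal sketch of the intended calling sequence, assuming an iftfile object f opened in MODE_WRITE and a hypothetical buffer of received bytes; override=True is used so that unlock_chunk() recognizes the owner:

owner = "http_receiver"                       # hypothetical owner tag
chunk_id = 4
chunk_data = received_chunks[ chunk_id ]      # hypothetical buffer of received bytes

rc = f.lock_chunk( owner, chunk_id, override=True )
if rc == 0:
   try:
      rc = f.set_chunk( chunk_data, chunk_id )
      if rc != 0:
         iftlog.log(3, "could not write chunk " + str(chunk_id) + ", rc = " + str(rc))
   finally:
      f.unlock_chunk( owner, chunk_id )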
Example #21
 def prepare_transmit( self, job ):
    # create the .torrent file from the given file
    fs = lt.file_storage()
    
    lt.add_files( fs, job.get_attr( iftfile.JOB_ATTR_SRC_NAME ) )
    
    ct = lt.create_torrent( fs, job.get_attr( iftfile.JOB_ATTR_CHUNKSIZE ) )
    
    ct.set_creator("iftd: " + self.name )
    
    self.file_to_send = job.get_attr( iftfile.JOB_ATTR_SRC_NAME )
    
    # if we were given a tracker or list of trackers, add them
    if job.get_attr( IFTBITTORRENT_TRACKER ) != None:
       if type(job.get_attr( IFTBITTORRENT_TRACKER )) == str:
          ct.add_tracker( job.get_attr( IFTBITTORRENT_TRACKER ), 0 )
       
       if type(job.get_attr( IFTBITTORRENT_TRACKER )) == list:
          for tracker in job.get_attr( IFTBITTORRENT_TRACKER ):
             ct.add_tracker(tracker, 0)
          
       
    
    else:
       # add some default trackers
       ct.add_tracker("http://tracker.openbittorrent.com/announce", 0)
       ct.add_tracker("udp://tracker.openbittorrent.com:80/announce", 0)
       ct.add_tracker("http://tracker.publicbt.com/announce", 0)
       ct.add_tracker("udp://tracker.publicbt.com:80/announce", 0)
    
    # if we were given one or more http seeds, add them too
    if job.get_attr( IFTBITTORRENT_HTTP_SEEDS ) != None:
       if type(job.get_attr( IFTBITTORRENT_HTTP_SEEDS )) == str:
          ct.add_url_seed( job.get_attr( IFTBITTORRENT_HTTP_SEEDS ) )
        
       if type(job.get_attr( IFTBITTORRENT_HTTP_SEEDS )) == list:
          for seed in job.get_attr( IFTBITTORRENT_HTTP_SEEDS ):
             ct.add_url_seed( seed )
    
    lt.set_piece_hashes( ct, os.path.dirname( job.get_attr( iftfile.JOB_ATTR_SRC_NAME ) ) )
    
    # encode the torrent into a .torrent buffer
    self.torrent_str = lt.bencode( ct.generate() )
    
    # if given a torrent path, write out the torrent
    if job.get_attr( IFTBITTORRENT_TORRENT_PATH ) != None:
       try:
          fd = open( job.get_attr( IFTBITTORRENT_TORRENT_PATH ), "wb" )
          fd.write( self.torrent_str )
          fd.close()
       except Exception, inst:
          iftlog.exception( self.name + ": could not output torrent data to " + job.get_attr( IFTBITTORRENT_TORRENT_PATH ), inst)
          return E_IOERROR
Example #22
 def prepare_transmit( self, job ):
    # file should have remote hostname
    p = job.get_attr( PROTO_PORTNUM )
    if p != None:
       self.port = p
       
    remote_host = None
    try:
       remote_host = job.attrs[ iftfile.JOB_ATTR_DEST_HOST]
    except Exception, inst:
       iftlog.exception( self.name + ": No remote host specified", inst )
       return E_NO_CONNECT
Example #23
def make_chunks( filename, chunksize ):
   """
   Split the given file into chunks and store them in the chunks directory.
   Returns (0, file_hash, chunk_hashes, chunk_paths) on success; (nonzero, None, None, None) on error.
   """
   # sanity check
   if not os.path.exists(filename):
      iftlog.log(3, "Skipping " + filename + " since it cannot be found")
      return (E_IOERROR, None, None, None)
   
   if not (stat.S_IRUSR & os.stat( filename ).st_mode):
      iftlog.log(3, "Skipping " + filename + " since I do not have read permission")
      return (E_IOERROR, None, None, None)
   
   # get file hash
   file_hash = get_hash( filename )
   
   first_char = ""
   if filename[0] == "/":
      filename = filename[1:]
      first_char = "/"
      
   file_dir = os.path.basename(filename) + "." + str(file_hash)
   
   # does the directory exist?
   if os.path.exists( __file_chunks_dir + file_dir ):
      iftlog.log(3, "WARNING: " + __file_chunks_dir + file_dir + " exists!  Removing...")
      rc = os.popen("rm -rf " + __file_chunks_dir + file_dir ).close()
      if rc != None:
         iftlog.log(5, "ERROR: could not make chunks for " + filename + "; " + __file_chunks_dir + file_dir + " could not be removed!")
         return (E_IOERROR, None, None, None)
      
   
   
   # make the directory and populate it with chunks
   rc = os.popen("mkdir -p " + __file_chunks_dir + file_dir ).close()
   if rc != None:
      iftlog.log(5, "ERROR: could not make chunk directory " + __file_chunks_dir + file_dir )
      return (E_IOERROR, None, None, None)
   
   # open the file
   fd = None
   try:
      fd = open(first_char + filename, "rb")
      if not fd:
         iftlog.log(5, "ERROR: could not open " + filename + " for reading!")
         cleanup_chunks( filename, file_hash )
         return (E_IOERROR, None, None, None)
   except Exception, inst:
      iftlog.exception("ERROR: could not open " + filename + " for reading!", inst)
      cleanup_chunks( filename, file_hash )
      return (E_UNHANDLED_EXCEPTION, None, None, None)
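
The excerpt stops right after opening the file. A plausible continuation (a sketch, not the project's actual code) reads chunksize bytes at a time, writes each chunk into the chunk directory, and hashes it, returning the tuple promised by the docstring:

   # sketch of the remainder: split the open file into numbered chunk files
   chunk_hashes = []
   chunk_paths = []
   chunk_id = 0
   while True:
      chunk = fd.read( chunksize )
      if chunk == "":
         break       # EOF
      
      chunk_path = os.path.join( __file_chunks_dir + file_dir, str(chunk_id) )
      try:
         chunk_fd = open( chunk_path, "wb" )
         chunk_fd.write( chunk )
         chunk_fd.close()
      except Exception, inst:
         iftlog.exception("ERROR: could not write chunk " + chunk_path, inst)
         cleanup_chunks( filename, file_hash )
         fd.close()
         return (E_IOERROR, None, None, None)
      
      chunk_hashes.append( get_hash( chunk_path ) )
      chunk_paths.append( chunk_path )
      chunk_id += 1
   
   fd.close()
   return (0, file_hash, chunk_hashes, chunk_paths)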
Example #24
 def open_connection( self, job ):
    """
    Get the job and validate receive attributes.
    If a job is given, then don't receive the job; just open the connection and use the given job.
    """
    
    # prepare to receive
    recv_status = 0
    try:
       recv_status = self.__prepare_receive( job )
    except Exception, inst:
       iftlog.exception( self.name + ": could not prepare to receive", inst)
       return E_UNHANDLED_EXCEPTION
Example #25
 def recv_job( self, job ):
    
    self.file_to_recv = job.get_attr( iftfile.JOB_ATTR_DEST_NAME )
    
    # begin to receive
    torrent_path = job.get_attr( IFTBITTORRENT_TORRENT_PATH )
    torrent_bits = None
    try:
        torrent_fd = open( torrent_path, "rb" )
        torrent_bits = torrent_fd.read()
        torrent_fd.close()
    except Exception, inst:
       iftlog.exception( self.name + ": could not read torrent file " + str(torrent_path), inst)
       return E_NO_DATA
Example #26
 def proto_clean(self):
    # let the remote host know to remove access to this file
    if self.job_attrs.get( iftfile.JOB_ATTR_REMOTE_IFTD ):
       try:
          connection = httplib.HTTPConnection( self.remote_host + ":" + str(self.connect_args[iftproto.PROTO_PORTNUM]) )
          connection.request( "GET", os.path.join( self.job_attrs.get( iftfile.JOB_ATTR_SRC_CHUNK_DIR ), "done") )
          response = connection.getresponse()
          if response.status != 200:
             iftlog.log(5, self.name + ": WARNING: could not tell sender to stop sending!")
             
          response.close()
          connection.close()
       except Exception, inst:
          iftlog.exception( self.name + ": WARNING: could not tell sender to stop sending!  Exception encountered.", inst)
          pass
Example #27
 def calc_hash(self):
    """
    Calculate our SHA-1 hash from all of our received chunks.
    Return a string of the hex digest
    """
    try:
       m = hashlib.sha1()
       read_fd = open( self.path, "r")
       buff = read_fd.read()
       m.update( buff )
       read_fd.close() 
       
       return m.hexdigest()
    except Exception, inst:
       iftlog.exception("iftfile.calc_hash", inst)
       return ""
Example #28
def cache_startup( cache_basedir, cache_server_portnum ):
   """
   Start up the caching system if not running.  Start the HTTP cache server and ensure that Squid is running.
   
   @return
      0 on success, negative on failure
   """
   
   global cache_ref
   global cache_sem
   global cache_server
   global cache_dir
   
   try:
      # are we running?
      cache_sem.acquire()
      if cache_ref != 0 or cache_server != None:
         iftlog.log(3, "iftcache: already running (refs = " + str(cache_ref) + ")")
         cache_sem.release()
         return 0    # already running
   
      # is Squid running?
      # TODO: is there a less kludgy way to do this?
      procs = os.popen( "ps -A | grep -i squid" ).readlines()
      if len(procs) <= 1:
         # only found our "grep" statement
         iftlog.log(5, "iftcache: Squid does not appear to be running...")
         cache_sem.release()
         return E_INVAL
   
      # attempt to make base directory, but fall back to a sensible default if that doesn't work
      try:
         if not os.path.exists( cache_basedir ):
            os.makedirs( cache_basedir )
         else:
            iftlog.log(3, "iftcache: WARNING: using existing directory " + cache_basedir + " as cache directory")
         cache_dir = cache_basedir
      except Exception, inst:
         try:
            os.makedirs( "/tmp/iftcache_" + str(os.getpid()))
            cache_dir = "/tmp/iftcache_" + str(os.getpid())
            iftlog.log(5, "iftcache: WARNING: " + cache_basedir + " is not valid, using /tmp/iftcache_" + str(os.getpid()) + " as base cache directory path")
         except Exception, inst:
            iftlog.exception("iftcache: Could not create cache directory!", inst)
            cache_sem.release()
            cache_shutdown()
            return E_UNHANDLED_EXCEPTION
Example #29
 def reserve_chunk( self, owner, chunk_id, t ):
    """
    Reserve a chunk for a period of time.
    """
    try:
       # if the file is closed by another thread, we might catch an exception
       
       # sanity check
       if self.marked_complete:
          self.__error = E_COMPLETE
          return E_COMPLETE
          
       if self.__mode != MODE_WRITE:
          self.__error = E_BAD_MODE
          return E_BAD_MODE
       
       if self.known_size == True:
          if chunk_id < 0 or chunk_id >= self.__num_chunks:
             self.__error = E_INVAL
             return E_INVAL
          
       else:
          if chunk_id < 0:
             self.__error = E_INVAL
             return E_INVAL
          
          # positive chunk id--do we need to add more locks?
          self.__grow_metadata( chunk_id )
       
       # someone else had better not own this
       if self.__chunk_reservations[chunk_id] >= time.time() and self.__chunk_owners[ chunk_id ] != None:
          return E_TRY_AGAIN
       
       # can't reserve if someone else locked the chunk
       if not self.__chunk_locks[ chunk_id ].acquire(False):
          return E_TRY_AGAIN
       
       # reserve the chunk
       self.__chunk_reservations[ chunk_id ] = time.time() + t
       self.__chunk_owners[ chunk_id ] = owner
       
        self.__chunk_locks[ chunk_id ].release()
        return 0
    except Exception, inst:
       # only happens if another thread closed the file
       iftlog.exception("iftfile.reserve_chunk: could not resrve chunk " + str(chunk_id) + " for " + str(owner), inst)
       return E_BAD_STATE
Example #30
 def mark_chunk( self, chunk_id, value ):
    """
    Mark a chunk as either set (value == True) or unset (value == False)
    Return 0 on success, nonzero otherwise
    """
    
    try:
       if self.marked_complete:
          self.__error = E_COMPLETE
          return E_COMPLETE
       
       if self.__mode != MODE_WRITE:
          # can't modify chunk mask if we aren't in WRITE mode
          self.__error = E_BAD_MODE
          return E_BAD_MODE
       
       # can we write?
       # don't overwrite what's already there
       if chunk_id < self.__num_chunks and chunk_id >= 0 and self.__chunk_mask[ chunk_id ] == True:
          self.__error = E_BAD_STATE
          return E_BAD_STATE
          
       if self.known_size == True:
          if chunk_id < 0 or chunk_id >= self.__num_chunks:
             self.__error = E_INVAL
             return E_INVAL
       else:
          if self.__bytes_max > 0 and self.__bytes_written + self.__chunk_size > self.__bytes_max:
             # problem--this would expand the file bigger than we have capped it at
             iftlog.log(3, "iftfile: attempted to write " + str(self.__chunk_size) + " more bytes beyond required maximum of " + str(self.__bytes_max) + ", will not mark chunk " + str(chunk_id) )
             return E_OVERFLOW
          
          num_chunks = len(self.__chunk_mask)
          if num_chunks < chunk_id + 1:
             self.__chunk_mask = self.__chunk_mask + [False] * (chunk_id + 1 - num_chunks)
          
          self.__num_chunks = len( self.__chunk_mask )
       
        self.__chunk_mask[ chunk_id ] = value    # honor the requested value (True == received, False == not received)
       return 0
    except Exception, inst:
       if self.__open == False:
          iftlog.log(5, "iftfile.mark_chunk: file is not open")
       
       iftlog.exception( "iftfile.mark_chunk: could not mark chunk " + str(chunk_id) + " as " + str(value))
       return E_BAD_STATE