def write(self, buf):
    """Curl WRITEFUNCTION callback: append *buf* to the response buffer.

    Raises ``Abort`` as soon as the abort flag is set.  If the buffered
    response grows past ~1 MB, the response collected so far is dumped to
    ``response.dump`` for inspection and a generic ``Exception`` is raised
    so the transfer stops instead of buffering an oversized page.

    :param buf: bytes chunk delivered by curl
    :raises Abort: when ``self.abort`` is set
    :raises Exception: when the buffered response exceeds the size limit
    """
    if self.rep.tell() > 1000000 or self.abort:
        # Check abort first: no point building/dumping the response
        # when the download is being cancelled anyway.
        if self.abort:
            raise Abort()
        rep = self.getResponse()
        # `with` guarantees the dump file is closed even if write() fails
        # (the original leaked the handle on a failed write).
        with open("response.dump", "wb") as f:
            f.write(rep)
        raise Exception("Loaded Url exceeded limit")
    self.rep.write(buf)
def download(self, ip, port, filename, irc, progressNotify=None):
    """Receive a file over an IRC DCC connection.

    Connects to ``(ip, port)``, streams the payload to *filename*
    (choosing a non-clashing ``name-N.ext`` variant if the file already
    exists), acknowledges the cumulative received byte count after every
    chunk as the DCC protocol requires, and returns the final filename.

    :param ip: peer address from the DCC handshake
    :param port: peer port
    :param filename: target path on disk
    :param irc: IRC connection object used for keep-alive traffic
    :param progressNotify: optional callable invoked with ``self.percent``
    :raises Abort: when ``self.abort`` is set during the transfer
    :return: the filename the data was actually written to
    """
    ircbuffer = ""
    lastUpdate = time()
    cumRecvLen = 0

    dccsock = self.createSocket()
    dccsock.settimeout(self.timeout)
    dccsock.connect((ip, port))

    if exists(filename):
        # Pick a free "name-N.ext" variant instead of overwriting.
        root, dot, ext = filename.rpartition(".")
        if not dot:
            # rpartition on an extensionless name puts everything into
            # `ext` ("file" -> ('', '', 'file')), which used to produce
            # broken names like "-0file"; normalize so the counter lands
            # after the name instead.
            root, ext = filename, ""
        i = 0
        while True:
            newfilename = "%s-%d%s%s" % (root, i, dot, ext)
            i += 1
            if not exists(newfilename):
                filename = newfilename
                break

    fh = open(filename, "wb")
    aborted = False
    try:
        # recv loop for dcc socket
        while True:
            if self.abort:
                aborted = True
                raise Abort()

            self._keepAlive(irc, ircbuffer)

            data = dccsock.recv(4096)
            dataLen = len(data)
            self.recv += dataLen
            cumRecvLen += dataLen

            now = time()
            timespan = now - lastUpdate
            if timespan > 1:
                # Recompute transfer speed roughly once per second.
                self.speed = cumRecvLen / timespan
                cumRecvLen = 0
                lastUpdate = now
                if progressNotify:
                    progressNotify(self.percent)

            if not data:
                # Peer closed the connection -> transfer complete.
                break

            fh.write(data)
            # acknowledge data by sending number of received bytes
            dccsock.send(struct.pack('!I', self.recv))
    finally:
        # Close resources on every exit path (the original leaked the
        # socket and file handle on unexpected errors); drop the partial
        # file only when the transfer was aborted, as before.
        dccsock.close()
        fh.close()
        if aborted:
            remove(filename)

    return filename
def _download(self, chunks, resume):
    """Drive the chunked download through the pycurl multi interface.

    Runs the multi-handle event loop until every chunk's curl handle has
    finished, verifying response headers, falling back to a single
    connection when parallel chunks fail, tracking per-chunk speeds, and
    finally flushing and merging the chunk files.

    :param chunks: desired number of parallel chunks
    :param resume: whether to resume from an existing info file
    :raises Abort: when ``self.abort`` is set during the loop
    """
    if not resume:
        self.info.clear()
        # Create an initial entry covering nothing yet; real ranges are
        # filled in once the size is known.
        self.info.addChunk("%s.chunk0" % self.filename, (0, 0))

    self.chunks = []

    # Initial chunk that will load the complete file (if chunking turns
    # out to be unsupported or unnecessary).
    init = HTTPChunk(0, self, None, resume)

    self.chunks.append(init)
    self.m.add_handle(init.getHandle())

    lastFinishCheck = 0
    lastTimeCheck = 0
    # Set of curl handles that are finished.
    chunksDone = set()
    chunksCreated = False
    done = False
    # This is a resume: if we were chunked originally, assume we still can be.
    if self.info.getCount() > 1:
        self.chunkSupport = True

    while 1:
        # Need to create the remaining chunks once the first chunk has
        # determined chunkSupport and size (set later by first chunk).
        if not chunksCreated and self.chunkSupport and self.size:
            if not resume:
                self.info.setSize(self.size)
                self.info.createChunks(chunks)
                self.info.save()

            chunks = self.info.getCount()

            init.setRange(self.info.getChunkRange(0))

            for i in range(1, chunks):
                c = HTTPChunk(i, self, self.info.getChunkRange(i), resume)

                handle = c.getHandle()
                if handle:
                    self.chunks.append(c)
                    self.m.add_handle(handle)
                else:
                    # Close immediately; the handle could not be created.
                    self.log.debug("Invalid curl handle -> closed")
                    c.close()

            chunksCreated = True

        # Pump curl until it has no immediate work left.
        while 1:
            ret, num_handles = self.m.perform()
            if ret != pycurl.E_CALL_MULTI_PERFORM:
                break

        t = time()

        # Reduce these calls: only inspect finished transfers twice a second.
        while lastFinishCheck + 0.5 < t:
            # List of failed chunks in this round.
            failed = []
            # Save only the last exception; we can only raise one anyway.
            ex = None

            num_q, ok_list, err_list = self.m.info_read()
            for c in ok_list:
                chunk = self.findChunk(c)
                # Check if the header implies success, else add it to failed list.
                try:
                    chunk.verifyHeader()
                except BadHeader as e:
                    self.log.debug("Chunk %d failed: %s" % (chunk.id + 1, str(e)))
                    failed.append(chunk)
                    ex = e
                else:
                    chunksDone.add(c)

            for c in err_list:
                curl, errno, msg = c
                chunk = self.findChunk(curl)

                # Test if chunk was in fact finished: curl error 23 with a
                # "0 != ..." message means the write callback stopped a
                # completed range, which is not a real failure.
                if errno != 23 or "0 !=" not in msg:
                    failed.append(chunk)
                    ex = pycurl.error(errno, msg)
                    self.log.debug("Chunk %d failed: %s" % (chunk.id + 1, str(ex)))
                    continue

                # Check if the header implies success, else add it to failed list.
                try:
                    chunk.verifyHeader()
                except BadHeader as e:
                    self.log.debug("Chunk %d failed: %s" % (chunk.id + 1, str(e)))
                    failed.append(chunk)
                    ex = e
                else:
                    chunksDone.add(curl)

            # No more infos to get from the multi handle.
            if not num_q:
                # Check if init is not finished, so we reset download
                # connections; note that other chunks are closed and the
                # rest is downloaded with init too.
                if failed and init not in failed and init.c not in chunksDone:
                    self.log.error(
                        _("Download chunks failed, fallback to single connection | %s" % (str(ex))))

                    # Clean and remove every chunk except init.
                    for chunk in filter(lambda x: x is not init, self.chunks):
                        self.closeChunk(chunk)
                        self.chunks.remove(chunk)
                        remove(fs_encode(self.info.getChunkName(chunk.id)))

                    # Let the first chunk load the rest and update the info file.
                    init.resetRange()
                    self.info.clear()
                    self.info.addChunk("%s.chunk0" % self.filename, (0, self.size))
                    self.info.save()
                elif failed:
                    raise ex

                lastFinishCheck = t

                if len(chunksDone) >= len(self.chunks):
                    if len(chunksDone) > len(self.chunks):
                        self.log.warning(
                            "Finished download chunks size incorrect, please report bug."
                        )
                    # All chunks loaded.
                    done = True

                break

        # All chunks loaded.
        if done:
            break

        # Calc speed once per second, averaging over 3 seconds.
        if lastTimeCheck + 1 < t:
            diff = [
                c.arrived - (self.lastArrived[i] if len(self.lastArrived) > i else 0)
                for i, c in enumerate(self.chunks)
            ]

            self.lastSpeeds[1] = self.lastSpeeds[0]
            self.lastSpeeds[0] = self.speeds
            self.speeds = [float(a) / (t - lastTimeCheck) for a in diff]
            self.lastArrived = [c.arrived for c in self.chunks]
            lastTimeCheck = t
            self.updateProgress()

        if self.abort:
            raise Abort()

        # sleep(0.003)  # suppress busy waiting - limits dl speed to (1 / x) * buffersize
        self.m.select(1)

    for chunk in self.chunks:
        # Make sure downloads are written to disk.
        chunk.flushFile()

    self._copyChunks()
def download(self, ip, port, filename, progressNotify=None, resume=None):
    """Receive a file over a DCC connection with optional resume support.

    Data is streamed non-blockingly into ``<filename>.chunk0`` and the
    chunk is renamed to *filename* once the peer closes the connection or
    ``self.filesize`` bytes have arrived.

    :param ip: peer address
    :param port: peer port
    :param filename: final target path
    :param progressNotify: optional progress callback, stored on ``self``
    :param resume: optional callable; given the size of the local chunk it
        must return the byte offset to resume from (lets the caller
        re-validate the partial file before continuing)
    :raises Abort: when ``self.abort`` is set during the transfer
    :return: the final filename
    """
    self.progressNotify = progressNotify
    # Peers expect 64-bit acknowledgements for files of 4 GiB or more.
    self.send_64bits_ack = self.filesize >= 1 << 32

    chunk_name = filename + ".chunk0"
    if resume and os.path.exists(chunk_name):
        self.fh = open(chunk_name, "ab")
        resume_position = self.fh.tell()
        if not resume_position:
            # Append-mode tell() may report 0 before the first write on
            # some platforms; fall back to the on-disk size.
            resume_position = os.stat(chunk_name).st_size
        # Let the caller adjust/verify the resume offset, then truncate
        # any trailing bytes beyond it.
        resume_position = resume(resume_position)
        self.fh.truncate(resume_position)
        self.received = resume_position
    else:
        self.fh = open(chunk_name, "wb")

    lastUpdate = time.time()
    cumRecvLen = 0

    self.dccsock = self.createSocket()
    recv_list = [self.dccsock]
    self.dccsock.connect((ip, port))
    self.dccsock.setblocking(0)

    try:
        # recv loop for dcc socket
        while True:
            if self.abort:
                raise Abort()

            fdset = select.select(recv_list, [], [], 0.1)
            if self.dccsock not in fdset[0]:
                continue

            try:
                data = self.dccsock.recv(16384)
            except socket.error as e:
                # Non-blocking socket: spurious readiness is harmless.
                if e.errno == errno.EAGAIN or e.errno == errno.EWOULDBLOCK:
                    continue
                else:
                    raise

            data_len = len(data)
            if data_len == 0 or self.filesize and self.received + data_len > self.filesize:
                # EOF, or the peer sent more than it announced.
                break

            cumRecvLen += data_len

            self._write_func(data)
            self._send_ack()

            now = time.time()
            timespan = now - lastUpdate
            if timespan > 1:
                # calc speed once per second, averaging over 3 seconds
                self.speeds[2] = self.speeds[1]
                self.speeds[1] = self.speeds[0]
                self.speeds[0] = float(cumRecvLen) / timespan
                cumRecvLen = 0
                lastUpdate = now

                self.updateProgress()
    finally:
        # Close on every exit path (the original leaked the socket and
        # file handle when an unexpected exception escaped the loop).
        # The partial chunk file is kept so a later resume can pick it up.
        self.dccsock.close()
        self.fh.close()

    os.rename(chunk_name, filename)
    return filename
if lastTimeCheck + 1 < t: diff = [ c.arrived - (self.lastArrived[i] if len(self.lastArrived) > i else 0) for i, c in enumerate(self.chunks) ] self.lastSpeeds[1] = self.lastSpeeds[0] self.lastSpeeds[0] = self.speeds self.speeds = [float(a) / (t - lastTimeCheck) for a in diff] self.lastArrived = [c.arrived for c in self.chunks] lastTimeCheck = t self.updateProgress() if self.abort: raise Abort() #sleep(0.003) #supress busy waiting - limits dl speed to (1 / x) * buffersize self.m.select(1) for chunk in self.chunks: chunk.flushFile() #make sure downloads are written to disk self._copyChunks() def updateProgress(self): if self.progressNotify: self.progressNotify(self.percent) def findChunk(self, handle): """ linear search to find a chunk (should be ok since chunk size is usually low) """