def decode_from_files(outfile, files, verbose=False):
  filefec.decode_from_files(outfile, files, verbose)


# Uncomment to test the zfec decoder with a test file (shares 2 and 3 missing):
# files = []
# for x in xrange(0, 10):
#     if x != 2 and x != 3:
#         files.append(open('test/100KB_0.0' + str(x) + '_10.fec', 'rb'))
# with open('test/100KB_0_restore', 'ab') as f:
#     decode_from_files(f, files, False)
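For reference, a minimal round-trip sketch of what the commented-out test exercises, assuming zfec is installed and a test/100KB input file exists (k=8 is an assumption; the test above only shows m=10 shares with two of them missing, and the share names follow what the test expects):

# A hedged sketch, not part of the original snippet: encode test/100KB into
# m=10 shares with k=8, then rebuild the file from the 8 shares that skip
# indices 2 and 3.
import os
from zfec import filefec

with open('test/100KB', 'rb') as inf:
    filefec.encode_to_files(inf, os.path.getsize('test/100KB'),
                            'test', '100KB_0', 8, 10)

shares = [open('test/100KB_0.0%d_10.fec' % x, 'rb')
          for x in range(10) if x not in (2, 3)]
with open('test/100KB_0_restore', 'wb') as outf:
    filefec.decode_from_files(outf, shares, False)
for sh in shares:
    sh.close()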
Example #2
def main():
    if '-V' in sys.argv or '--version' in sys.argv:
        print("zfec library version: ", libversion)
        print("zunfec command-line tool version: ", __version__)
        return 0

    parser = argparse.ArgumentParser(description="Decode data from share files.")

    parser.add_argument('-o', '--outputfile', required=True, help='file to write the resulting data to, or "-" for stdout', type=str, metavar='OUTF')
    parser.add_argument('sharefiles', nargs='*', help='share files to read the encoded data from', type=str, metavar='SHAREFILE')
    parser.add_argument('-v', '--verbose', help='print out messages about progress', action='store_true')
    parser.add_argument('-f', '--force', help='overwrite any file which is already in place of the output file', action='store_true')
    parser.add_argument('-V', '--version', help='print out version number and exit', action='store_true')
    args = parser.parse_args()

    if len(args.sharefiles) < 2:
        print("At least two sharefiles are required.")
        return 1

    if args.force:
        outf = open(args.outputfile, 'wb')
    else:
        try:
            flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL | getattr(os, 'O_BINARY', 0)
            outfd = os.open(args.outputfile, flags)
        except OSError:
            print("There is already a file named %r -- aborting.  Use --force to overwrite." % (args.outputfile,))
            return 2
        outf = os.fdopen(outfd, "wb")

    sharefs = []
    # This sort() actually matters for performance (shares with numbers < k
    # are much faster to use than the others), as well as being important for
    # reproducibility.
    args.sharefiles.sort()
    for fn in args.sharefiles:
        sharefs.append(open(fn, 'rb'))
    try:
        filefec.decode_from_files(outf, sharefs, args.verbose)
    except filefec.InsufficientShareFilesError as e:
        print(str(e))
        return 3
    finally:
        outf.close()
        for f in sharefs:
            f.close()

    return 0
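The O_WRONLY|O_CREAT|O_EXCL combination above refuses to clobber an existing output file without a race between checking for the file and opening it. On Python 3 the 'x' open mode gives the same guarantee; a minimal sketch (the file name is illustrative):

# Python 3 equivalent of the O_EXCL dance above (a sketch, not zfec code):
# mode 'xb' creates the file and fails atomically if it already exists.
try:
    outf = open('restored.bin', 'xb')
except FileExistsError:
    raise SystemExit("There is already a file named 'restored.bin' -- "
                     "aborting.  Use --force to overwrite.")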
Example #3
File: undeci.py  Project: axtl/deci
def _fec_decode(ns):
    logging.debug('UNFEC pass started')
    tmpd = tempfile.mkdtemp(dir=os.getcwd())
    logging.debug('created tempdir at %s' % tmpd)

    # walk first input dir, decode as we go along
    for root, dirs, files in os.walk(ns.inputs[0]):
        unrooted = os.path.relpath(root, ns.inputs[0])
        logging.debug('unrooted path: %s' % unrooted)
        for dname in dirs:
            osubdir = os.path.join(tmpd, dname)
            os.mkdir(osubdir)
            logging.debug('created: %s' % osubdir)
        for f in files:
            # get the real name by stripping the ".<shnum>_<m>.fec" suffix;
            # note re.split's third positional argument is maxsplit, so the
            # flags must be passed by keyword
            rname = re.split(r'\.[0-9]*_[0-9]*\.fec$', f, flags=re.IGNORECASE)[0]
            logging.debug('processing chunks for file: %s' % rname)
            # get all the file chunks into a list
            fecs = []
            for indir in ns.inputs:
                gpath = common.fec_glob(os.path.join(indir, unrooted, rname))
                fecs.extend(glob.glob(gpath))
            logging.debug('FEC chunks found for %s: %s' % (rname, fecs))
            fec_fds = [open(fec, 'rb') for fec in fecs]
            outpath = os.path.join(tmpd, unrooted, rname)
            try:
                with open(outpath, 'wb') as outfd:
                    filefec.decode_from_files(outfd, fec_fds, False)
                logging.debug('decoded successfully to %s' % outpath)
            except filefec.InsufficientShareFilesError as e:
                logging.debug('failed to write %s' % outpath)
                sys.stderr.write(repr(e))
                common.cleanup(tmpd)
                sys.exit(ERR['INSUF_SHARES'])
            finally:
                # close the share file handles opened above
                for fd in fec_fds:
                    fd.close()

    # all done, rename to output dir
    if os.path.exists(ns.output) and ns.force:
        shutil.rmtree(ns.output)
        logging.debug('removed existing output dir at %s' % ns.output)
    shutil.move(tmpd, ns.output)
    logging.debug('renamed temp dir %s to output dir %s' % (tmpd, ns.output))
    logging.info('UNFEC pass completed')
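Note that common.fec_glob is a helper from the axtl/deci project whose implementation is not shown here. Given the "<name>.<shnum>_<m>.fec" share naming used by filefec, a plausible (hypothetical) stand-in would be:

# Hypothetical stand-in for common.fec_glob (the real helper lives in the
# axtl/deci project): turn a base path into a glob pattern that matches
# filefec's "<name>.<shnum>_<m>.fec" share names.
def fec_glob(basepath):
    return basepath + '.*_*.fec'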
Example #5
    def download(self, video_name, start_frame):
        print '[user.py] P2Puser starts downloading'
        connected_caches = []
        self.not_connected_caches = not_connected_caches = []
        # Connect to the caches
        cache_ip_addr = retrieve_caches_address_from_tracker(self.tracker_address, 100, self.user_name)
        #cache_ip_addr[0][0] = '[' + cache_ip_addr[0][0] + ']'
        self.cache_ip_addr = cache_ip_addr
        self.num_of_caches = min(self.num_of_caches, len(cache_ip_addr))

        choke_state = 0 # 0 : usual state, 1 : overhead state
        choke_ct = 0

        for i in range(self.num_of_caches):
            each_client = ThreadClient(self, cache_ip_addr[i], self.packet_size, i)
            each_client.put_instruction('ID %s' % self.user_name)
            self.clients.append(each_client)
            connected_caches.append(each_client)
            print '[user.py] ', i, 'th connection is CONNECTED : ' , cache_ip_addr[i]
            if DEBUG_RYAN:
                pdb.set_trace()

        for i in range(self.num_of_caches, len(cache_ip_addr)): #Is it not entering this statement here?
            if DEBUG_RYAN:
                pdb.set_trace()
            each_client = ThreadClient(self, cache_ip_addr[i], self.packet_size, i)
            each_client.put_instruction('ID %s' % self.user_name)
            not_connected_caches.append(each_client)
            print '[user.py] ', i, 'th connection is RESERVED: ' , cache_ip_addr[i]

        available_chunks = set([])
        print '[user.py] putting VLEN', video_name
        self.clients[0].put_instruction('VLEN file-%s' % (video_name))
        print '[user.py] retrieving VLEN'
        vlen_str = self.clients[0].get_response().split('\n')[0]
        vlen_items = vlen_str.split('&')
        print "VLEN: ", vlen_items
        num_frames, code_param_n, code_param_k = int(vlen_items[0]), int(vlen_items[4]), int(vlen_items[5])

        base_file_name = video_name + '.mkv'
        #turning it into an .mkv also works. Probably should store what kind of file it is server side
        #or just make everything .mkv. .MKV is a container file for video, audio, and other stuff.
        #Read here for a nice description:
        #http://lifehacker.com/5893250/whats-the-difference-between-all-these-video-formats-and-which-one-should-i-use
        #base_file_name = video_name + '.mkv'
        try:
            os.mkdir('video-' + video_name)
        except OSError:
            pass  # the directory may already exist

        # Set internal chunk_size through putting an internal instruction into
        # the queue.
        base_file = open('video-' + video_name + '/' + base_file_name, 'ab')
        base_file_full_path = os.path.abspath('video-' + video_name + '/' + base_file_name)

        self.info_thread = infoThread(video_name, code_param_n, code_param_k, self)
        self.info_thread.flag = True
        self.info_thread.start()

        for frame_number in xrange(start_frame, num_frames + 1):
            sys.stdout.flush()
            effective_rates = [0]*len(self.clients)
            assigned_chunks = [0]*len(self.clients)

            if frame_number < num_frames: # Usual frames
                inst_INTL = 'INTL ' + 'CNKN ' + vlen_items[2] # chunk size of typical frame (not last one)
                for client in self.clients:
                    client.put_instruction(inst_INTL)
                self.server_client.put_instruction(inst_INTL)
            else: # Last frame
                inst_INTL = 'INTL ' + 'CNKN ' + vlen_items[3] # chunk size of last frame
                for client in self.clients:
                    client.put_instruction(inst_INTL)
                self.server_client.put_instruction(inst_INTL)

            print '[user.py] frame_number : ', frame_number
            filename = 'file-' + video_name + '.' + str(frame_number)
            # directory for this frame
            folder_name = 'video-' + video_name + '/' + video_name + '.' + str(frame_number) + '.dir/'

            # get available chunks lists from cache A and B.
            inst_CNKS = 'CNKS ' + filename
            inst_RETR = 'RETR ' + filename
            inst_UPDG = 'UPDG '
            inst_NOOP = 'NOOP'
            inst_CACHEDATA = 'CACHEDATA ' 

            ###### DECIDING WHICH CHUNKS TO DOWNLOAD FROM CACHES: TIME 0 ######
            if DEBUG_RYAN:
                pdb.set_trace()
            available_chunks = [0]*len(self.clients) # available_chunks[i] = cache i's available chunks
            rates = [0]*len(self.clients) # rates[i] = cache i's offered rate
            union_chunks = [] # union of all available indices
            for i in range(len(self.clients)):
                client = self.clients[i]
                client.put_instruction(inst_CNKS)
                return_str = client.get_response().split('&')
                if return_str[0] == '':
                    available_chunks[i] = []
                else:
                    available_chunks[i] = map(str, return_str[0].split('%'))
                    for j in range(len(available_chunks[i])):
                        available_chunks[i][j] = available_chunks[i][j].zfill(2)
                rates[i] = int(return_str[1])
                union_chunks = list( set(union_chunks) | set(available_chunks[i]) )

            ## index assignment here
            # Assign chunks to cache using cache_chunks_to_request.
            print '[user.py] Rates ', rates
            print '[user.py] Available chunks', available_chunks

            assigned_chunks = cache_chunks_to_request(available_chunks, rates, code_param_n, code_param_k)

            effective_rates = [0]*len(rates)
            for i in range(len(rates)):
                effective_rates[i] = len(assigned_chunks[i])

            chosen_chunks = [j for i in assigned_chunks for j in i]

            flag_deficit = int(sum(effective_rates) < code_param_k) # True if user needs more rate from caches

            list_of_cache_requests = []
            # request assigned chunks
            for i in range(len(self.clients)):
                client = self.clients[i]
                client_ip_address = client.address[0] + ':' + str(client.address[1])
                print '[user.py] Server_request 2 = "' , assigned_chunks[i] , '"'
                client_request_string = '%'.join(assigned_chunks[i]) + '&1'
                print "[user.py] [Client " + str(i) + "] flag_deficit: ", flag_deficit, \
                    ", Assigned chunks: ", assigned_chunks[i], \
                    ", Request string: ", client_request_string
                if DEBUG_RYAN:
                    pdb.set_trace()
                cachedata_request_string = client_request_string.replace('&1','&' + client_ip_address)
                list_of_cache_requests.append(filename+ '.' + cachedata_request_string)
                client.put_instruction(inst_UPDG + str(flag_deficit))
                client.put_instruction(inst_RETR + '.' + client_request_string)
                if False: # WHY IS THIS NOT WORKING?
                    if not assigned_chunks[i]:
                        pass
                        #client.put_instruction(inst_NOOP)
                    else:
                        client.put_instruction(inst_RETR + '.' + client_request_string)

            ###### DECIDING CHUNKS THAT HAVE TO BE DOWNLOADED FROM CACHE: TIME 0 ######
            # Before CACHE_DOWNLOAD_DURATION, also start requesting chunks from server.
            server_request = []
            server_request_2 = []
            cdrs = '_'.join(list_of_cache_requests)  # cache data request string; parsed by server.py's ftp_CACHEDATA
            if frame_number < num_frames:
                size_of_chunks = vlen_items[2]
            else:
                size_of_chunks = vlen_items[3]
            cdrs = cdrs + '?' + str(size_of_chunks)    
            chosen_chunks = list(chosen_chunks)
            num_chunks_rx_predicted = len(chosen_chunks)
            server_request = chunks_to_request(chosen_chunks, range(0, code_param_n), code_param_k - num_chunks_rx_predicted)
            num_of_chks_from_server = len(server_request)
            if num_of_chks_from_server == 0:
                self.server_client.put_instruction(inst_NOOP)
                print '[user.py] Caches handling code_param_k chunks, so no request to server. Sending a NOOP'
            else:
                print '[user.py] Server_request = "' , server_request , '"'
                server_request_string = '%'.join(server_request) + '&1'
                if DEBUG_RYAN:
                    pdb.set_trace()
                self.server_client.put_instruction(inst_CACHEDATA + cdrs)
                self.server_client.put_instruction(inst_RETR + '.' + server_request_string)
                if(DEBUGGING_MSG):
                    print "[user.py] Requesting from server: ", server_request, ", Request string: ", server_request_string
                    print "[user.py] Sending to server's ftp_CACHEDATA: ", inst_CACHEDATA + cdrs

            #update_server_load(tracker_address, video_name, num_of_chks_from_server)

            sleep(CACHE_DOWNLOAD_DURATION)
            ###### STOPPING CACHE DOWNLOADS: TIME 8 (CACHE_DOWNLOAD_DURATION) ######

            # immediately stop cache downloads.
            for client in self.clients:
                try:
                    client.client.abort()
                except:
                    print "[user.py] Cache connections suddenly aborted. Stopping all download."
                    return
            print "[user.py] Cache connections aborted for frame %d" % (frame_number)

            ###### REQUEST ADDITIONAL CHUNKS FROM SERVER: TIME 8 (CACHE_DOWNLOAD_DURATION) ######
            # Request from server remaining chunks missing
            # Look up the download directory and count the downloaded chunks
            chunk_nums_rx = chunk_nums_in_frame_dir(folder_name)
            if (DEBUGGING_MSG):
                print "%d chunks received so far for frame %d: " % (len(chunk_nums_rx), frame_number)
                print chunk_nums_rx

            # Add the chunks that have already been requested from server

            chunk_nums_rx = list(set(chunk_nums_in_frame_dir(folder_name)) | set(server_request))
            print "[user.py] chunk_nums_rx", chunk_nums_rx
            addtl_server_request = []
            num_chunks_rx = len(chunk_nums_rx)
            if (num_chunks_rx >= code_param_k):
                print "[user.py] No additional chunks to download from the server. Sending a NOOP"
                self.server_client.put_instruction(inst_NOOP)
            else:
                addtl_server_request = chunks_to_request(chunk_nums_rx, range(0, code_param_n), code_param_k - num_chunks_rx)
                print "[user.py] addtl_server_requests", addtl_server_request
                if addtl_server_request:
                    addtl_server_request_string = '%'.join(addtl_server_request) + '&1'     # The last digit '1' means 'I am user'
                    # server should always be set with flag_deficit = 0 (has all chunks)
                    self.server_client.put_instruction(inst_RETR + '.' + addtl_server_request_string)
                    if(DEBUGGING_MSG):
                        print "[user.py] Requesting from server: ", addtl_server_request
                elif (DEBUGGING_MSG):
                    print "No unique chunks from server requested."

            ###### WAIT FOR CHUNKS FROM SERVER TO FINISH DOWNLOADING: TIME 10 ######
            sleep(SERVER_DOWNLOAD_DURATION)

            if (DEBUGGING_MSG):
                print "[user.py] Waiting to receive all elements from server."
            if frame_number > start_frame and (server_request or addtl_server_request) and VLC_PLAYER_USE:
                # Need to pause it!
                self.VLC_pause_video()
            if server_request:
                resp_RETR = self.server_client.get_response()
                parsed_form = parse_chunks(resp_RETR)
                fname, framenum, chunks, user_or_cache = parsed_form
                print "[user.py] Downloaded chunks from server: ", chunks
            if addtl_server_request:
                resp_RETR = self.server_client.get_response()
                parsed_form = parse_chunks(resp_RETR)
                fname, framenum, chunks, user_or_cache = parsed_form
                print "[user.py] Downloaded chunks from server: ", chunks

            # Now play it
            if frame_number > start_frame and (server_request or addtl_server_request) and VLC_PLAYER_USE:
                self.VLC_pause_video()

            chunk_nums = chunk_nums_in_frame_dir(folder_name)
            num_chunks_rx = len(chunk_nums)
            if num_chunks_rx >= code_param_k and DEBUGGING_MSG:
                print "[user.py] Received", code_param_k, "packets"
            else:
                print "[user.py] Did not receive", code_param_k, "packets for this frame."

            # abort the connection to the server
            self.server_client.client.abort()

            # put together chunks into single frame; then concatenate onto original file.
            print 'about to decode...'
            chunksList = chunk_files_in_frame_dir(folder_name)

            if frame_number != start_frame:
                print 'size of base file:', os.path.getsize('video-' + video_name + '/' + base_file_name)
            print 'trying to decode'
            filefec.decode_from_files(base_file, chunksList)
            print 'decoded.  Size of base file =', os.path.getsize('video-' + video_name + '/' + base_file_name)
            if frame_number == 1 and VLC_PLAYER_USE:
                self.VLC_empty_list()
                self.VLC_start_video(base_file_full_path)

            if USER_TOPOLOGY_UPDATE:
                if choke_state == 0: # Normal state
                    print '[user.py] Normal state : ', choke_ct
                    choke_ct += 1
                    if choke_ct == T_choke:
                        choke_ct = 0
                        if len(not_connected_caches) == 0:
                            pass
                        else: # Add a new cache temporarily
                            new_cache_index = random.sample(range(len(not_connected_caches)), 1)
                            if new_cache_index:  # random.sample returns a list, not an int
                                new_cache = not_connected_caches[new_cache_index[0]]
                                self.clients.append(new_cache)
                                connected_caches.append(new_cache)
                                not_connected_caches.remove(new_cache)
                                print '[user.py] Topology Update : Temporarily added ', new_cache.address
                                choke_state = 1 # Now, move to transitional state
                                choke_ct = 0
                                print '[user.py] Topology Update : Now the state is changed to overhead state'
                                #print '[user.py]', connected_caches, not_connected_caches, self.clients
                                print '[user.py] connected caches', self.clients

                elif choke_state == 1: # Overhead state
                    print '[user.py] Overhead state : ', choke_ct
                    choke_ct += 1
                    if choke_ct == T_choke2: # Temporary period to spend with temporarily added node
                        rate_vector = [0] * len(self.clients)
                        p_vector = [0] * len(self.clients)
                        for i in range(len(self.clients)):
                            rate_vector[i] = len(assigned_chunks[i])
                            p_vector[i] = math.exp( -eps_choke * rate_vector[i])
                        p_sum = sum(p_vector)
                        for i in range(len(self.clients)):
                            p_vector[i] /= p_sum

                        cdf = [(0,0)] * len(self.clients)
                        cdf[0] = (0, 0)
                        for i in range(1, len(self.clients)):
                            cdf[i] = (i, cdf[i-1][1] + p_vector[i-1])

                        print '[user.py] cdf :', cdf
                        client_index = max(i for r in [random.random()] for i,c in cdf if c <= r) # http://stackoverflow.com/questions/4265988/generate-random-numbers-with-a-given-numerical-distribution
                        removed_cache = self.clients[client_index]
                        #removed_cache.put_instruction('QUIT')
                        self.clients.remove(removed_cache)
                        connected_caches.remove(removed_cache)
                        not_connected_caches.append(removed_cache)

                        print '[user.py] Topology Update : ', removed_cache.address, 'is choked.'

                        choke_state = 0 # Now, move to normal state
                        choke_ct = 0
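The choke step above is inverse-CDF sampling with weights exp(-eps_choke * rate): the fewer chunks a cache delivered in the last round, the larger its weight, so the slowest contributor is the likeliest to be dropped. A self-contained sketch of that draw (the function name and eps default are illustrative, not from the original project):

# Standalone sketch of the choke draw above: sample an index with
# probability proportional to exp(-eps * rate), so low-rate caches
# are choked more often.
import math
import random

def pick_cache_to_choke(rates, eps=0.5):
    weights = [math.exp(-eps * r) for r in rates]
    threshold = random.random() * sum(weights)
    acc = 0.0
    for i, w in enumerate(weights):
        acc += w
        if threshold < acc:
            return i
    return len(weights) - 1  # guard against floating-point round-off

# e.g. rates = [5, 1, 3] makes index 1 (the slowest cache) most likely.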