Example #1
 def sendSuccess(self, resp, command, data, prepend=None):
     logger.debug("SUCCESS! "+command+":"+data)
     #logger.debug("response: '%s'" % (resp,))
     if prepend:
         w = "%s:%s %s:%s\r\n" % (prepend, command, fencode(resp), data)
     else:
         w = "%s:%s:%s\r\n" % (command, fencode(resp), data)
     self.transport.write(w)
     self.commands[command][CONCURR] -= 1
     try:
         self.serviceQueue(command)
     except:
         print sys.exc_info()
     return resp
Example #2
def expireChallenge(challenge, expired=False):
    try:
        challenge = fdecode(challenge)
    except:
        pass
    if outstandingChallenges.has_key(challenge):
        del outstandingChallenges[challenge]
        if expired:
            # XXX: should put these in an expired challenge list so that we
            #      can send a more useful message on failure (must then do
            #      expirations on expired list -- maybe better to just make
            #      these expirations really liberal).
            loggerauth.debug("expired challenge %s" % fencode(challenge))
        else:
            loggerauth.debug("deleted challenge %s" % fencode(challenge))
Example #3
    def __init__(self, nKu, node, host, port, filename, offset, length, 
            meta=None):
        """
        Try to verify a file.
        If meta is present, it should be a (metakey, filelikeobj) pair.
        """
        host = getCanonicalIP(host)
        REQUEST.__init__(self, host, port, node)

        filekey = os.path.basename(filename) # XXX: filekey should be hash
        loggervrfy.info("sending VERIFY request to %s:%s" % (host, str(port)))
        Ku = self.node.config.Ku.exportPublicKey()
        url = 'http://'+host+':'+str(port)+'/hash/'+filekey+'?'
        url += 'nodeID='+str(self.node.config.nodeID)
        url += '&port='+str(self.node.config.port)
        url += "&Ku_e="+str(Ku['e'])
        url += "&Ku_n="+str(Ku['n'])
        url += "&offset="+str(offset)
        url += "&length="+str(length)
        if meta:
            url += "&metakey="+str(meta[0])
            url += "&meta="+fencode(meta[1].read())
        self.timeoutcount = 0

        if not isinstance(nKu, FludRSA):
            raise ValueError("must pass in a FludRSA as nKu to SENDVERIFY")

        self.deferred = defer.Deferred()
        ConnectionQueue.enqueue((self, self.headers, nKu, host, port, url))
Example #4
 def syncMasterMeta(self):
     """
     sync in-mem fname->sK mappings to disk
     """
     master = fencode(self.master)
     fmaster = open(os.path.join(self.metadir, self.metamaster), 'w')
     fmaster.write(master)
     fmaster.close()
Example #5
 def sendCRED(self, passphrase, email):
     logger.debug("sendCRED")
     key = fencode((self.config.Ku.encrypt(passphrase)[0], email))
     if not self.pending['CRED'].has_key(key):
         d = defer.Deferred()
         self.pending['CRED'][key] = d
         self._sendMessage("CRED?"+key)
         return d
     else:
         return self.pending['CRED'][key]
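The CRED key built here packs the RSA-encrypted passphrase and the email address into a single fencoded tuple; the server-side CRED handler (doOp in Example #25) recovers both with fdecode. A minimal round-trip sketch, with a hypothetical placeholder standing in for the Ku.encrypt output:

# Sketch only: 'encrypted_pw' stands in for self.config.Ku.encrypt(passphrase)[0]
encrypted_pw = 'hypothetical-rsa-ciphertext'
key = fencode((encrypted_pw, 'user@example.com'))
passphrase_cipher, email = fdecode(key)  # as the CRED handler in Example #25 unpacks it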
Example #6
 def close(self):
     if not self._file.closed: # XXX
         if (self.mode[0] != 'r' or self.mode.find('+') > 0) \
                 and (self._changed 
                     or crc32(str(self._accounting)) != self._accountingcrc):
             saved = self._file.tell()
             self._file.seek(self._dataend)
             self._file.write(fencode(self._accounting))
             self._file.truncate()
             self._file.seek(saved)
         self._file.close()
Example #7
 def roundDone(self, responses, key, x):
     #print "done %d:" % x
     #print "roundDone: %s" % responses
     if len(self.pending) != 0 or len(self.outstanding) != 0: 
         # should only get here for nodes that don't accept connections
         # XXX: updatenode -- decrease trust
         for i in self.pending:
             logger.debug("FN: %s couldn't contact node %s (%s:%d)" 
                     % (self.abbrv, fencode(i[2]), i[0], i[1]))
             self.debugpath.append(
                     "FN: %s couldn't contact node %s (%s:%d)" 
                     % (self.abbrv, fencode(i[2]), i[0], i[1]))
             for n in self.kclosest:
                 if (n[0],n[1],n[2]) == i:
                     self.kclosest.remove(n)
     
     logger.info("kFindNode %s terminated successfully after %d queries." 
             % (self.abbrv, len(self.queried)))
     self.debugpath.append("FN: %s terminated successfully after %d queries."
             % (self.abbrv, len(self.queried)))
     self.kclosest.sort(
             lambda a, b: FludkRouting.kCompare(a[2], b[2], key))
     result = {}
     if FludkRouting.k > len(self.kclosest):
         k = len(self.kclosest)
     else:
         k = FludkRouting.k
     result['k'] = self.kclosest[:k]
     #print "result: %s" % result
     #if len(result['k']) > 1:
     #   # if the results (aggregated from multiple responses) contains the
     #   # exact key, just return the correct answer (successful node 
     #   # lookup done).
     #   #print "len(result): %d" % len(result['k'])
     #   #print "result[0][2]: %s %d" % (type(result['k'][0][2]), 
     #   #   result['k'][0][2])
     #   #print "         key: %s %d" % (type(key), key)
     #   if result['k'][0][2] == key:
     #       #print "key matched!"
     #       result['k'] = (result['k'][0],)
     return result
Example #8
 def validMasterCAS(key, data, nodeID):
     # returns true if the data fits the characteristics of a master
     # metadata CAS key, i.e., if key==nodeID and the data is the right
     # length.
     nodeID = fencode(long(nodeID,16))
     if key != nodeID:
         return False  
     # XXX: need to do challenge/response on nodeID (just as in the
     # regular primitives) here, or else imposters can store/replace
     # this very important data!!!
     # XXX: do some length stuff - should only be as long as a CAS key 
     return True
Example #9
def generateTestData(minSize):
    fname = tempfile.mktemp()
    f = open(fname, 'w')
    data = FludCrypto.generateRandom(minSize/50)
    for i in range(0, 51+random.randrange(50)):
        f.write(data)
    f.close()
    filekey = FludCrypto.hashfile(fname)
    filekey = fencode(int(filekey, 16))
    filename = os.path.join("/tmp",filekey)
    os.rename(fname,filename)
    return (filename, filekey)
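This helper (like Examples #11, #12, and #16 below) follows the CAS naming convention used throughout: a test file is named by fencoding the integer value of its content hash. A small helper sketch (hypothetical isCASKey name, assuming the same hashfile and fencode helpers imported in these examples) restating the check the STORE handler in Example #20 performs against such names:

def isCASKey(path, filekey):
    # Recompute the content hash and compare it to the CAS name,
    # mirroring the check in Example #20's _storeFile.
    return fencode(long(hashfile(path), 16)) == filekey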
Example #10
 def __init__(self, node, host, port, key, val):
     logger.info("sending kSTORE to %s:%d" % (host, port))
     REQUEST.__init__(self, host, port, node)
     Ku = node.config.Ku.exportPublicKey()
     url = 'http://'+host+':'+str(port)+'/meta/'
     url += fencode(key)+"/"+fencode(val)
     url += '?nodeID='+str(node.config.nodeID)
     url += "&Ku_e="+str(Ku['e'])
     url += "&Ku_n="+str(Ku['n'])
     url += '&port='+str(node.config.port)
     # XXX: instead of a single key/val, protocol will take a series of
     # vals representing the blocks of the coded file and their
     # locations (by nodeID).  The entire thing will be stored under
     # the given key.  Also may need things like signature[s] from 
     # storing node[s], etc.
     #print "in SENDkSTORE.__init__, len(val)=%d" % len(str(val))
     #print "in SENDkSTORE.__init__, len(enc(val))=%d" % len(fencode(val))
     #print "in SENDkSTORE.__init__, len(url)=%d" % len(url)
     self.timeoutcount = 0
     self.deferred = defer.Deferred()
     ConnectionQueue.enqueue((self, host, port, url))
Example #11
def createFakeData(dir="/tmp", num=CONCURRENT):
    randsrc = open("/dev/urandom", 'rb')
    files = []
    for i in range(num):
        randdata = randsrc.read(256)
        filekey = fencode(int(flud.FludCrypto.hashstring(randdata), 16))
        filename = dir+'/'+filekey
        f = open(filename, 'wb')
        f.write(randdata)
        f.close()
        files.append(filename)
    randsrc.close()
    return files
Example #12
 def generateFiles(minsize):
     fname = tempfile.mktemp()
     f = open(fname, 'w')
     f.write('\0'*minsize)
     f.write(generateRandom(random.randrange(256)+1))
     f.close()
     filekey = hashfile(fname)
     filekey = fencode(int(filekey, 16))
     filename = os.path.join("/tmp",filekey)
     os.rename(fname,filename)
     filenamebad = os.path.join("/tmp/","bad"+filekey[3:])
     shutil.copy(filename, filenamebad)
     return (filekey, filename, filenamebad)
Example #13
def sendChallenge(request, reqKu, id):
    challenge = generateRandom(challengelength) 
    while challenge[0] == '\x00':
        # regenerate on a leading zero byte: RSA decrypt won't restore
        # leading 0's (see the XXX in answerChallenge), so this keeps the
        # challenge at its full challengelength
        challenge = generateRandom(challengelength)
    addChallenge(challenge)
    loggerauth.debug("unencrypted challenge is %s" 
            % fencode(binascii.unhexlify(id)+challenge))
    echallenge = reqKu.encrypt(binascii.unhexlify(id)+challenge)[0]
    echallenge = fencode(echallenge)
    loggerauth.debug("echallenge = %s" % echallenge)
    # since challenges will result in a new req/resp pair being generated,
    # these could take much longer than the primitive_to.  Expire in
    # 15*primitive_to
    reactor.callLater(primitive_to*15, expireChallenge, challenge, True)
    resp = 'challenge = %s' % echallenge
    loggerauth.debug("resp = %s" % resp)
    request.setResponseCode(http.UNAUTHORIZED, echallenge)
    request.setHeader('Connection', 'close')
    request.setHeader('WWW-Authenticate', 'Basic realm="%s"' % 'default')
    request.setHeader('Content-Length', str(len(resp)))
    request.setHeader('Content-Type', 'text/html')
    request.setHeader('Pragma','claimreserve=5555')  # XXX: this doesn't work
    return resp
Example #14
 def __init__(self, node, host, port, key, commandName="nodes"):
     """
     """
     logger.info("sending %s (findnode) for %s... to %s:%d" 
             % (commandName, ("%x" % key)[:10], host, port)) 
     self.commandName = commandName
     host = getCanonicalIP(host)
     REQUEST.__init__(self, host, port, node)
     Ku = self.node.config.Ku.exportPublicKey()
     url = 'http://'+host+':'+str(port)+'/'+self.commandName+'/'
     url += fencode(key)
     url += '?nodeID='+str(self.node.config.nodeID)
     url += "&Ku_e="+str(Ku['e'])
     url += "&Ku_n="+str(Ku['n'])
     url += '&port='+str(self.node.config.port)
     self.timeoutcount = 0
     self.deferred = defer.Deferred()
     ConnectionQueue.enqueue((self, node, host, port, key, url))
Example #15
def verifyHashes(tarball, ignoreExt=None):
    # return the names of all files in this tarball if the hash checksums
    # pass, otherwise return an empty list
    digests = []
    done = False
    if tarball[-7:] == ".tar.gz":
        f = gzip.GzipFile(tarball, 'r:gz')
    else:
        f = open(tarball, 'r')
    empty = tarfile.BLOCKSIZE * '\0'
    while not done:
        bytes = f.read(tarfile.BLOCKSIZE)
        if bytes == "":
            done = True
        elif bytes == empty:
            pass
        else:
            if bytes[0] == '\0' and bytes[124] == '\0':
                print "WARNING: read nulls when expecting file header"
                break
            name = bytes[0:99]
            name = name[:name.find(chr(0))]
            size = int(bytes[124:135], 8)
            blocks = size / tarfile.BLOCKSIZE
            if ignoreExt and name[-len(ignoreExt):] == ignoreExt:
                # gzip doesn't support f.seek(size, 1)
                f.seek(f.tell()+size) 
            else:
                digest = hashstream(f, size)
                digest = fencode(int(digest,16))
                if name == digest:
                    #print "%s == %s" % (name, digest)
                    digests.append(name)
                else:
                    #print "%s != %s" % (name, digest)
                    f.close()
                    return []
            if (size % tarfile.BLOCKSIZE) > 0:
                blocks += 1
            f.seek((blocks * tarfile.BLOCKSIZE) - size + f.tell())
    f.close()
    return digests
Example #16
def maketarball(numfiles, avgsize, hashnames=False, addmetas=False):
    tarballname = tempfile.mktemp()+".tar"
    tarball = tarfile.open(tarballname, 'w')
    if addmetas:
        metafname = tempfile.mktemp()
        metaf = file(metafname, 'w')
        metaf.write('m'*48)
        metaf.close()
    for i in xrange(numfiles):
        fname = tempfile.mktemp()
        f = file(fname, 'wb')
        size = int(avgsize * (random.random()+0.5))
        blocksize = 65*1024
        if hashnames:
            sha256 = SHA256.new()
        for j in range(0, size, blocksize):
            if j+blocksize > size:
                block = 'a'*(size-j)
            else:
                block = 'a'*blocksize
            if hashnames:
                sha256.update(block)
            f.write(block)
        f.close()
        arcname = fname
        if hashnames:
            arcname = fencode(int(sha256.hexdigest(),16))
        tarball.add(fname, arcname)
        if addmetas:
            tarball.add(metafname, arcname+".343434.meta")
        os.remove(fname)
    if addmetas:
        os.remove(metafname)
    contents = tarball.getnames()
    tarball.close()
    return tarballname, contents
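With hashnames=True, each member is archived under the fencoded SHA-256 of its contents, which is what verifyHashes in Example #15 checks; the *.meta companions are skipped via its ignoreExt argument, just as the server does in Example #20. A hedged usage sketch, assuming both functions are importable into one test module and that hashstream in Example #15 computes the same SHA-256 digest used for the member names:

tarballname, contents = maketarball(20, 4096, hashnames=True, addmetas=True)
verified = verifyHashes(tarballname, ignoreExt=".meta")
if verified:
    print "verified %d hash-named members" % len(verified)
else:
    print "checksum mismatch: tarball rejected"
os.remove(tarballname)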
Example #17
def convert(fname, nodeIDandMeta=None):
    """
    Convert a non-BlockFile to a BlockFile, with an optional nodeID/metadata
    pair to add.  The file represented by fname will be a BlockFile upon
    successful return.
    nodeIDandMeta should be a tuple.  The first element is the nodeID, the
    second the metadata.  Metadata should be a dict (client can send arbitrary
    dict, but the purpose is to have a key/value pair where the key is the
    crc32 checksum of the full metadata, and the value is the chunk of metadata
    being stored with this BlockFile)
    """
    tname = tempfile.mktemp()
    f1 = __builtin__.open(fname, 'rb')
    f2 = __builtin__.open(tname, 'wb')
    size = os.stat(fname)[stat.ST_SIZE]
    f2.write(struct.pack('=Q',size))
    while 1:
        buf = f1.read()
        if buf == "":
            break
        f2.write(buf)
    if nodeIDandMeta == None:
        l = {} 
    else:
        if len(nodeIDandMeta) != 2:
            raise IOError("invalid nodeID/metadata pair")
        nodeID = nodeIDandMeta[0]
        meta = nodeIDandMeta[1]
        if not isinstance(meta, dict):
            raise IOError("invalid metadata (should be a dict)")
        l = {} 
        l[nodeID] = meta
    f2.write(fencode(l))
    f2.close()
    f1.close()
    os.rename(tname, fname)
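The resulting on-disk layout is a fixed 8-byte size header (struct format '=Q'), the original file contents, and an fencoded {nodeID: metadata} dict appended at the end. A minimal reader sketch (hypothetical readBlockFileTail helper, assuming the same struct, __builtin__, and fdecode imports used by convert above):

def readBlockFileTail(fname):
    # Mirrors the layout written by convert() above (sketch only).
    f = __builtin__.open(fname, 'rb')
    size = struct.unpack('=Q', f.read(8))[0]   # length of the original data
    data = f.read(size)                        # the stored block itself
    nodemeta = fdecode(f.read())               # {nodeID: metadata} trailer
    f.close()
    return data, nodemeta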
Example #18
def answerChallenge(challenge, Kr, groupIDu, sID, headers={}):
    loggerauth.debug("got challenge: '%s'" % challenge)
    sID = binascii.unhexlify(sID)
    challenge = (fdecode(challenge),)
    response = fencode(Kr.decrypt(challenge))
    # XXX: RSA.decrypt won't restore leading 0's.  This causes
    #      some challenges to fail when they shouldn't -- solved for now
    #      on the server side by generating non-0 leading challenges.
    loggerauth.debug("decrypted challenge to %s" % response)
    responseID = fdecode(response)[:len(sID)]
    loggerauth.debug("  response id: %s" % fencode(responseID))
    if responseID != sID:
        # fail the op.
        # If we don't do this, we may be allowing the server to build a
        # dictionary useful for attack.  The attack is as follows: node A
        # (server) collects a bunch of un-IDed challenge/response pairs by
        # issuing challenges to node B (client).  Then node A uses those
        # responses to pose as B to some other server C.  This sounds
        # farfetched, in that such a database would need to be huge, but in
        # reality, such an attack can happen in real-time, with node A
        # simultaneously serving requests from B, relaying challenges from C to
        # B, and then responding with B's responses to C to gain resources
        # there as an imposter.  The ID string prevents this attack.

        # XXX: trust-- (must go by ip:port, since ID could be innocent)
        raise ImposterException("node %s is issuing invalid challenges --"
                " claims to have id=%s" % (fencode(sID), fencode(responseID)))
    response = fdecode(response)[len(sID):]
    loggerauth.debug("  challenge response: '%s'" % fencode(response))
    response = fencode(response)+":"+groupIDu
    loggerauth.debug("response:groupIDu=%s" % response)
    response = binascii.b2a_base64(response)
    loggerauth.debug("b64(response:groupIDu)=%s" % response)
    response = "Basic %s" % response
    headers['Authorization'] = response
    return headers 
Example #19
    def promptUser(self):
        helpDict = {}

        command = raw_input("%s> " % time.ctime())
        commands = command.split(' ') # XXX: should tokenize on any whitespace
        commandkey = commands[0][:4]
        
        # core client operations
        helpDict['exit'] = "exit from the client"
        helpDict['help'] = "display this help message"
        helpDict['ping'] = "send a GETID() message: 'ping host port'"
        helpDict['putf'] = "store a file: 'putf canonicalfilepath'"
        helpDict['getf'] = "retrieve a file: 'getf canonicalfilepath'"
        helpDict['geti'] = "retrieve a file by CAS key: 'geti fencodedCASkey'"
        helpDict['fndn'] = "send a FINDNODE() message: 'fndn hexIDstring'"
        helpDict['list'] = "list stored files (read from local metadata)"
        helpDict['putm'] = "store master metadata"
        helpDict['getm'] = "retrieve master metadata"
        helpDict['cred'] = "send encrypted private credentials: cred"\
                " passphrase emailaddress"
        helpDict['node'] = "list known nodes"
        helpDict['buck'] = "print k buckets"
        helpDict['stat'] = "show pending actions"
        helpDict['stor'] = "store a block to a given node:"\
                " 'stor host:port,fname'"
        helpDict['rtrv'] = "retrieve a block from a given node:"\
                " 'rtrv host:port,fname'"
        helpDict['vrfy'] = "verify a block on a given node:"\
                " 'vrfy host:port:offset-length,fname'"
        helpDict['fndv'] = "retrieve a value from the DHT: 'fndv hexkey'"
        helpDict['dlet'] = "delete from the stor: '[XXX]'"
        if commandkey == 'exit' or commandkey == 'quit':
            self.quit = True
        elif commandkey == 'help':
            self.printHelp(helpDict)
        elif commandkey == 'ping':
            # ping a host
            # format: 'ping host port'
            func = lambda: self.sendPING(commands[1], commands[2])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'putf':
            # store a file
            # format: 'putf canonicalfilepath'
            func = lambda: self.sendPUTF(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'getf':
            # retrieve a file
            # format: 'getf canonicalfilepath'
            func = lambda: self.sendGETF(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'geti':
            # retrieve a file by CAS ID
            # format: 'geti fencoded_CAS_ID'
            func = lambda: self.sendGETI(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'fndn':
            # find a node (or the k-closest nodes)
            # format: 'fndn hexIDstring'
            func = lambda: self.sendFNDN(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'list':
            # list stored files
            self.callFactory(self.sendLIST, commands, self.msgs)
        elif commandkey == 'putm':
            # store master metadata
            self.callFactory(self.sendPUTM, commands, self.msgs)
        elif commandkey == 'getm':
            # retrieve master metadata
            self.callFactory(self.sendGETM, commands, self.msgs)
        elif commandkey == 'cred':
            # send encrypted private credentials to an email address
            # format: 'cred passphrase emailaddress'
            func = lambda: self.sendCRED(
                    command[len(commands[0])+1:-len(commands[-1])-1], 
                    commands[-1])
            self.callFactory(func, commands, self.msgs)
            
        # the following are diagnostic operations, debug-only utility
        elif commandkey == 'node':
            # list known nodes
            self.callFactory(self.sendDIAGNODE, commands, self.msgs)
        elif commandkey == 'buck':
            # show k-buckets
            self.callFactory(self.sendDIAGBKTS, commands, self.msgs)
        elif commandkey == 'stat':
            # show pending actions
            print self.pending
        elif commandkey == 'stor':
            # stor a block to a given node.  format: 'stor host:port,fname'
            storcommands = commands[1].split(',')
            try:
                fileid = int(storcommands[1], 16)
            except:
                linkfile = fencode(long(hashfile(storcommands[1]),16))
                if (os.path.islink(linkfile)):
                    os.remove(linkfile)
                os.symlink(storcommands[1], linkfile)
                storcommands[1] = linkfile
                # XXX: delete this file when the command finishes
            commands[1] = "%s,%s" % (storcommands[0], storcommands[1])
            func = lambda: self.sendDIAGSTOR(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'rtrv':
          # retrieve a block from a given node. format: 'rtrv host:port,fname'
            func = lambda: self.sendDIAGRTRV(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'vrfy':
            # verify a block on a given node.
            # format: 'vrfy host:port:offset-length,fname'
            logger.debug("vrfy(%s)" % commands[1])
            func = lambda: self.sendDIAGVRFY(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif commandkey == 'dlet':
            print "not yet implemented"
        elif commandkey == 'fndv':
            # try to retrieve a value from the DHT
            # format: 'fndv key'
            func = lambda: self.sendDIAGFNDV(commands[1])
            self.callFactory(func, commands, self.msgs)
        elif command != "":
            reactor.callFromThread(self.queueError, None, self.msgs, 
                    "illegal command '%s'" % command)
Example #20
    def _storeFile(self, request, filekey, reqKu, nodeID):
        # [XXX: memory management is not happy here.  might want to look at
        # request.registerProducer().  Otherwise, might have to scrap
        # using the STORE(ROOT(RESOURCE)) deal in favor of
        # producer/consumer model for STORE ops
        # (http://itamarst.org/writings/OSCON03/twisted_internet-108.html).
        # Another option might include subclassing web.resource.Resource
        # and making this derive from that...  Or might be web.Site that
        # needs to be subclassed... Or maybe web.site.Request -
        # web.site.Request.process()?  Request seems doubly-bad: perhaps a
        # copy is made somewhere, because memory mushrooms to 2x big
        # upload, then goes back down to around 1x.
        # [update: This should be fixable in twisted.web2, but I am informed
        # that in the current version, there is no workaround]

        # get the data to a tmp file
        loggerstor.debug("writing store data to tmpfile")
        tmpfile = tempfile.mktemp(dir=self.config.storedir)

        tarball = os.path.join(self.config.storedir,reqKu.id()+".tar")

        # rename and/or prepend the data appropriately
        tmpTarMode = None
        if filekey[-4:] == ".tar":
            tmpfile = tmpfile+".tar"
            tmpTarMode = 'r'
            targetTar = tarball
        elif filekey[-7:] == ".tar.gz":
            tmpfile = tmpfile+".tar.gz"
            tmpTarMode = 'r:gz'
            targetTar = tarball+".gz"
        loggerstor.debug("tmpfile is %s" % tmpfile)

        # XXX: if the server supports both .tar and tar.gz, this is wrong; we'd
        # need to check *both* for already existing dudes instead of just
        # choosing one
        if os.path.exists(tarball+'.gz'):
            tarball = (tarball+'.gz', 'r:gz')
        elif os.path.exists(tarball):
            tarball = (tarball, 'r')
        else:
            tarball = None
        loggerstor.debug("tarball is %s" % str(tarball))

        data = request.args.get('filename')[0]  # XXX: file in mem! need web2.
        # XXX: bad blocking stuff here
        f = open(tmpfile, 'wb')
        f.write(data)
        f.close()
        ftype = os.popen('file %s' % tmpfile)
        loggerstor.debug("ftype of %s is %s" % (tmpfile, ftype.read()))
        ftype.close()

        if tmpTarMode:
            # client sent a tarball
            loggerstor.debug("about to chksum %s" % tmpfile)
            digests = TarfileUtils.verifyHashes(tmpfile, '.meta')
            loggerstor.debug("chksum returned %s" % digests)
            ftype = os.popen('file %s' % tmpfile)
            loggerstor.debug("ftype of %s is %s" % (tmpfile, ftype.read()))
            ftype.close()
            if not digests:
                msg = "Attempted to use non-CAS storage key(s) for" \
                        " STORE tarball"
                loggerstor.debug(msg)
                os.remove(tmpfile)
                request.setResponseCode(http.CONFLICT, msg) 
                return msg
            # XXX: add digests to a db of already stored files (for quick 
            # lookup)
            if tarball:
                tarname, tarnameMode = tarball
                loggerstor.debug("concatenating tarfiles %s and %s" 
                        % (tarname, tmpfile))
                f1 = tarfile.open(tarname, tarnameMode)
                f2 = tarfile.open(tmpfile, tmpTarMode)
                f1names = f1.getnames()
                f2names = f2.getnames()
                f1.close()
                f2.close()
                dupes = [f for f in f1names if f in f2names]
                TarfileUtils.delete(tmpfile, dupes)
                ftype = os.popen('file %s' % tarname)
                loggerstor.debug("ftype of %s is %s" % (tarname, ftype.read()))
                ftype.close()
                TarfileUtils.concatenate(tarname, tmpfile)
                ftype = os.popen('file %s' % tarname)
                loggerstor.debug("ftype of %s is %s" % (tarname, ftype.read()))
                ftype.close()
            else:
                loggerstor.debug("saving %s as tarfile %s" % (tmpfile, 
                    targetTar))
                os.rename(tmpfile, targetTar)
        else:
            # client sent regular file
            h = hashfile(tmpfile)
            if request.args.has_key('meta') and request.args.has_key('metakey'):
                metakey = request.args.get('metakey')[0]
                meta = request.args.get('meta')[0]  # XXX: file in mem! 
            else:
                metakey = None
                meta = None
            if fencode(long(h, 16)) != filekey:
                msg = "Attempted to use non-CAS storage key for STORE data "
                msg += "(%s != %s)" % (filekey, fencode(long(h, 16)))
                os.remove(tmpfile)
                request.setResponseCode(http.CONFLICT, msg) 
                return msg
            fname = os.path.join(self.config.storedir, filekey)
            if os.path.exists(fname):
                loggerstor.debug("adding metadata to %s" % fname)
                f = BlockFile.open(fname,'rb+')
                if not f.hasNode(nodeID):
                    f.addNode(int(nodeID,16), {metakey: meta})
                    f.close()
                os.remove(tmpfile)
            else:
                if os.path.exists(nodeID+".tar"):
                    # XXX: need to do something with metadata!
                    print "XXX: need to do something with metadata for tar!"
                    tarball = tarfile.open(tarname, 'r')
                    if fname in tarball.getnames():
                        loggerstor.debug("%s already stored in tarball" % fname)
                        # if the file is already in the corresponding tarball,
                        # update its timestamp and return success.
                        loggerstor.debug("%s already stored" % filekey)
                        # XXX: update timestamp for filekey in tarball
                        return "Successful STORE"
                    else:
                        loggerstor.debug("tarball for %s, but %s not in tarball"
                                % (nodeID,fname))
                if len(data) < 8192 and fname != tarname: #XXX: magic # (blk sz)
                    # If the file is small, move it into the appropriate
                    # tarball.  Note that this code is unlikely to ever be
                    # executed if the client is an official flud client, as
                    # they do the tarball aggregation thing already, and all
                    # tarballs will be > 8192.  This is, then, really just
                    # defensive coding -- clients aren't required to implement
                    # that tarball aggregation strategy.  And it is really only
                    # useful for filesystems with inefficient small file 
                    # storage.
                    loggerstor.debug("moving small file '%s' into tarball" 
                            % fname)
                    if not os.path.exists(tarname):
                        tarball = tarfile.open(tarname, 'w')
                    else:
                        tarball = tarfile.open(tarname, 'a')
                    # XXX: more bad blocking stuff
                    tarball.add(tmpfile, os.path.basename(fname))
                    if meta:
                        metafilename = "%s.%s.meta" % (os.path.basename(fname), 
                                metakey)
                        loggerstor.debug("adding metadata file to tarball %s" 
                                % metafilename)
                        metaio = StringIO(meta)
                        tinfo = tarfile.TarInfo(metafilename)
                        tinfo.size = len(meta)
                        tarball.addfile(tinfo, metaio)
                    tarball.close()
                    os.remove(tmpfile)
                else:
                    # store the file 
                    loggerstor.debug("storing %s" % fname)
                    os.rename(tmpfile, fname)
                    BlockFile.convert(fname, (int(nodeID,16), {metakey: meta}))

        loggerstor.debug("successful STORE for %s" % filekey)
        return "Successful STORE"
Example #21
 def sendChallenge(self):
     self.challenge = fencode(generateRandom(
             ServerPrimitives.challengelength))
     echallenge = self.config.Ku.encrypt(self.challenge)[0]
     echallenge = fencode(echallenge)
     return echallenge
Example #22
 def lineReceived(self, line):
     logger.debug("lineReceived: '%s'" % line)
     # commands: AUTH, PUTF, GETF, VRFY
     # status: ? = request, : = successful response, ! = failed response
     command = line[0:4]
     status = line[4]
     data = line[5:]
     #print "data is '%s'" % data
     if not self.authenticated and command == "AUTH":
         if status == '?':
             # asked for AUTH challenge to be sent.  send it
             logger.debug("AUTH challenge requested, sending")
             echallenge = self.factory.sendChallenge()
             self.transport.write("AUTH?"+echallenge+"\r\n")
         elif status == ':' and self.factory.challengeAnswered(data):
             # sent AUTH response and it passed
             logger.debug("AUTH challenge successful")
             self.authenticated = True
             self.transport.write("AUTH:\r\n")
         elif status == ':':
             logger.debug("AUTH challenge failed")
             self.transport.write("AUTH!\r\n")
     elif command == "DIAG":
         if data == "NODE":
             logger.debug("DIAG NODE")
             nodetups = self.factory.config.routing.knownExternalNodes()
             nodes = []
             for n in nodetups:
                 node = list(n)
                 if n[2] in self.factory.config.reputations:
                     node.append(self.factory.config.reputations[n[2]])
                 else:
                     node.append(0)
                 if n[2] in self.factory.config.throttled:
                     node.append(self.factory.config.throttled[n[2]])
                 else:
                     node.append(0)
                 nodes.append(tuple(node))
             self.transport.write("DIAG:NODE%s\r\n" % fencode(nodes))
         elif data == "BKTS":
             logger.debug("DIAG BKTS")
             bucks = eval("%s" % self.factory.config.routing.kBuckets)
             self.transport.write("DIAG:BKTS%s\r\n" % fencode(bucks))
         else:
             dcommand = data[:4]
             ddata = data[5:]
             logger.debug("DIAG %s %s" % (dcommand, ddata))
             self.commands[dcommand][CONCURR] += 1
             d = self.doOp(dcommand, ddata)
             d.addCallback(self.sendSuccess, dcommand, ddata, "DIAG")
             d.addErrback(self.sendFailure, dcommand, ddata, "DIAG")
     elif status == '?':
         # requested an operation to be performed.  If we are below our
         # maximum concurrent ops, do the operation.  Otherwise, put it on
         # the queue to be serviced when current ops finish.  Response is
         # sent back to client when deferreds fire.
         if self.commands[command][CONCURR] >= self.commands[command][MAX]:
             #print "putting %s on the queue" % line
             logger.info("received %s request, enqueuing" % command)
             self.commands[command][QUEUE].insert(0, data)
         else:
             #print "doing %s" % line
             logger.info("received %s request, executing" % command)
             print self.commands[command]
             self.commands[command][CONCURR] += 1
             d = self.doOp(command, data)
             d.addCallback(self.sendSuccess, command, data)
             d.addErrback(self.sendFailure, command, data)
Example #23
                return msg
            # XXX: see if there isn't already a 'val' for 'key' present
            #      - if so, compare to val.  Metadata can differ.  Blocks
            #        shouldn't.  However, if blocks do differ, just add the
            #        new values in, up to N (3?) records per key.  Flag these
            #        (all N) as ones we want to verify (to storer and storee).
            #        Expunge any blocks that fail verify, and punish storer's 
            #        trust.
            logger.info("storing dht data to %s" % fname)
            if os.path.exists(fname) and isinstance(md, dict):
                f = open(fname, "rb")
                edata = f.read()
                f.close()
                md = self.mergeMetadata(md, fdecode(edata))
            f = open(fname, "wb")
            f.write(fencode(md))
            f.close()
            return ""  # XXX: return a VERIFY reverse request: segname, offset

    def dataAllowed(self, key, data, nodeID):
        # ensures that 'data' is in [one of] the right format[s] (helps prevent
        # DHT abuse)

        def validValue(val):
            if not isinstance(val, long) and not isinstance(val, int):
                return False  # not a valid key/nodeid
            if val > 2**256 or val < 0:  # XXX: magic 2**256, use fludkrouting
                return False  # not a valid key/nodeid
            return True

        def validMetadata(blockdata, nodeID):
Example #24
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
    os.path.abspath(__file__)))))
from flud.FludNode import FludNode
from flud.protocol.FludClient import FludClient
import flud.FludCrypto as FludCrypto
from flud.fencode import fencode, fdecode
from flud.protocol.FludCommUtil import *
from flud.FludDefer import ErrDeferredList

"""
Test code for primitive operations.  These ops include all of the descendants
of ROOT and REQUEST in FludProtocol.
"""

# metadatablock: (block#,n,k,blockdata)
metadatablock = fencode((1,20,40,'adfdsfdffffffddddddddddddddd'))
fake_mkey_offset = 111111

def testerror(failure, message, node):
    """
    error handler for test errbacks
    """
    print "testerror message: %s" % message
    print "testerror: %s" % str(failure)
    print "At least 1 test FAILED"
    return failure

def allGood(_, nKu):
    print "all tests PASSED"
    return nKu 
Example #25
 def doOp(self, command, fname):
     #print "got command '%s'" % command
     if command == "PUTF":
         logger.debug("PUTF %s", fname);
         return FileOps.StoreFile(self.factory.node, fname).deferred
     elif command == "GETI":
         logger.debug("GETI %s", fname);
         return FileOps.RetrieveFile(self.factory.node, fname).deferred
     elif command == "GETF":
         logger.debug("GETF %s", fname);
         return FileOps.RetrieveFilename(self.factory.node, fname).deferred
     elif command == "FNDN":
         logger.debug("FNDN %s" % fname);
         try: 
             intval = long(fname, 16)
         except: 
             return defer.fail("fname was not hex")
         return self.factory.node.client.kFindNode(intval)
         # The following is for testing aggregation of kFindNode on same key
         #dl = []
         #for i in [1,2,3,4,5]:
         #   d = self.factory.node.client.kFindNode(intval)
         #   dl.append(d)
         #dlist = defer.DeferredList(dl)
         #return dlist
     elif command == "FNDV":
         logger.debug("FNDV %s", fname);
         try: 
             intval = long(fname, 16)
         except: 
             return defer.fail("fname was not hex")
         return self.factory.node.client.kFindValue(intval)
     elif command == "CRED":
         passphrase, email = fdecode(fname)
         # XXX: allow an optional passphrase hint to be sent in email.
         passphrase = self.factory.node.config.Kr.decrypt(passphrase)
         logger.debug("CRED %s to %s", passphrase, email);
         Kr = self.factory.node.config.Kr.exportPrivateKey()
         Kr['g'] = self.factory.node.config.groupIDr
         fKr = fencode(Kr)
         key = AES.new(binascii.unhexlify(hashstring(passphrase)))
         fKr = '\x00'*(16-(len(fKr)%16))+fKr
         efKr = fencode(key.encrypt(fKr))
         logger.debug("efKr = %s " % efKr)
         d = smtp.sendmail('localhost', "your_flud_client@localhost", 
                 email,
                 "Subject: Your encrypted flud credentials\n\n"
                 "Hopefully, you'll never need to use this email.  Its "
                 "sole purpose is to help you recover your data after a "
                 "catastrophic and complete loss of the original computer "
                 "or hard drive.\n\n"
                 "In that unlucky event, you'll need a copy of your flud "
                 "credentials, which I've included below, sitting between "
                 "the \"---+++---\" markers.  These credentials were "
                 "encrypted with a passphrase of your choosing when you "
                 "installed the flud software.  I'll only say this "
                 "once:\n\n"
                 "YOU MUST REMEMBER THAT PASSWORD IN ORDER TO RECOVER YOUR "
                 "CREDENTIALS.  If you are unable to remember the "
                 "passphrase and your computer fails catastrophically "
                 "(losing its local copy of these credentials), you will "
                 "not be able to recover your data."
                 "\n\n"
                 "Luckily, that's all you should ever need in order to "
                 "recover all your data: your passphrase and these "
                 "credentials."
                 "\n\n"
                 "Please save this email.  You may want to print out hard "
                 "copies and store them safely, forward this email to "
                 "other email accounts, etc.  Since the credentials are "
                 "encrypted, others won't be able to steal them "
                 "without guessing your passphrase. "
                 "\n\n"
                 "---+++---\n"+efKr+"\n---+++---\n")
         return d
         # to decode this email, we search for the '---+++---' markers, make
         # sure the intervening data is all in one piece (remove any line
         # breaks \r or \n inserted by email clients) and call this 'cred',
         # reconstruct the AES key with the H(passphrase) (as above), and
         # then use the key to .decrypt(fdecode(cred)) and call this dcred,
         # then fdecode(dcred[dcred.find('d'):]) and call this ddcred, and
         # finally importPrivateKey(ddcred) and set groupIDr to ddcred['g'].
     elif command == "LIST":
         logger.debug("LIST")
         return defer.succeed(self.factory.config.master)
     elif command == "GETM":
         logger.debug("GETM")
         return FileOps.RetrieveMasterIndex(self.factory.node).deferred
     elif command == "PUTM":
         logger.debug("PUTM")
         return FileOps.UpdateMasterIndex(self.factory.node).deferred
     else:
         #print "fname is '%s'" % fname
         host = fname[:fname.find(':')]
         port = fname[fname.find(':')+1:fname.find(',')]
         fname = fname[fname.find(',')+1:]
         print "%s: %s : %s , %s" % (command, host, port, fname)
         if command == "STOR":
             logger.debug("STOR");
             return self.factory.node.client.sendStore(fname, None, 
                     host, int(port))
         elif command == "RTRV":
             logger.debug("RTRV");
             return self.factory.node.client.sendRetrieve(fname, host, 
                     int(port))
         elif command == "VRFY":
             logger.debug("VRFY");
             offset = port[port.find(':')+1:port.find('-')]
             length = port[port.find('-')+1:]
             port = port[:port.find(':')]
             print "%s: %s : %s %s - %s , %s" % (command, host, port, 
                     offset, length, fname)
             return self.factory.node.client.sendVerify(fname, int(offset), 
                     int(length), host, int(port))
         else:
             logger.debug("bad op");
             return defer.fail("bad op")
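The comment in the CRED branch above describes how the emailed credentials are meant to be recovered. A hedged sketch of those steps (hypothetical recoverCredentials helper; AES, binascii, hashstring, fdecode, and importPrivateKey are assumed to be the same helpers used elsewhere in these examples):

def recoverCredentials(emailtext, passphrase):
    # Follow the decode steps described in the CRED comment above.
    marker = '---+++---'
    end = emailtext.rfind(marker)
    start = emailtext.rfind(marker, 0, end) + len(marker)
    cred = emailtext[start:end].replace('\r', '').replace('\n', '')
    key = AES.new(binascii.unhexlify(hashstring(passphrase)))
    dcred = key.decrypt(fdecode(cred))
    ddcred = fdecode(dcred[dcred.find('d'):])  # skip the '\x00' padding
    Kr = importPrivateKey(ddcred)              # assumed key-import helper
    return Kr, ddcred['g']                     # private key and groupIDr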
Example #26
def addChallenge(challenge):
    outstandingChallenges[challenge] = True
    loggerauth.debug("added challenge %s" % fencode(challenge))