Example #1
	def __init__(self,path):
		'''
		DB table to store things as files and directories. This is
		designed to reduce RAM usage when reading things from large
		databases. Specifically this is designed for caches.

		# variables #
		.path
		  The path on the filesystem where the table is stored.
		.names
		  Gives you a list containing the names of all stored
		  values as strings.
		.namePaths
		  Gives you a dict where the keys are the names and
		  the value is the path of that value's database file.
		.length
		  The number of names stored in this table.
		'''
		# path of the root of the cache, this is where files
		# will be stored on the system
		self.path=path
		# create the paths if they do not exist
		if not pathExists(self.path):
			makedirs(self.path)
		debug.add('table path',self.path)
		# the path prefix is for tables stored in tables
		self.pathPrefix=''
		# tables are stored as files
		tempTable=[]
		# array of all the value names stored on table
		namesPath=pathJoin(self.path,'names.table')
		# if no namepaths exist create them
		if not pathExists(namesPath):
			# write the new value to the system
			writeFile(namesPath,pickle(dict()))
		# load the name paths
		self.namePaths=unpickle(loadFile(namesPath))
		debug.add('self.namePaths',self.namePaths)
		# create an array of all the names of values stored
		self.names=self.namePaths.keys()
		debug.add('self.names',self.names)
		# length of all the values stored on the table
		self.length=len(self.names)
		debug.add('self.length',self.length)
		# the protected list is an array of names that are
		# protected from limit cleaning
		protectedPath=pathJoin(self.path,'protected.table')
		if pathExists(protectedPath):
			# load the list
			self.protectedList=unpickle(loadFile(protectedPath))
		else:
			# create a blank list
			self.protectedList=[]
		# limit value to limit the number of values
		# load the limit value from file if it exists
		limitPath=pathJoin(self.path,'limit.table')
		if pathExists(limitPath):
			self.limit=unpickle(loadFile(limitPath))
		else:
			self.limit=None
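Example #1 leans on small helper functions (pickle, unpickle, loadFile, writeFile, pathExists, pathJoin, makedirs) that are imported elsewhere in its module. As a rough sketch, assuming the standard library underneath (the project's real helpers may differ), they could look like this:

from os import makedirs
from os.path import exists as pathExists, join as pathJoin
from pickle import dumps as pickle, loads as unpickle

def loadFile(path):
    # read the raw bytes of a stored value from disk
    with open(path, 'rb') as f:
        return f.read()

def writeFile(path, data):
    # write the pickled bytes of a value to disk
    with open(path, 'wb') as f:
        f.write(data)

The dumps/loads aliasing matches the style several of the test examples below use explicitly.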
Example #2
def verifyClientPutAttestation(block_Id, key_block_Version_No, new_Version_No,
                               New_Hash, encryptedEncodedContent,
                               client_Put_Attest, currHash, currHashSign):
    global keyDistributor
    key = keyDistributor.getPublicKey(block_Id, 'cloud')
    key = RSA.importKey(key)
    h = hash(encryptedEncodedContent)
    concat = str(block_Id) + str(key_block_Version_No) + str(new_Version_No)
    #   sign=key.sign(h,'')
    hashOfElements = hash(concat)
    #    new_chain_Hash=hash(client_Put_Attest+chain_Hash)
    hashOfCloudPutAttest = hash(concat)
    #    hashOfElements=hash(concat+chain_Hash)
    # Verify that the new hash matches the hash of the block content
    if h == p.unpickle(New_Hash):
        # Verify the attestation signature
        if (key.verify(hashOfElements, p.unpickle(client_Put_Attest))):
            cloudSignKey = loadSigningKey()
            sign = cloudSignKey.sign(hashOfCloudPutAttest, '')
            return [1, sign]
        else:
            return [0, 'nothing']
    else:
        print "New hash is not hash of block content"
        return [0, 'nothing']
Example #3
    def __init__(self, path):
        '''
        DB table to store things as files and directories. This is
        designed to reduce RAM usage when reading things from large
        databases. Specifically this is designed for caches.

        # variables #
        .path
          The path on the filesystem where the table is stored.
        .names
          Gives you a list containing the names of all stored
          values as strings.
        .namePaths
          Gives you a dict where the keys are the names and
          the value is the path of that value's database file.
        .length
          The number of names stored in this table.
        '''
        # path of the root of the cache, this is where files
        # will be stored on the system
        self.path = path
        # create the paths if they do not exist
        if not pathExists(self.path):
            makedirs(self.path)
        debug.add('table path', self.path)
        # the path prefix is for tables stored in tables
        self.pathPrefix = ''
        # tables are stored as files
        tempTable = []
        # array of all the value names stored on table
        namesPath = pathJoin(self.path, 'names.table')
        # if no namepaths exist create them
        if not pathExists(namesPath):
            # write the new value to the system
            writeFile(namesPath, pickle(dict()))
        # load the name paths
        self.namePaths = unpickle(loadFile(namesPath))
        debug.add('self.namePaths', self.namePaths)
        # create an array of all the names of values stored
        self.names = self.namePaths.keys()
        debug.add('self.names', self.names)
        # length of all the values stored on the table
        self.length = len(self.names)
        debug.add('self.length', self.length)
        # the protected list is an array of names that are
        # protected from limit cleaning
        protectedPath = pathJoin(self.path, 'protected.table')
        if pathExists(protectedPath):
            # load the list
            self.protectedList = unpickle(loadFile(protectedPath))
        else:
            # create a blank list
            self.protectedList = []
        # limit value to limit the number of values
        # load the limit value from file if it exists
        limitPath = pathJoin(self.path, 'limit.table')
        if pathExists(limitPath):
            self.limit = unpickle(loadFile(limitPath))
        else:
            self.limit = None
Example #4
def test_pickle_dotdict():
    from pickle import dumps as pickle, loads as unpickle
    from sampleproject.toolkit.utils import DotDict

    orig = DotDict(dict(a=1, b=dict(c=2)))
    copy = unpickle(pickle(orig))
    assert 2 == copy.b.c
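DotDict itself is not shown here. A minimal sketch that satisfies this test, assuming the real sampleproject class behaves like a dict with attribute access, is below; because it subclasses dict, the default pickle machinery round-trips it without a custom __reduce__:

class DotDict(dict):
    # minimal sketch: attribute reads map to keys, and nested
    # dicts are wrapped on access so copy.b.c works
    def __getattr__(self, name):
        try:
            value = self[name]
        except KeyError:
            raise AttributeError(name)
        if isinstance(value, dict):
            value = DotDict(value)
        return value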
Example #5
def test_pickle():
    from pickle import dumps as pickle, loads as unpickle
    from temboardagent.postgres import Postgres

    orig = Postgres(host='myhost')
    copy = unpickle(pickle(orig))
    assert 'myhost' == copy.host
Example #6
def say_hello_worker(pickled_app, *a, **kw):
    app = unpickle(pickled_app)
    with app.postgres.connect() as conn:
        conn.execute("""SELECT 'Hello World' AS message, NOW() AS time;""")
        row = list(conn.get_rows())[0]
    logger.info("Hello from worker.")
    return {"message": row['message'], "time": row['time']}
Example #7
def test_pickle_dotdict():
    from pickle import dumps as pickle, loads as unpickle
    from temboardagent.utils import DotDict

    orig = DotDict(dict(a=1, b=dict(c=2)))
    copy = unpickle(pickle(orig))
    assert 2 == copy.b.c
Example #8
def checkFreshness():
    for block_Id, block_Data in CloudAttestations.iteritems():
        firstOccurenceFlag = 1
        list1 = []
        for block_Version_No, versionData in block_Data.iteritems():
            list1.extend(versionData)
        j = 1
        for i in range(0, len(list1) / 4):
            if i == 0:
                previousChainHash = ''
            concat = str(p.unpickle(
                list1[(i * 4) + 1])) + str(previousChainHash)
            chainHash = SHA256.new(concat).hexdigest()
            if chainHash == list1[(i * 4) + 3]:
                # print "read fresh fine"
                pass
            else:
                print "read freshness splitting for ID:%s version:%s " % (
                    block_Id, block_Version_No)
                break
            previousChainHash = chainHash
        print "Readfreshness guaranteed for Block ID:%s" % block_Id
Example #9
 def decodeAndDecrypt(self):
     self.encryptedContent = base64.b64decode(self.b64)
     self.key_block_Version_No = p.unpickle(self.key_block_Version_No)
     self.secretKey = AES.new(self.secretKey,
                              AES.MODE_CTR,
                              counter=lambda: self.key_block_Version_No)
     self.content = self.secretKey.decrypt(self.encryptedContent)
Example #10
def test_app_pickle():
    from pickle import dumps as pickle, loads as unpickle
    from sampleproject.toolkit.app import BaseApplication

    empty_generator = (x for x in [])
    orig = BaseApplication(specs=empty_generator)
    orig.config.update(dict(a=1))
    copy = unpickle(pickle(orig))
    assert copy.config
Example #11
def verifySignature(block_Id, encryptedEncodedContent, hashSign):
    global keyDistributor
    key = keyDistributor.getPublicKey(block_Id, 'cloud')
    key = RSA.importKey(key)
    hashSign = p.unpickle(hashSign)
    h = SHA256.new(encryptedEncodedContent).hexdigest()
    if (key.verify(h, hashSign)):
        return 1
    else:
        return 0
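The signing counterpart to verifySignature (the hashAndSign referenced in Example #26) is not shown. A plausible sketch under the same legacy PyCrypto API; keyDistributor.getPrivateKey is a hypothetical accessor for the signer's private key:

def hashAndSign(block_Id, encodedContent, username):
    # hypothetical: fetch the signer's private key (accessor name is a guess)
    key = RSA.importKey(keyDistributor.getPrivateKey(block_Id, username))
    # sign the hex digest of the content, mirroring what verifySignature checks
    h = SHA256.new(encodedContent).hexdigest()
    return key.sign(h, '')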
Example #12
def test_app_pickle():
    from pickle import dumps as pickle, loads as unpickle
    from temboardagent.cli import Application

    empty_generator = (x for x in [])
    orig = Application(specs=empty_generator)
    orig.config.update(dict(a=1))
    copy = unpickle(pickle(orig))
    assert [] == copy.specs
    assert copy.config
Example #13
 def verifyCloudGetAttestation(self):
     self.cloudPublicKey = p.unpickle(cloudStorage.getPublicKey())
     self.block_hashPickled = p.pickle(self.block_hash)
     self.key_block_Version_NoPickled = p.pickle(self.key_block_Version_No)
     self.concat = str(self.block_Id) + str(
         self.key_block_Version_NoPickled) + str(
             self.block_Version_No) + self.nonce
     self.hashOfElements = SHA256.new(self.concat).hexdigest()
     # Verify the attestation signature
     if (self.cloudPublicKey.verify(self.hashOfElements,
                                    p.unpickle(self.cloud_Get_Attest))):
         #sending to auditor
         auditorReply = keyDistributor.putAttestations(
             self.username, "cloudgetattestation", self.block_Id,
             self.block_Version_No, self.cloud_Get_Attest,
             self.key_block_Version_NoPickled, self.block_hashPickled,
             self.chain_Hash)
         return 1
     else:
         return 0
Example #14
 def get(self):
     if cloudStorage.blockExists(self.block_Id):
         self.nonce = self.setNonce()
         [
             self.block_Version_No, self.b64, self.hashSign,
             self.key_block_Version_No, self.cloud_Get_Attest,
             self.chain_Hash, self.old_Hash
         ] = cloudStorage.get(self.block_Id, self.username, self.nonce)
         self.setSecretKey()
         self.decodeAndDecrypt()
         self.hashSign = p.unpickle(self.hashSign)
     else:
         print "I need to put dummy record first"
Example #15
    def load_from_cache(self):
        """ Loads the package dict from a cache file """
        if not isfile(self._cache):
            raise CacheError(_("Cache file does not exist: {0}").format(self._cache))

        if not isfile(self._db) or stat(self._db).st_mtime > stat(self._cache).st_mtime:
            self.clear_cache()
            raise CacheError(_("Cache is outdated"))

        try:
            with open(self._cache, "rb") as f:
                return unpickle(f)
        except Exception:
            self.clear_cache()
            raise CacheError(_("Could not load cache"))
Example #16
    def load_from_cache(self):
        """ Loads the package dict from a cache file """
        try:
            ctime = getctime(self._cache)

            if getctime(self._db) > ctime or getctime(__file__) > ctime:
                raise CacheError(_("Cache is outdated: {0}").format(self._cache))
        except OSError:
            raise CacheError(_("Cache is outdated: {0}").format(self._cache))

        try:
            with open(self._cache, "rb") as f:
                return unpickle(f)
        except Exception:
            raise CacheError(_("Could not load cache: {0}").format(self._cache))
Example #17
    def load_from_cache(self):
        ''' Loads the package dict from a cache file '''
        try:
            ctime = getctime(self._cache)

            if getctime(self._db) > ctime or getctime(__file__) > ctime:
                raise CacheError(
                    _('Cache is outdated: {0}').format(self._cache))
        except OSError:
            raise CacheError(_('Cache is outdated: {0}').format(self._cache))

        try:
            with open(self._cache, 'rb') as f:
                return unpickle(f)
        except Exception:
            raise CacheError(
                _('Could not load cache: {0}').format(self._cache))
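Examples #15-#17 only show the read side. Since these variants call unpickle(f) on an open file (i.e. pickle.load under an alias), the matching write side would be a pickle.dump of the package dict; a minimal sketch, assuming nothing else is stored in the cache file:

    def save_to_cache(self, packages):
        ''' Sketch: pickle the package dict to the cache file so the
            mtime/ctime comparisons in load_from_cache work '''
        # assuming: from pickle import dump as pickle_dump
        with open(self._cache, 'wb') as f:
            pickle_dump(packages, f)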
Example #18
def hadoop_line_features(line):
    ''' Convert a correctly-formatted line of text to a list of GeoJSON features.
    
        Allows Hadoop to stream features from the mapper to the reducer.
        See also skeletron-hadoop-mapper.py and skeletron-hadoop-reducer.py.
    '''
    id, prop, geom = line.split()
    
    id = json_decode(id)
    properties = dict(unpickle(b64decode(prop)))
    geometry = wkb_decode(b64decode(geom))
    
    parts = geometry.geoms if hasattr(geometry, 'geoms') else [geometry]
    
    return [dict(type='Feature', id=id, properties=properties,
                 geometry=part.__geo_interface__)
            for part
            in parts
            if hasattr(part, '__geo_interface__')]
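The mapper side that produces these lines is implied but not shown. A sketch of the inverse for this Python 2 era codebase, assuming shapely geometries and sorted key/value pairs for the pickled properties (the helper name is an assumption):

from base64 import b64encode
from json import dumps as json_encode
from pickle import dumps as pickle_dumps

def hadoop_feature_line(id, properties, geometry):
    ''' Sketch: one line of text per feature, with pickled properties
        and WKB geometry both base64-encoded so line.split() works. '''
    return ' '.join((json_encode(id),
                     b64encode(pickle_dumps(sorted(properties.items()))),
                     b64encode(geometry.wkb)))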
Example #19
	def setProtected(self,name):
		'''
		Set a name in the table to be protected from removal
		because of limits.
		'''
		# generate the filepath to the protected values
		# list
		filePath=pathJoin(self.path,'protected.table')
		# check if the path exists
		if pathExists(filePath):
			# read the protected list from the file
			protectedList=unpickle(loadFile(filePath))
		else:
			# create a new empty list
			protectedList=[]
		# append the new value to the list
		protectedList.append(name)
		# pickle the protected list for storage
		protectedList=pickle(protectedList)
		# write the changes back to the protected list
		writeFile(filePath,protectedList)
Example #20
	def loadValue(self,name):
		'''
		Loads a saved value and returns it.
		'''
		# find the file path in the names array
		if name in self.names:
			filePath=self.namePaths[name]
		else:
			return False
		# check if the path exists
		if pathExists(filePath):
			# load the data
			fileData=loadFile(filePath)
		else:
			# return false if the value does not exist
			return False
		# unpickle the filedata
		fileData = unpickle(fileData)
		debug.add('loading value '+str(name),fileData)
		# returns the value of a table stored on disk
		return fileData
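loadValue assumes something has already written the value files and registered them in self.namePaths. A minimal sketch of the matching saveValue, reusing the helpers and bookkeeping from Example #1 (the real method may also enforce self.limit and the protected list):

	def saveValue(self,name,value):
		'''
		Pickle a value to disk under name and register it in
		the name paths table.
		'''
		# build a file path for the value inside the table
		filePath=pathJoin(self.path,self.pathPrefix+str(name)+'.value')
		# pickle the value and write it to the file
		writeFile(filePath,pickle(value))
		# register the path and refresh the cached bookkeeping
		self.namePaths[name]=filePath
		self.names=self.namePaths.keys()
		self.length=len(self.names)
		# persist the updated name paths index
		writeFile(pathJoin(self.path,'names.table'),pickle(self.namePaths))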
Example #21
    def setProtected(self, name):
        '''
        Set a name in the table to be protected from removal
        because of limits.
        '''
        # generate the filepath to the protected values
        # list
        filePath = pathJoin(self.path, 'protected.table')
        # check if the path exists
        if pathExists(filePath):
            # read the protected list from the file
            protectedList = unpickle(loadFile(filePath))
        else:
            # create a new empty list
            protectedList = []
        # append the new value to the list
        protectedList.append(name)
        # pickle the protected list for storage
        protectedList = pickle(protectedList)
        # write the changes back to the protected list
        writeFile(filePath, protectedList)
Example #22
def hadoop_line_features(line):
    ''' Convert a correctly-formatted line of text to a list of GeoJSON features.
    
        Allows Hadoop to stream features from the mapper to the reducer.
        See also skeletron-hadoop-mapper.py and skeletron-hadoop-reducer.py.
    '''
    id, prop, geom = line.split()

    id = json_decode(id)
    properties = dict(unpickle(b64decode(prop)))
    geometry = wkb_decode(b64decode(geom))

    parts = geometry.geoms if hasattr(geometry, 'geoms') else [geometry]

    return [
        dict(type='Feature',
             id=id,
             properties=properties,
             geometry=part.__geo_interface__) for part in parts
        if hasattr(part, '__geo_interface__')
    ]
Example #23
    def loadValue(self, name):
        '''
        Loads a saved value and returns it.
        '''
        # find the file path in the names array
        if name in self.names:
            filePath = self.namePaths[name]
        else:
            return False
        # check if the path exists
        if pathExists(filePath):
            # load the data
            fileData = loadFile(filePath)
        else:
            # return false if the value does not exist
            return False
        # unpickle the filedata
        fileData = unpickle(fileData)
        debug.add('loading value ' + str(name), fileData)
        # returns the value of a table stored on disk
        return fileData
Example #24
 def feed(self, sock):
     if not self.len:
         rcvd = sock.recv(4 - len(self.lenbuf))
         if not rcvd:
             raise EOFError
         self.lenbuf += rcvd
         if len(self.lenbuf) == 4:
             self.len = unpack('I', self.lenbuf)[0]
             self.lenbuf = b''
     elif len(self.databuf) < self.len:
         sock.settimeout(0)
         try:
             while len(self.databuf) < self.len:
                 rcvd = sock.recv(self.len - len(self.databuf))
                 if not rcvd:
                     raise EOFError
                 self.databuf += rcvd
             self.data = unpickle(self.databuf)
         except BlockingIOError:
             pass
         finally:
             sock.settimeout(None)
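feed reassembles length-prefixed pickled frames, so the peer has to write a 4-byte unsigned-int header before each payload. A minimal sketch of that sending side, assuming the same struct format (the function name is an assumption):

from pickle import dumps as pickle
from struct import pack

def send_frame(sock, obj):
    # prefix the pickled payload with its 4-byte length, matching
    # the unpack('I', ...) header that feed() expects
    data = pickle(obj)
    sock.sendall(pack('I', len(data)) + data)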
Example #25
def get(block_Id, user, nonce):
    global staleStorage
    global storage
    # Optionally send back the wrong hash so the client can detect the integrity violation
    if (keyDistributor.hasAccess(block_Id, user, 'r')) != 0:
        if returnWrongHash == 1:
            [
                block_Version_No, content, new_Hash, hashSign,
                key_block_Version_No, client_Put_Attest, chain_Hash
            ] = storage[block_Id + 1]
        else:
            #            if simulateForkAttackFlag == 1:
            #                print "hahaha given back stale data"
            #                [block_Version_No,content,new_Hash,hashSign,key_block_Version_No,client_Put_Attest,chain_Hash]=staleStorage[block_Id]
            #            else:
            [
                block_Version_No, content, new_Hash, hashSign,
                key_block_Version_No, client_Put_Attest, chain_Hash
            ] = storage[block_Id]
    else:
        return [0, 0, 0, 0, 0, 0, 0]
    # Get pickled form of cloud_get_attestation
    cloud_Get_Attest = createCloudGetAttestation(block_Id,
                                                 key_block_Version_No,
                                                 block_Version_No, content,
                                                 chain_Hash, nonce)
    chain_Hash = hash(str(p.unpickle(cloud_Get_Attest)) + chain_Hash)
    #Need to update chain hash in stored value
    storage[block_Id] = [
        block_Version_No, content, new_Hash, hashSign, key_block_Version_No,
        client_Put_Attest, chain_Hash
    ]
    return [
        block_Version_No, content, hashSign, key_block_Version_No,
        cloud_Get_Attest, chain_Hash, new_Hash
    ]
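createCloudGetAttestation is called above but not shown. Its shape can be inferred from the verification in Example #13: the cloud signs the SHA256 digest of the concatenated block id, pickled key version, version number, and nonce, and the callers unpickle the result. A sketch under those assumptions (content and chain_Hash are kept only for signature parity):

def createCloudGetAttestation(block_Id, key_block_Version_No,
                              block_Version_No, content, chain_Hash, nonce):
    # the key version is assumed to already be in pickled form,
    # as Example #9 suggests
    concat = str(block_Id) + str(key_block_Version_No) + str(
        block_Version_No) + nonce
    hashOfElements = SHA256.new(concat).hexdigest()
    # sign with the cloud's private key and return the pickled signature
    cloudSignKey = loadSigningKey()
    return p.pickle(cloudSignKey.sign(hashOfElements, ''))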
Example #26
                #                obj.block_Version_No=int(raw_input("current Block version: %s,Enter New:" %obj.block_Version_No))
                if obj.createClientPutAttestation() == 0:
                    print("User %s has no write privileges for Block:%d" %
                          (obj.username, obj.block_Id))
                else:
                    [returnCode, cloudReply, chain_Hash] = obj.put(
                        p.pickle(obj.clientPutAttestation), obj.block_Id,
                        obj.key_block_Version_No, obj.block_Version_No,
                        obj.block_hashPickled, obj.content,
                        p.pickle(
                            hashAndSign(obj.block_Id, obj.b64, obj.username)))

                    #                [returnCode,cloudReply,chain_Hash]=put(p.pickle(obj.clientPutAttestation),obj.block_Id,obj.key_block_Version_No,obj.block_Version_No,obj.block_hashPickled,obj.content,p.pickle(hashAndSign(obj.block_Id,obj.content,obj.username)),obj.username)

                    if returnCode == 1:
                        cloudPublicKey = p.unpickle(
                            cloudStorage.getPublicKey())
                        obj.hashOfElements = hash(obj.concat)
                        if verifySignature(cloudPublicKey,
                                           p.unpickle(cloudReply),
                                           obj.hashOfElements):
                            print(
                                "Cloud Put attestation for Block %s looks good. I'll store it for later use"
                                % obj.block_Id)
                        else:
                            print(
                                "Cloud Put attestation verification failed for Block %s however cloud has put the item"
                                % obj.block_Id)
                    else:
                        print cloudReply
            else:
                print("Cloud Get attestation failed for Block:%s" %
Example #27
 def test_04_pickle(self):
   'util.symbol: pickling'
   cucumber = symbol.cucumber
   gerkin = pickle(cucumber, HIGHEST_PROTOCOL)  # NOTE: MUST be protocol 2+
   veggie = unpickle(gerkin)
   assert veggie is cucumber
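The identity assertion only holds if symbol interns its instances and tells pickle to rebuild through the constructor. A minimal sketch of such a type, assuming this is roughly how util.symbol works (Python 2 metaclass syntax, matching the era of these examples):

class _symbolmeta(type):
    def __getattr__(cls, name):
        # symbol.cucumber builds (or fetches) the interned symbol
        return cls(name)

class symbol(object):
    __metaclass__ = _symbolmeta
    _registry = {}

    def __new__(cls, name):
        # one shared instance per name
        try:
            return cls._registry[name]
        except KeyError:
            inst = object.__new__(cls)
            inst.name = name
            cls._registry[name] = inst
            return inst

    def __reduce__(self):
        # unpickling calls symbol(name), which returns the interned
        # instance, so the round-trip preserves identity
        return (symbol, (self.name,))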
Example #28
 def _get(self, txn, key, default):
     value = self._db.get(key.encode("unicode-escape"), None, txn)
     if value is None:
         return default
     else:
         return unpickle(value)
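A sketch of the matching write path for this Berkeley DB wrapper, assuming values are stored pickled under unicode-escaped keys exactly as _get reads them (the method name and signature are guesses):

 def _put(self, txn, key, value):
     # assuming: from pickle import dumps as pickle
     self._db.put(key.encode("unicode-escape"), pickle(value), txn)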
Example #29
    def __init__(self, config_filename, cold_start=False, port=22244):
        """
        :param config_filename: \
            The filename of the config file for the server which describes the\
            machines to be controlled.
        :type config_filename: str
        :param cold_start: \
            If False (the default), the server will attempt to restore its\
            previous state, if True, the server will start from scratch.
        :type cold_start: bool
        :param port: Which port to listen on. Defaults to 22244.
        :type port: int
        """
        # ============ STATE THAT NEEDS TO BE ALWAYS DEFINED ============

        self._cold_start = cold_start
        self._port = port

        # Should the background thread terminate?
        self._stop = False

        # Flag for checking if the server is still alive
        self._running = False

        # Currently open sockets to clients. Once the server has started,
        # this should only be accessed from the server thread.
        self._server_socket = None

        # The server core object; this is the object that is persisted
        self._controller = None

        # Buffered data received from each socket
        # {fd: buf, ...}
        self._client_buffers = {}

        # ============ SUPERCLASS INITIALISATION ============

        PollingServerCore.__init__(self)
        ConfigurationReloader.__init__(self, config_filename, self.wake)

        # ============ ACTIVE OBJECTS ============

        # The background thread in which the server will run
        self._server_thread = Thread(target=self._run, name="Server Thread")

        # The current server configuration options. Once the server has
        # started, these should only be accessed from the server thread.
        self._configuration = Configuration()

        # Infer the saved-state location
        self._state_filename = self._get_state_filename(
            self.configuration_file)

        # Attempt to restore saved state if required
        if not self._cold_start and path.isfile(self._state_filename):
            try:
                with open(self._state_filename, "rb") as f:
                    self._controller = unpickle(f)
                log.info("Server warm-starting from %s.", self._state_filename)
            except Exception:
                # An error occurred during unpickling.
                log.exception("Server state could not be unpacked from %s.",
                              self._state_filename)
                self._controller = None

        # Perform cold-start if no saved state was loaded
        if self._controller is None:
            log.info("Server cold-starting.")
            self._controller = Controller()

        # Notify the background thread when something changes in the background
        # of the controller (e.g. power state changes).
        self._controller.on_background_state_change = self.wake

        # Read configuration file. This must succeed when the server is first
        # being started.
        if not self.read_config_file():
            self._controller.stop()
            raise Exception("Config file could not be loaded.")

        # Start the server
        self._server_thread.start()
        self._running = True
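The warm start above implies a matching save on shutdown or on state changes. A minimal sketch, assuming the controller is simply pickled to the inferred state filename (a real server would likely write atomically via a temp file):

    def _save_state(self):
        """ Sketch: persist the controller so a later warm start can
            restore it with unpickle() as in __init__ above. """
        # assuming: from pickle import dump as pickle_dump
        with open(self._state_filename, "wb") as f:
            pickle_dump(self._controller, f)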
Example #30
 def __getitem__(self, id):
     return pickle.unpickle(db.DB.__getitem__(self, id))
Example #31
 def setSecretKey(self):
     self.secretKey = p.unpickle(
         keyDistributor.getSecretKey(self.block_Id, self.username))
Example #32
 def _lookup_secondary_key(txn, db, idx, secondary_key):
     primary_key = idx.get(secondary_key.encode("unicode-escape"), None,
                           txn)
     if primary_key is not None:
         return _lookup_primary_key(txn, db, unpickle(primary_key))
Example #33
 def _lookup_primary_key(txn, db, primary_key):
     value = db.get(primary_key.encode("unicode-escape"), None, txn)
     if value is not None:
         return unpickle(value)
Example #34
 def _get(self, txn, key, default):
     value = self._db.get(key.encode("unicode-escape"), None, txn)
     if value is None:
         return default
     else:
         return unpickle(value)
Example #35
 def _lookup_secondary_key(txn, db, idx, secondary_key):
     primary_key = idx.get(secondary_key.encode("unicode-escape"), None, txn)
     if primary_key is not None:
         return _lookup_primary_key(txn, db, unpickle(primary_key))
Example #36
 def _lookup_primary_key(txn, db, primary_key):
     value = db.get(primary_key.encode("unicode-escape"), None, txn)
     if value is not None:
         return unpickle(value)
Example #37
def restore_tasks():
    if app.config['STORE_FILE'] and path.exists(app.config['STORE_FILE']):
        with open(app.config['STORE_FILE'], 'rb') as f:
            return {k: Task(*v) for k, v in unpickle(f).items()}
    return {}
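The counterpart that restore_tasks expects, sketched under the assumption that Task is a namedtuple-like class, so each one can be persisted as the tuple of its fields and rebuilt with Task(*v):

def store_tasks(tasks):
    # assuming: from pickle import dump as pickle_dump
    if app.config['STORE_FILE']:
        with open(app.config['STORE_FILE'], 'wb') as f:
            # persist {key: field-tuple} so restore_tasks can Task(*v)
            pickle_dump({k: tuple(t) for k, t in tasks.items()}, f)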
Example #38
def vacuum_worker(config, dbname, mode, schema=None, table=None):
    config = unpickle(config)

    with functions.get_postgres(config, dbname).connect() \
            as conn:
        return functions.vacuum(conn, dbname, mode, schema, table)
Example #39
def reindex_worker(config, dbname, schema=None, table=None, index=None):
    config = unpickle(config)

    with functions.get_postgres(config, dbname).connect() as conn:
        return functions.reindex(conn, dbname, schema, table, index)
Example #40
def analyze_worker(config, dbname, schema=None, table=None):
    config = unpickle(config)

    with functions.get_postgres(config, dbname).connect() \
            as conn:
        return functions.analyze(conn, dbname, schema, table)
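In Examples #38-#40 the worker receives its configuration already pickled. A sketch of the dispatching side, assuming the dumps/loads aliasing used by the tests above (shown as a direct call for simplicity; presumably the worker runs in another process, which is why the config is pickled at all):

from pickle import dumps as pickle

def schedule_vacuum(app, dbname, mode):
    # the config crosses a process boundary, so it is serialized
    # here and rebuilt by unpickle(config) inside the worker
    return vacuum_worker(pickle(app.config), dbname, mode)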