Example #1
    def saveValue(self, name, value):
        '''
		Save the given value under the given name.
		'''
        debug.add('saving value ' + str(name), value)
        # create a file association for the name to store the value
        if name not in self.names:
            debug.add('name not in self.names')
            # create a counter for the generated filename
            counter = 0
            # seed value for while loop
            newName = (str(counter) + '.value')
            # find a filename that does not already exist in
            # the database directory
            while newName in listdir(self.path):
                # increment the counter
                counter += 1
                # generate the next candidate filename
                newName = (str(counter) + '.value')
            debug.add('newname', newName)
            # set the metadata value for the filepaths in this table instance
            self.namePaths[name] = pathJoin(self.path, newName)
            # write the newly created name association to table metadata on disk
            writeFile(pathJoin(self.path, 'names.table'),
                      pickle(self.namePaths))
        debug.add('namePaths', self.namePaths)
        # update the length and names attributes
        self.names = self.namePaths.keys()
        self.length = len(self.names)
        # save the table changes back onto the disk
        fileData = writeFile(self.namePaths[name], pickle(value))
        return fileData
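The table examples above and below lean on a few project helpers that are not shown. As a point of reference, here is a minimal sketch of what they might look like; the dumps/loads aliasing matches the test examples further down, while writeFile and loadFile are hypothetical stand-ins, not the project's real code.

# Minimal helper sketch (assumptions, not the project's actual code):
# pickle/unpickle follow the aliasing shown in the test examples below,
# writeFile/loadFile are thin byte-oriented file wrappers.
from os import listdir, makedirs
from os.path import join as pathJoin, exists as pathExists
from pickle import dumps as pickle, loads as unpickle

def writeFile(path, data):
    # write raw bytes and return what was written
    with open(path, 'wb') as f:
        f.write(data)
    return data

def loadFile(path):
    # read raw bytes back
    with open(path, 'rb') as f:
        return f.read()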
Example #2
def test(file_name, max_val, min_val, row_len):

    print 'converting the data...'
    csvfile = open('data/' + file_name + '.csv')

    ## convert test data
    test_set = []
    row_num = 0

    for row in csv.reader(csvfile):
        row_num += 1

        print '\r', row_num, '/', row_len,

        if row_num != 1:
            features = row[1: len(row)]  # 1-st column is an id of data (irrelevant)

            for column in xrange(len(features)):
                if max_val[column] != min_val[column]:
                    features[column] = (float(features[column]) - min_val[column]) / (max_val[column] - min_val[column])
                else:
                    # TBF: if max_val == min_val, that feature is of no use for prediction, so the column should be removed
                    features[column] = 0.

            test_set.append(features)

    csvfile.close()
    test_set = numpy.asarray(test_set, dtype=numpy.float64)

    print
    print 'saving the data...'
    pickle(test_set, 'data/' + file_name + '.pkl')
    print 'done!'
Example #3
    def stop_and_join(self):
        """ Stop the server and wait for it to shut down completely.
        """
        log.info("Server shutting down, please wait...")

        try:
            # Shut down server thread
            self._stop = True
            self.wake()
            self._server_thread.join()
        finally:
            # Close all connections; this is the critical part as it closes
            # the server socket itself.
            log.info("Closing connections...")
            self._close()

        # Shut down the controller and flush all BMP commands
        log.info("Waiting for all queued BMP commands...")
        self._controller.stop()
        self._controller.join()

        # Dump controller state to file
        with open(self._state_filename, "wb") as f:
            pickle(self._controller, f)

        log.info("Server shut down.")

        self._running = False
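Here `pickle(self._controller, f)` is called with a file object, so `pickle` is presumably an alias for `pickle.dump`. A small, hypothetical counterpart for restoring the dumped controller state on the next start could look like this (the state-file handling is assumed):

import pickle as _pickle

def load_controller_state(state_filename):
    # Return the controller dumped by stop_and_join, or None if no state file exists.
    try:
        with open(state_filename, "rb") as f:
            return _pickle.load(f)
    except FileNotFoundError:
        return None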
Example #4
	def saveValue(self,name,value):
		'''
		Save the given value under the given name.
		'''
		debug.add('saving value '+str(name),value)
		# create a file association for the name to store the value
		if name not in self.names:
			debug.add('name not in self.names')
			# create a counter for the generated filename
			counter=0
			# seed value for while loop
			newName = (str(counter)+'.value')
			# find a filename that does not already exist in
			# the database directory
			while newName in listdir(self.path):
				# increment the counter
				counter+=1
				# generate the next candidate filename
				newName=(str(counter)+'.value')
			debug.add('newname',newName)
			# set the metadata value for the filepaths in this table instance
			self.namePaths[name]=pathJoin(self.path,newName)
			# write the newly created name association to table metadata on disk
			writeFile(pathJoin(self.path,'names.table'),pickle(self.namePaths))
		debug.add('namePaths',self.namePaths)
		# update the length and names attributes
		self.names=self.namePaths.keys()
		self.length=len(self.names)
		# save the table changes back onto the disk
		fileData=writeFile(self.namePaths[name],pickle(value))
		return fileData
Example #5
 def typesize(self, obj):
     string = repr(obj)
     try:
         pickle.dumps(obj)
     except PicklingError:
         string = str(type(obj)) + str(sys.getsizeof(obj))
     if pickle.dumps(obj) != pickle.dumps(copy.copy(obj)):
         string = str(type(obj)) + str(sys.getsizeof(obj))
     return string
Example #6
 def update_cache(self):
     """ Saves the package list in a cache file """
     try:
         if not isdir(dirname(self._cache)):
             makedirs(dirname(self._cache), mode=0o755, exist_ok=True)
         pickle(self._packages, open(self._cache, "wb"))
     except:
         self.clear_cache()
         raise CacheError(_("Could not update cache"))
Example #7
def createCloudGetAttestation(block_Id, key_block_Version_No, block_Version_No,
                              content, chain_Hash, nonce):
    key = loadSigningKey()
    block_hash = p.pickle(hash(content))
    concat = str(block_Id) + str(key_block_Version_No) + str(
        block_Version_No) + nonce
    h = hash(concat)
    sign = key.sign(h, '')
    return p.pickle(sign)
Example #8
def store_tasks(task_pool):
    with lock:
        if app.config['STORE_FILE']:
            with open(app.config['STORE_FILE'], 'wb') as f:
                pickle(
                    {k: [*v.__dict__.values()]
                     for k, v in task_pool.items()}, f, HIGHEST_PROTOCOL)
                return True
        else:
            return False
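store_tasks writes the task pool as a plain dict of attribute lists using what looks like a `pickle.dump`-style alias. A hypothetical loader for the same file, sketched under that assumption:

from pickle import load

def load_tasks(store_file):
    # Read back the dict written by store_tasks; return {} if nothing was stored.
    if not store_file:
        return {}
    try:
        with open(store_file, 'rb') as f:
            return load(f)
    except (FileNotFoundError, EOFError):
        return {}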
Example #9
    def update_cache(self):
        """ Saves the package list in a cache file """
        try:
            if not isdir(dirname(self._cache)):
                makedirs(dirname(self._cache), mode=0o755)

            with open(self._cache, "wb") as f:
                pickle(self._packages, f)
        except:
            self.clear_cache()
            raise CacheError(_("Could not update cache: {0}").format(self._cache))
Example #10
    def update_cache(self):
        ''' Saves the package list in a cache file '''
        try:
            if not isdir(dirname(self._cache)):
                makedirs(dirname(self._cache), mode=0o755)

            with open(self._cache, 'wb') as f:
                pickle(self._packages, f)
        except:
            self.clear_cache()
            raise CacheError(
                _('Could not update cache: {0}').format(self._cache))
Example #11
def train(file_name, row_len):

    train_set_x = []
    train_set_y = []

    print 'converting the data... (%s)' % file_name
    csvfile = open('data/' + file_name + '.csv')
    row_num = 0

    for row in csv.reader(csvfile):
        row_num += 1

        print '\r', row_num, '/', row_len,
        #features = row[1: len(row) - 1]  # 1-st column is an ID of data (irrelevant)
        #print features[521], features[271]

        if row_num != 1:
            features = row[1: len(row) - 1]  # 1-st column is an ID of data (irrelevant)

            # for column in xrange(len(features)):
            #     if max_val[column] != min_val[column]:
            #         features[column] = (float(features[column]) - min_val[column]) / (max_val[column] - min_val[column])
            #     else:
            #         # TBF: if max_val == min_val, that feature is no use for prediction, thus better removing the column
            #         features[column] = 0.
            feature = []
            if features[521] == 'NA' or features[271] == 'NA':
                features[521] = 0
                features[271] = 0

            value = float(features[521]) - float(features[271])
            feature.append(value)
            #feature.append((value - min_val) / (max_val - min_val))

            train_set_x.append(feature)

            loss = row[-1]
            # if int(loss) > 0:
            #     loss = 1
            # else:
            #     loss = 0

            train_set_y.append(int(loss))

    csvfile.close()
    train_set_x = numpy.asarray(train_set_x, dtype=numpy.float64)
    train_set = (train_set_x, train_set_y)

    print
    print 'saving the data...'
    pickle(train_set, 'data/' + file_name + '_f528_f274.pkl')
    print 'done!'
Example #12
def test_pickle_dotdict():
    from pickle import dumps as pickle, loads as unpickle
    from sampleproject.toolkit.utils import DotDict

    orig = DotDict(dict(a=1, b=dict(c=2)))
    copy = unpickle(pickle(orig))
    assert 2 == copy.b.c
Example #13
def test_pickle():
    from pickle import dumps as pickle, loads as unpickle
    from temboardagent.postgres import Postgres

    orig = Postgres(host='myhost')
    copy = unpickle(pickle(orig))
    assert 'myhost' == copy.host
Example #14
def hello_task_manager_bootstrap(context):
    yield taskmanager.Task(
        worker_name=say_hello_worker.__name__,
        id=say_hello_worker.__name__,
        options={'pickled_app': pickle(APP)},
        redo_interval=APP.config.hello.background_worker_interval,
    )
Example #15
	def __init__(self,path):
		'''
		DB table to store things as files and directories. This is
		designed to reduce ram usage when reading things from large
		databases. Specifically this is designed for caches.

		# variables #
		.path
		  The path on the filesystem where the table is stored.
		.names
		  Gives you a list containing the names of all stored
		  values as strings.
		.namePaths
		  Gives you a dict where the keys are the names and
		  the value is the path of that value database file
		.length
		  The length of names stored in this table
		'''
		# path of the root of the cache, this is where files
		# will be stored on the system
		self.path=path
		# create the paths if they do not exist
		if not pathExists(self.path):
			makedirs(self.path)
		debug.add('table path',self.path)
		# the path prefix is for tables stored in tables
		self.pathPrefix=''
		# tables are stored as files
		tempTable=[]
		# array of all the value names stored on table
		namesPath=pathJoin(self.path,'names.table')
		# if no namepaths exist create them
		if not pathExists(pathJoin(namesPath)):
			# write the new value to the system
			writeFile(namesPath,pickle(dict()))
		# load the name paths
		self.namePaths=unpickle(loadFile(namesPath))
		debug.add('self.namePaths',self.namePaths)
		# create an array of all the names of values stored
		self.names=self.namePaths.keys()
		debug.add('self.names',self.names)
		# length of all the values stored on the table
		self.length=len(self.names)
		debug.add('self.length',self.length)
		# the protected list is an array of names that are
		# protected from limit cleaning
		protectedPath=pathJoin(self.path,'protected.table')
		if pathExists(pathJoin(protectedPath)):
			# load the list
			self.protectedList=unpickle(loadFile(protectedPath))
		else:
			# create a blank list
			self.protectedList=[]
		# limit value to limit the number of values
		# load the limit value from file if it exists
		limitPath=pathJoin(self.path,'limit.table')
		if pathExists(pathJoin(limitPath)):
			self.limit=unpickle(loadFile(limitPath))
		else:
			self.limit=None
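The on-disk layout set up here is simple: names.table holds a pickled dict mapping value names to file paths, and each value lives in its own pickled .value file. A standalone sketch for inspecting such a table directory, assuming only that layout:

import os
import pickle

def list_table(path):
    # Return the name -> value-file mapping stored in names.table, or {} if absent.
    names_file = os.path.join(path, 'names.table')
    if not os.path.exists(names_file):
        return {}
    with open(names_file, 'rb') as f:
        return pickle.load(f)

def read_value(path, name):
    # Follow the mapping and unpickle one stored value.
    with open(list_table(path)[name], 'rb') as f:
        return pickle.load(f)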
Example #16
def test_pickle_dotdict():
    from pickle import dumps as pickle, loads as unpickle
    from temboardagent.utils import DotDict

    orig = DotDict(dict(a=1, b=dict(c=2)))
    copy = unpickle(pickle(orig))
    assert 2 == copy.b.c
Example #17
    def __init__(self, path):
        '''
		DB table to store things as files and directories. This is
		designed to reduce ram usage when reading things from large
		databases. Specifically this is designed for caches.

		# variables #
		.path
		  The path on the filesystem where the table is stored.
		.names
		  Gives you a list containing the names of all stored
		  values as strings.
		.namePaths
		  Gives you a dict where the keys are the names and
		  the value is the path of that value database file
		.length
		  The length of names stored in this table
		'''
        # path of the root of the cache, this is where files
        # will be stored on the system
        self.path = path
        # create the paths if they do not exist
        if not pathExists(self.path):
            makedirs(self.path)
        debug.add('table path', self.path)
        # the path prefix is for tables stored in tables
        self.pathPrefix = ''
        # tables are stored as files
        tempTable = []
        # array of all the value names stored on table
        namesPath = pathJoin(self.path, 'names.table')
        # if no namepaths exist create them
        if not pathExists(pathJoin(namesPath)):
            # write the new value to the system
            writeFile(namesPath, pickle(dict()))
        # load the name paths
        self.namePaths = unpickle(loadFile(namesPath))
        debug.add('self.namePaths', self.namePaths)
        # create an array of all the names of values stored
        self.names = self.namePaths.keys()
        debug.add('self.names', self.names)
        # length of all the values stored on the table
        self.length = len(self.names)
        debug.add('self.length', self.length)
        # the protected list is an array of names that are
        # protected from limit cleaning
        protectedPath = pathJoin(self.path, 'protected.table')
        if pathExists(pathJoin(protectedPath)):
            # load the list
            self.protectedList = unpickle(loadFile(protectedPath))
        else:
            # create a blank list
            self.protectedList = []
        # limit value to limit the number of values
        # load the limit value from file if it exists
        limitPath = pathJoin(self.path, 'limit.table')
        if pathExists(pathJoin(limitPath)):
            self.limit = unpickle(loadFile(limitPath))
        else:
            self.limit = None
Example #18
    def deleteValue(self, name):
        '''
		Delete the value stored under the given name.
		'''
        # clean up names to avoid stupid
        debug.add('deleting value ', name)
        # figure out the path to the named value file
        if name in self.names:
            filePath = self.namePaths[name]
            # remove the metadata entry
            del self.namePaths[name]
            # write changes to database metadata file
            writeFile(pathJoin(self.path, 'names.table'),
                      pickle(self.namePaths))
            # update the length and names attributes
            self.names = self.namePaths.keys()
            self.length = len(self.names)
        else:
            return False
        if pathExists(filePath):
            # remove the file associated with the value
            removeFile(filePath)
            return True
        else:
            return False
Example #19
 def createClientPutAttestation(self):
     self.setSecretKey()
     self.setNewkey_block_Version_No()
     self.encryptAndEncode()
     self.blockSigningKey = keyDistributor.getSigningKey(
         self.block_Id, self.username)
     if self.blockSigningKey == 0:
         return 0
     self.blockSigningKey = RSA.importKey(self.blockSigningKey)
     self.block_hash = SHA256.new(self.b64).hexdigest()
     self.block_hashPickled = p.pickle(self.block_hash)
     self.concat = str(self.block_Id) + str(
         p.pickle(self.key_block_Version_No)) + str(self.block_Version_No)
     self.hashOfElements = SHA256.new(self.concat).hexdigest()
     self.clientPutAttestation = self.blockSigningKey.sign(
         self.hashOfElements, '')
     return 1
Example #20
def put(client_Put_Attest, block_Id, key_block_Version_No, new_Version_No,
        New_Hash, content, hashSign, old_Hash):
    global staleStorage
    global storage
    # verifySignature performs an integrity check on the message, to make sure a privileged user sent this data update
    if verifySignature(block_Id, content, hashSign) == 1:
        print("Content Integrity Verified")
        # TODO: check write serializability; match hash(stored content) with the new hash.
        if storage.has_key(block_Id):
            [
                currBlock_Version_No, currContent, currHash, currHashSign,
                currKey_block_Version_No, currClient_Put_Attest, chain_Hash
            ] = storage[block_Id]
            # Cloud ensures write serializability by checking that the block version number increments by 1
            if not (new_Version_No == currBlock_Version_No + 1
                    and old_Hash == currHash):
                return [
                    0,
                    "Cloud:Content already modified by other user. Try again",
                    'no chain hash'
                ]
        else:  # this is a new item; we assume the first item from a client comes with version number 1
            currBlock_Version_No = 0
            currHash = ''
            currHashSign = ''
            chain_Hash = ''


# write serializability assurance: check whether the old_Hash given by the client matches the currHash stored in storage
#        if not old_Hash==currHash:
#            return [0,"Cloud:Data has been updated, plz read again!",'no chain hash']
        [verifySuccessfullflag, cloudPutAttestation
         ] = verifyClientPutAttestation(block_Id, key_block_Version_No,
                                        new_Version_No, New_Hash, content,
                                        client_Put_Attest, currHash,
                                        currHashSign)
        new_chain_Hash = hash(str(cloudPutAttestation) + str(chain_Hash))
        if verifySuccessfullflag == 1:
            print("Attestation Verified!")
            # stale storage is used to simulate a fork attack by giving back stale data to a reader
            #            if storage.has_key(block_Id):
            #                staleStorage[block_Id]=storage[block_Id]
            storage[block_Id] = [
                new_Version_No, content, New_Hash, hashSign,
                key_block_Version_No, client_Put_Attest, new_chain_Hash
            ]
            # TODO: store the client put attestation somewhere.
            cloudPutAttestation = p.pickle(cloudPutAttestation)
            return [1, cloudPutAttestation, new_chain_Hash]
        else:
            print("Attestation Failed! put operation denied")
            return [
                0, "Cloud:Attestation Failed! put operation denied",
                'no chain hash'
            ]
    else:
        print "Content Integrity check failed!"
        return [0, "Cloud:Content Integrity check failed!", 'no chain hash']
Example #21
def test_app_pickle():
    from pickle import dumps as pickle, loads as unpickle
    from sampleproject.toolkit.app import BaseApplication

    empty_generator = (x for x in [])
    orig = BaseApplication(specs=empty_generator)
    orig.config.update(dict(a=1))
    copy = unpickle(pickle(orig))
    assert copy.config
Example #22
def test(file_name, row_len):

    print 'converting the data... (%s)' % file_name
    csvfile = open('data/' + file_name + '.csv')

    ## convert test data
    test_set = []
    row_num = 0

    for row in csv.reader(csvfile):
        row_num += 1

        print '\r', row_num, '/', row_len,

        if row_num != 1:
            features = row[1: len(row)]  # 1-st column is an id of data (irrelevant)

            # for column in xrange(len(features)):
            #     if max_val[column] != min_val[column]:
            #         features[column] = (float(features[column]) - min_val[column]) / (max_val[column] - min_val[column])
            #     else:
            #         # TBF: if max_val == min_val, that feature is no use for prediction, thus better removing the column
            #         features[column] = 0.

            feature = []

            if features[521] == 'NA' or features[271] == 'NA':
                features[521] = 0
                features[271] = 0

            value = float(features[521]) - float(features[271])
            feature.append(value)
            #feature.append((value - min_val) / (max_val - min_val))

            test_set.append(feature)


    csvfile.close()
    test_set = numpy.asarray(test_set, dtype=numpy.float64)

    print
    print 'saving the data...'
    pickle(test_set, 'data/' + file_name + '_f528_f274.pkl')
    print 'done!'
Example #23
def test_app_pickle():
    from pickle import dumps as pickle, loads as unpickle
    from temboardagent.cli import Application

    empty_generator = (x for x in [])
    orig = Application(specs=empty_generator)
    orig.config.update(dict(a=1))
    copy = unpickle(pickle(orig))
    assert [] == copy.specs
    assert copy.config
Example #24
 def sendobj(self, obj):
     data = pickle(obj)
     buf = pack('I', len(data)) + data
     sent = 0
     while sent < len(buf):
         part = self.send(buf[sent:])
         if part == 0:
             raise ConnectionError('Cannot send all {} bytes'.format(
                 len(buf)))
         sent += part
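sendobj length-prefixes the pickled payload with a 4-byte unsigned int. The matching receive side is not shown; a hypothetical sketch, assuming a plain socket object:

from pickle import loads as unpickle
from struct import unpack

def recvobj(sock):
    def recv_exactly(n):
        # keep reading until exactly n bytes have arrived
        buf = b''
        while len(buf) < n:
            part = sock.recv(n - len(buf))
            if not part:
                raise ConnectionError('Connection closed mid-message')
            buf += part
        return buf

    # 4-byte unsigned length prefix, then the pickled payload itself
    (length,) = unpack('I', recv_exactly(4))
    return unpickle(recv_exactly(length))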
Example #25
def init_poll_kings():
    """
    Load the cached timer data if it exists
    """
    global hash_crack_king_timers
    if path.exists(hash_crack_king_timers_pickle):
        with open(hash_crack_king_timers_pickle, 'rb+') as FILE:
            try:
                hash_crack_king_timers = pickle(FILE)
            except EOFError:
                pass
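init_poll_kings reads the cache back with `pickle(FILE)`, which suggests `pickle` here is an alias for `pickle.load`. A hypothetical save step for the same timer cache, under that assumption:

import pickle as _pickle

def save_poll_kings(timers, pickle_path):
    # Persist the timer dict so init_poll_kings can reload it on the next start.
    with open(pickle_path, 'wb') as f:
        _pickle.dump(timers, f)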
Example #26
 def verifyCloudGetAttestation(self):
     self.cloudPublicKey = p.unpickle(cloudStorage.getPublicKey())
     self.block_hashPickled = p.pickle(self.block_hash)
     self.key_block_Version_NoPickled = p.pickle(self.key_block_Version_No)
     self.concat = str(self.block_Id) + str(
         self.key_block_Version_NoPickled) + str(
             self.block_Version_No) + self.nonce
     self.hashOfElements = SHA256.new(self.concat).hexdigest()
     # verify the attestation signature
     if (self.cloudPublicKey.verify(self.hashOfElements,
                                    p.unpickle(self.cloud_Get_Attest))):
         #sending to auditor
         auditorReply = keyDistributor.putAttestations(
             self.username, "cloudgetattestation", self.block_Id,
             self.block_Version_No, self.cloud_Get_Attest,
             self.key_block_Version_NoPickled, self.block_hashPickled,
             self.chain_Hash)
         return 1
     else:
         return 0
Example #27
def hadoop_feature_line(id, properties, geometry):
    ''' Convert portions of a GeoJSON feature to a single line of text.
    
        Allows Hadoop to stream features from the mapper to the reducer.
        See also skeletron-hadoop-mapper.py and skeletron-hadoop-reducer.py.
    '''
    line = [
        json_encode(id), ' ',
        b64encode(pickle(sorted(list(properties.items())))), '\t',
        b64encode(geometry.wkb)
    ]

    return ''.join(line)
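Each output line is the JSON id, a space, the base64 of the pickled property items, a tab, and base64 WKB. A hypothetical reducer-side parser that inverts this, assuming `unpickle` is `pickle.loads` and that shapely is available for the geometry:

import json
from base64 import b64decode
from pickle import loads as unpickle
from shapely.wkb import loads as wkb_decode  # assumption: shapely handles the WKB

def parse_feature_line(line):
    # Split off the geometry field first (base64 contains no tabs or spaces).
    head, wkb_b64 = line.rsplit('\t', 1)
    id_json, props_b64 = head.rsplit(' ', 1)
    properties = dict(unpickle(b64decode(props_b64)))
    return json.loads(id_json), properties, wkb_decode(b64decode(wkb_b64))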
Example #28
def train(file_name, max_val, min_val, row_len):

    train_set_x = []
    train_set_y = []

    print 'converting the data... (%s)' % file_name
    csvfile = open('data/' + file_name + '.csv')
    row_num = 0

    for row in csv.reader(csvfile):
        row_num += 1

        print '\r', row_num, '/', row_len,

        if row_num != 1:
            features = row[1: len(row) - 1]  # 1-st column is an ID of data (irrelevant)

            for column in xrange(len(features)):
                if max_val[column] != min_val[column]:
                    features[column] = (float(features[column]) - min_val[column]) / (max_val[column] - min_val[column])
                else:
                    # TBF: if max_val == min_val, that feature is of no use for prediction, so the column should be removed
                    features[column] = 0.

            train_set_x.append(features)

            loss = row[-1]
            train_set_y.append(int(loss))

    csvfile.close()
    train_set_x = numpy.asarray(train_set_x, dtype=numpy.float64)
    train_set = (train_set_x, train_set_y)

    print
    print 'saving the data...'
    pickle(train_set, 'data/' + file_name + '.pkl')
    print 'done!'
Example #29
def hadoop_feature_line(id, properties, geometry):
    ''' Convert portions of a GeoJSON feature to a single line of text.
    
        Allows Hadoop to stream features from the mapper to the reducer.
        See also skeletron-hadoop-mapper.py and skeletron-hadoop-reducer.py.
    '''
    line = [
        json_encode(id),
        ' ',
        b64encode(pickle(sorted(list(properties.items())))),
        '\t',
        b64encode(geometry.wkb)
        ]
    
    return ''.join(line)
Example #30
    def put(self, client_Put_Attest, block_Id, key_block_Version_No,
            new_Version_No, New_Hash, content, hashSign):
        self.key_block_Version_NoPickled = p.pickle(self.key_block_Version_No)
        [returnCode, cloudReply,
         chain_Hash] = cloudStorage.put(client_Put_Attest, self.block_Id,
                                        self.key_block_Version_NoPickled,
                                        new_Version_No, New_Hash, self.b64,
                                        hashSign, self.old_Hash)
        if returnCode == 1:
            auditorReply = keyDistributor.putAttestations(
                self.username, "cloudputattestation", self.block_Id,
                self.block_Version_No, cloudReply,
                self.key_block_Version_NoPickled, self.block_hashPickled,
                chain_Hash)

#            keyDistributor.putAttestations(self.username,"clientputattestation",self.block_Id,self.block_Version_No,client_Put_Attest)
        return [returnCode, cloudReply, chain_Hash]
Example #31
def schedule_operation(what, when, config, expire=86400):
    options = {'config': pickle(config)}
    try:
        res = taskmanager.schedule_task(
            what + '_worker',
            options=options,
            start=when,
            listener_addr=str(os.path.join(config.temboard.home,
                                           '.tm.socket')),
            expire=expire,
        )
    except Exception as e:
        logger.exception(str(e))
        raise HTTPError(500, "Unable to schedule {}".format(what))

    if res.type == taskmanager.MSG_TYPE_ERROR:
        logger.error(res.content)
        raise HTTPError(500, "Unable to schedule {}".format(what))

    return res.content
Example #32
    def setProtected(self, name):
        '''
		Set a name in the table to be protected from removal
		because of limits.
		'''
        # generate the filepath to the protected values
        # list
        filePath = pathJoin(self.path, 'protected.table')
        # check if the path exists
        if pathExists(filePath):
            # read the protected list from the file
            protectedList = unpickle(loadFile(filePath))
        else:
            # create the list and append the name
            protectedList = []
        # append the new value to the list
        protectedList.append(name)
        # pickle the protected list for storage
        protectedList = pickle(protectedList)
        # write the changes back to the protected list
        writeFile(filePath, protectedList)
Example #33
	def setProtected(self,name):
		'''
		Set a name in the table to be protected from removal
		because of limits.
		'''
		# generate the filepath to the protected values
		# list
		filePath=pathJoin(self.path,'protected.table')
		# check if the path exists
		if pathExists(filePath):
			# read the protected list from the file
			protectedList=unpickle(loadFile(filePath))
		else:
			# create the list and append the name
			protectedList=[]
		# append the new value to the list
		protectedList.append(name)
		# pickle the protected list for storage
		protectedList=pickle(protectedList)
		# write the changes back to the protected list
		writeFile(filePath,protectedList)
Example #34
 def get_view(self, position):
     """Returns all bins covered by a screen at the given position.
     
     Each bin is 16-by-16 pixels.
     The screen is 800x640 pixels which means that it covers 50 horizontal
     and 40 vertical bins when aligned. When not aligned, it covers 51
     horizontal and 41 vertical bins, so we'll go ahead and notify it of
     that many bins.
     
     Arguments:
     
         position: upperleft pixel position
     """
     x, y = floor(position / pixels_per_bin)
     map_slice = self.entity_map[x:x+51, y:y+41]
     entities = set()
     for item in map_slice:
         entities.add(item)
     
     # Now we want to sort the entities first by z layer, then by the y
     # position of their "base" (y position plus height).
     entities = sorted(entities, key=Entity.sorting_key)
     
     # What the client wants to know is: which sprite corresponds to which
     # entity? Each sprite/entity should share some sort of ID. This lets the
     # client decide how to animate and avoid allocating new sprite objects
     # all the time. The sprite ID is just the hash of the entity object.
     
     # Now consider the sprites objects which the client hasn't created yet.
     # We need to send the name of the sprite (which determines the
     # subdirectory in which we find images for that sprite).
     
     def relevant_info(entity):
         hash_ = hash(entity)
         name = entity.spritename
         relative_position = entity.position - position
         return (hash_, name, relative_position)
     
     return pickle.dumps(list(map(relevant_info, entities)))
Example #35
	def deleteValue(self,name):
		'''
		Delete the value stored under the given name.
		'''
		# clean up names to avoid stupid
		debug.add('deleting value ',name)
		# figure out the path to the named value file
		if name in self.names:
			filePath=self.namePaths[name]
			# remove the metadata entry
			del self.namePaths[name]
			# write changes to database metadata file
			writeFile(pathJoin(self.path,'names.table'),pickle(self.namePaths))
			# update the length and names attributes
			self.names=self.namePaths.keys()
			self.length=len(self.names)
		else:
			return False
		if pathExists(filePath):
			# remove the file associated with the value
			removeFile(filePath)
			return True
		else:
			return False
Example #36
def schedule_operation(operation_type,
                       conn,
                       database,
                       datetimeutc,
                       app,
                       table=None,
                       index=None,
                       schema=None,
                       **kwargs):
    # Schedule a maintenance operation (vacuum or analyze) statement through
    # background worker

    if table:
        check_table_exists(conn, schema, table)
    if index:
        check_index_exists(conn, schema, index)

    # Schedule a new task to background worker

    # We need to build a unique id for this task to avoid scheduling the
    # same statement twice.
    m = hashlib.md5()
    m.update("{database}:{schema}:{table}{index}:{datetime}:{operation_type}".
             format(database=database,
                    schema=schema or '',
                    table=table or '',
                    index=index or '',
                    datetime=datetimeutc,
                    operation_type=operation_type).encode('utf-8'))
    # Task scheduling
    try:
        # Convert string datetime to datetime object
        dt = datetime.strptime(datetimeutc, '%Y-%m-%dT%H:%M:%SZ')

        options = {
            'config': pickle(app.config),
            'dbname': database,
        }
        if schema:
            options['schema'] = schema
        if table:
            options['table'] = table
        if index:
            options['index'] = index
        if 'mode' in kwargs:
            options['mode'] = kwargs['mode']

        res = taskmanager.schedule_task(
            operation_type + '_worker',
            id=m.hexdigest()[:8],
            options=options,
            # We add one microsecond here to be compliant with scheduler
            # datetime format expected during task recovery
            start=(dt + timedelta(microseconds=1)),
            listener_addr=str(
                os.path.join(app.config.temboard.home, '.tm.socket')),
            expire=0,
        )
    except Exception as e:
        logger.exception(str(e))
        raise HTTPError(500, "Unable to schedule %s" % operation_type)

    if res.type == taskmanager.MSG_TYPE_ERROR:
        logger.error(res.content)
        raise HTTPError(500, "Unable to schedule %s" % operation_type)

    return res.content
Example #37
def evaluate_lenet5(learning_rate=0.1, n_epochs=500, L1_reg=0.00, L2_reg=0.00001, # L2: 0.00001
					dataset='cifar-10-batches-py',
					nkerns=[6, 16, 30], batch_size=20, mode='train', amount='full'): # nkerns=[6, 16], batch_size=100
	""" Demonstrates lenet on MNIST dataset

	:type learning_rate: float
	:param learning_rate: learning rate used (factor for the stochastic
						  gradient)

	:type n_epochs: int
	:param n_epochs: maximal number of epochs to run the optimizer

	:type dataset: string
	:param dataset: path to the dataset used for training/testing (CIFAR-10 here)

	:type nkerns: list of ints
	:param nkerns: number of kernels on each layer
	"""

	#learning_rate = theano.shared(value=learning_rate, borrow=True)

	rng = numpy.random.RandomState(23455)

	datasets = load_data(dataset, mode=mode, amount=amount)

	if mode == 'train':
		train_set_x, train_set_y = datasets[0]
		valid_set_x, valid_set_y = datasets[1]
	else:
		test_set_x, test_set_y = datasets[0]

	# compute number of minibatches for training, validation and testing
	if mode == 'train':
		n_train_batches = train_set_x.get_value(borrow=True).shape[0]
		n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
		n_train_batches /= batch_size
		n_valid_batches /= batch_size
	else:
		n_test_batches = test_set_x.get_value(borrow=True).shape[0]
		n_test_batches /= batch_size

	# allocate symbolic variables for the data
	index = T.lscalar()  # index to a [mini]batch
	x = T.matrix('x')   # the data is presented as rasterized images
	y = T.ivector('y')  # the labels are presented as 1D vector of
						# [int] labels

	ishape = (3, 32, 32)  # this is the size of CIFAR-10 images (scaled)

	######################
	# BUILD ACTUAL MODEL #
	######################
	print '... building the model'

	# Reshape matrix of rasterized images of shape (batch_size,32*32)
	# to a 4D tensor, compatible with our LeNetConvPoolLayer
	layer0_input = x.reshape((batch_size, 3, 32, 32))

	# Construct the first convolutional pooling layer:
	# filtering reduces the image size to (32-5+1,32-5+1)=(28,28)
	# maxpooling reduces this further to (28/2,28/2) = (14,14)
	# 4D output tensor is thus of shape (batch_size,nkerns[0],14,14)
	layer0 = LeNetConvPoolLayer(rng, input=layer0_input,
			image_shape=(batch_size, 3, 32, 32),
			filter_shape=(nkerns[0], 3, 7, 7), poolsize=(2, 2))

	# Construct the second convolutional pooling layer
	# filtering reduces the image size to (14-5+1,14-5+1)=(10,10)
	# maxpooling reduces this further to (10/2,10/2) = (5,5)
	# 4D output tensor is thus of shape (nkerns[0],nkerns[1],5,5)
	layer1 = LeNetConvPoolLayer(rng, input=layer0.output,
			image_shape=(batch_size, nkerns[0], 13, 13),
			filter_shape=(nkerns[1], nkerns[0], 6, 6), poolsize=(2, 2))

	layer2 = LeNetConvPoolLayer(rng, input=layer1.output,
			image_shape=(batch_size, nkerns[1], 4, 4),
			filter_shape=(nkerns[2], nkerns[1], 2, 2), poolsize=(1, 1))

	'''
	# the HiddenLayer being fully-connected, it operates on 2D matrices of
	# shape (batch_size,num_pixels) (i.e matrix of rasterized images).
	# This will generate a matrix of shape (20,50*5*5) = (20,1250) <-??
	layer2_input = layer1.output.flatten(2)

	# construct a fully-connected sigmoidal layer
	layer2 = HiddenLayer(rng, input=layer2_input, n_in=nkerns[1] * 5 * 5,
						 n_out=500, activation=T.tanh)

	'''

	layer3_input = layer2.output.flatten(2)

	# construct a fully-connected sigmoidal layer
	layer3 = HiddenLayer(rng, input=layer3_input, n_in=nkerns[2] * 3 * 3,
						 n_out=64, activation=T.tanh)

	# classify the values of the fully-connected sigmoidal layer
	layer4 = LogisticRegression(input=layer3.output, n_in=64, n_out=10)

	## load the saved parameters
	if mode == 'test':
		learned_params = unpickle('params/convolutional_mlp_3.pkl')

	# calculate L1 and L2 norm
	L1 = abs(layer0.W).sum() + abs(layer1.W).sum() + abs(layer2.W).sum() + abs(layer3.W).sum()
	L2_sqr = (layer0.W ** 2).sum() + (layer1.W ** 2).sum() + (layer2.W ** 2).sum() + (layer3.W ** 2).sum()

	# the cost we minimize during training is the NLL of the model
	cost = layer4.negative_log_likelihood(y)# + L1_reg * L1 + L2_reg * L2_sqr

	# create a function to compute the mistakes that are made by the model
	if mode == 'test':
		test_model = theano.function([index], layer4.errors(y),
				givens={
					x: test_set_x[index * batch_size: (index + 1) * batch_size],
					y: test_set_y[index * batch_size: (index + 1) * batch_size]})
	else:
		validate_model = theano.function([index], layer4.errors(y),
				givens={
					x: valid_set_x[index * batch_size: (index + 1) * batch_size],
					y: valid_set_y[index * batch_size: (index + 1) * batch_size]})

		'''
		check_label = theano.function(inputs=[index],
				outputs=layer4.y_pair(y),
					givens={
						x: train_set_x[index * batch_size: (index + 1) * batch_size],
						y: train_set_y[index * batch_size: (index + 1) * batch_size]})
		'''

	# create a function to get the labels predicted by the model
	if mode == 'test':
		get_test_labels = theano.function([index], layer4.y_pred,
				givens={
					x: test_set_x[index * batch_size: (index + 1) * batch_size],
					layer0.W: learned_params[0],
					layer0.b: learned_params[1],
					layer1.W: learned_params[2],
					layer1.b: learned_params[3],
					layer2.W: learned_params[4],
					layer2.b: learned_params[5],
					layer3.W: learned_params[6],
					layer3.b: learned_params[7],
					layer4.W: learned_params[8],
					layer4.b: learned_params[9]})


	if mode == 'train':
		# create a list of all model parameters to be fit by gradient descent
		params = layer4.params + layer3.params + layer2.params + layer1.params + layer0.params
	
		# create a list of gradients for all model parameters
		grads = T.grad(cost, params)

	# train_model is a function that updates the model parameters by
	# SGD Since this model has many parameters, it would be tedious to
	# manually create an update rule for each model parameter. We thus
	# create the updates list by automatically looping over all
	# (params[i],grads[i]) pairs.
	if mode == 'train':
		updates = []
		for param_i, grad_i in zip(params, grads):
			updates.append((param_i, param_i - learning_rate * grad_i))

	if mode == 'train':
		train_model = theano.function([index], cost, updates=updates,
			  givens={
				x: train_set_x[index * batch_size: (index + 1) * batch_size],
				y: train_set_y[index * batch_size: (index + 1) * batch_size]})
		
	###############
	# TRAIN MODEL #
	###############
	print '... training the model'
	# early-stopping parameters
	if mode == 'train':
		patience = 10000  # look as this many examples regardless
		patience_increase = 2  # wait this much longer when a new best is
							   # found
		improvement_threshold = 0.999  # a relative improvement of this much is
									   # considered significant
		validation_frequency = min(n_train_batches, patience / 2)
								  # go through this many
								  # minibatches before checking the network
								  # on the validation set; in this case we
								  # check every epoch

	start_time = time.clock()

	if mode == 'train':
		best_params = None
		best_validation_loss = numpy.inf
		best_iter = 0
		test_score = 0.
		done_looping = False
	else:
		done_looping = True

	epoch = 0

	while (epoch < n_epochs) and (not done_looping):
		epoch = epoch + 1
		for minibatch_index in xrange(n_train_batches):

			iter = (epoch - 1) * n_train_batches + minibatch_index

			if iter % 100 == 0:
				print 'training @ iter = ', iter

			cost_ij = train_model(minibatch_index)

			## check the contents of predictions occasionally
			'''
			if iter % 100 == 0:
				[prediction, true_label] = check_label(minibatch_index)
				print 'prediction:'
				print prediction
				print 'true_label:'
				print true_label
			'''


			if (iter + 1) % validation_frequency == 0:

				# compute zero-one loss on validation set
				validation_losses = [validate_model(i) for i
									 in xrange(n_valid_batches)]
				this_validation_loss = numpy.mean(validation_losses)
				print('epoch %i, minibatch %i/%i, validation error %f %%' % \
					  (epoch, minibatch_index + 1, n_train_batches, \
					   this_validation_loss * 100.))

				pickle(cost_ij, 'data/cost.pkl')

				# if we got the best validation score until now
				if this_validation_loss < best_validation_loss:

					## save the parameters
					if mode == 'train':
						get_params = theano.function(inputs=[], outputs=[layer0.W, layer0.b, layer1.W, layer1.b, layer2.W, layer2.b, layer3.W, layer3.b, layer4.W, layer4.b])
						save_parameters(get_params(), 'convolutional_mlp_3')

					#improve patience if loss improvement is good enough
					if this_validation_loss < best_validation_loss *  \
					   improvement_threshold:
						patience = max(patience, iter * patience_increase)

					# save best validation score and iteration number
					best_validation_loss = this_validation_loss
					best_iter = iter

					'''
					# test it on the test set
					test_losses = [test_model(i) for i in xrange(n_test_batches)]
					test_score = numpy.mean(test_losses)
					print(('	 epoch %i, minibatch %i/%i, test error of best '
						   'model %f %%') %
						  (epoch, minibatch_index + 1, n_train_batches,
						   test_score * 100.))
					'''


			if patience <= iter:
				done_looping = True
				break


	if mode == 'test':
		print 'predicting the labels...'
		pred_labels = [[0 for j in xrange(batch_size)] for i in xrange(n_test_batches)]
		for i in xrange(n_test_batches):
			print str(i+1), '/', str(n_test_batches)
			pred_labels[i] = get_test_labels(i)

		writer = csv.writer(file('result/convolutional_mlp_3.csv', 'w'))
		row = 1

		print 'output test labels...'
		for i in xrange(len(pred_labels)):
			print str(i+1), '/', str(len(pred_labels))
			for j in xrange(len(pred_labels[i])):
				writer.writerow([row, pred_labels[i][j]])
				row += 1


	end_time = time.clock()
	if mode == 'train':
		print('Optimization complete.')
		print('Best validation score of %f %% obtained at iteration %i,'\
			  'with test performance %f %%' %
			  (best_validation_loss * 100., best_iter + 1, test_score * 100.))
	print >> sys.stderr, ('The code for file ' +
						  os.path.split(__file__)[1] +
						  ' ran for %.2fm' % ((end_time - start_time) / 60.))
Example #38
def getSecretKey(block_Id, user):
    return p.pickle(blockKeys[block_Id])
Example #39
 def test_04_pickle(self):
   'util.symbol: pickling'
   cucumber = symbol.cucumber
   gerkin = pickle(cucumber,HIGHEST_PROTOCOL) #NOTE: MUST be protocol 2+
   veggie = unpickle(gerkin)
   assert veggie is cucumber
Example #40
	# the number of test images
	image_num = 300000
	batch_size = 1000.
	batch_num = int(image_num / batch_size)

	color_data = [[0 for j in xrange(0,3072)] for i in xrange(0,int(batch_size))]
	
	print 'converting image data into pixel...'

	for i in xrange(0, batch_num):
		print str(i+1), '/', str(batch_num)

		for j in xrange(0, int(batch_size)):
			file_num = str(int(i * batch_size + (j + 1)))
			img = Image.open('test_image/' + file_num + '.png')
		
			rgbimg = img.convert("RGB")
			rgb = numpy.asarray(rgbimg.getdata(), dtype=numpy.float64)
			
			for k in xrange(0, len(rgb)):
				color_data[j][k] = rgb[k][0] / 255.
				color_data[j][k + 1024] = rgb[k][1] / 255.
				color_data[j][k + 2048] = rgb[k][2] / 255.
	
		color_data = numpy.asarray(color_data)
		file_num = str(i + 1)
		pickle(color_data, 'data/test_set_' + file_num + '.pkl')
	
	print 'done!'
	
Example #41
def test_dA(learning_rate=0.1, training_epochs=20,
			dataset='mnist.pkl.gz',
			batch_size=20, output_folder='dA_data', mode='train', amount='full'):

	"""
	This demo is tested on MNIST

	:type learning_rate: float
	:param learning_rate: learning rate used for training the Denoising
						  AutoEncoder

	:type training_epochs: int
	:param training_epochs: number of epochs used for training

	:type dataset: string
	:param dataset: path to the pickled dataset

	"""
	datasets = load_data(dataset, mode, amount)
	train_set_x, train_set_y = datasets[0]

	# compute number of minibatches for training, validation and testing
	n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size

	# allocate symbolic variables for the data
	index = T.lscalar()	# index to a [mini]batch
	x = T.matrix('x')  # the data is presented as rasterized images

	'''
	if not os.path.isdir(output_folder):
		os.makedirs(output_folder)
	os.chdir(output_folder)
	'''
	######################
	# BUILDING THE MODEL #
	######################

	for noize in [0, 10, 20, 30, 40, 50]:
		print 'noize:', str(noize), '%'

		rng = numpy.random.RandomState(123)
		theano_rng = RandomStreams(rng.randint(2 ** 30))

		da = dA(numpy_rng=rng, theano_rng=theano_rng, input=x,
				n_visible=32 * 32, n_hidden=784) # same as MNIST (28*28=784)

		cost, updates = da.get_cost_updates(corruption_level=noize/100.,
											learning_rate=learning_rate)

		train_da = theano.function([index], cost, updates=updates,
			 givens={x: train_set_x[index * batch_size:
									(index + 1) * batch_size]})
		
		comp_data = da.get_comp_values()

		get_comp_data = theano.function([index], comp_data,
			 givens={x: train_set_x[index * batch_size:
									(index + 1) * batch_size]})

		start_time = time.clock()

		############
		# TRAINING #
		############

		# go through training epochs
		for epoch in xrange(training_epochs):
			# go through training set
			c = []
			for batch_index in xrange(n_train_batches):
				c.append(train_da(batch_index))

			print 'Training epoch %d, cost ' % epoch, numpy.mean(c)

		end_time = time.clock()

		training_time = (end_time - start_time)

		print >> sys.stderr, ('The ' + str(noize)  +'% corruption code for file ' +
							  os.path.split(__file__)[1] +
							  ' ran for %.2fm' % ((training_time) / 60.))

		## save parameters
		get_params = theano.function(inputs=[], outputs=[da.W, da.b, da.b_prime])
		save_parameters(get_params(), 'dA_' + str(noize))

		## save compressed data (no corruption)
		data_da_0 = [[0 for j in xrange(28*28)] for i in xrange(n_train_batches*batch_size)]
		
		for batch_index in xrange(n_train_batches):
			comp_x = get_comp_data(batch_index)
		
			for i in xrange(batch_size):
				comp_x[i] = numpy.asarray(comp_x[i], dtype=numpy.float64)
				data_da_0[batch_index * batch_size + i] = comp_x[i]
		
		data_da_0 = numpy.asarray(data_da_0)
		pickle(data_da_0, 'dA_data/' + mode + '_data_da_' + str(noize) + '.pkl')
Example #42
    def process_message(self, msg, waiting=False):
        '''
        Process a remote message.
        <msg> is a backend.Message object

        If <waiting> is True it indicates a main loop is waiting for something,
        in which case signals get queued.
        '''

        # Decode message
        try:
            info = pickle.loads(msg.parts[0])
            if DEBUG:
                logger.debug('Msg %s from %s: %s', info[0], msg.sender_uid, info)
        except Exception as e:
            logger.warning('Unable to decode object: %s [%r]', str(e), msg.parts[0])
            return

        if info[0] == OS_CALL:
            # In a try statement to postpone checks
            try:
#            if 1:
                (callid, objid, funcname, args, kwargs) = info[1:6]
                objid = misc.UID(bytes=objid)
                sig = kwargs.get(OS_SIGNAL, False)

                # Store signals if waiting a reply or event
                if waiting and sig:
                    self._signal_queue.append(msg)
                    return

                # Unwrap arguments
                try:
                    bufs = msg.parts[1:]
                    args, bufs = _unwrap(args, bufs, msg.sender_uid)
                    kwargs, bufs = _unwrap(kwargs, bufs, msg.sender_uid)
                except:
                    ret = misc.RemoteException('Unable to unwrap objects')
                    reply = [pickle.dumps((OS_RETURN, callid, ret), PICKLE_PROTO)]
                    self.backend.send_msg(msg.sender_uid, reply)
                    return

                if DEBUG:
                    logger.debug('  Processing call %s: %s.%s(%s,%s)', callid, objid, funcname, args, kwargs)

                # Call function
                obj = self.get_object(objid)
                func = getattr(obj, funcname, None)
                ret = func(*args, **kwargs)

                # If a signal, no need to return anything to caller
                if sig:
                    return

                # Wrap return value
                ret, bufs = _wrap(ret)
                if DEBUG:
                    logger.debug('  Returning for call %s: %s', callid, misc.ellipsize(str(ret)))

            # Handle errors
            except Exception as e:
#            if 0:
                if len(info) < 6:
                    logger.error('Invalid call msg: %s', info)
                    ret = misc.RemoteException('Invalid call msg')
                elif 'obj' not in locals() or obj is None:
                    ret = misc.RemoteException('Object %s not available for calls' % objid)
                elif 'func' not in locals() or func is None:
                    ret = misc.RemoteException('Object %s does not have function %s' % (objid, funcname))
                else:
                    tb = traceback.format_exc(15)
                    ret = misc.RemoteException('%s\n%s' % (e, tb))

            # Prepare return packet
            try:
                reply = [pickle.dumps((OS_RETURN, callid, ret), PICKLE_PROTO)]
                reply.extend(bufs)
            except:
                ret = misc.RemoteException('Unable to pickle return %s' % str(ret))
                reply = [pickle.dumps((OS_RETURN, callid, ret), PICKLE_PROTO)]
            self.backend.send_msg(msg.sender_uid, reply)

        elif info[0] == OS_RETURN:
            if len(info) < 3:
                return Exception('Invalid return msg')

            # Get call id and unwrap return value
            callid, ret = info[1:3]
            ret, bufs = _unwrap(ret, msg.parts[1:], msg.sender_uid)

            if DEBUG:
                logger.debug('  Processing return for %s: %s', callid, ret)
            if callid in self.reply_objects:
                self.reply_objects[callid].set(ret)
                # We should not keep track of the reply object
                del self.reply_objects[callid]
            else:
                raise Exception('Reply for unknown call %s' % callid)

        elif info[0] == 'hello_from':
            msg.sender_uid = misc.UID(bytes=info[1])
            from_addr = info[2]
            logger.debug('hello_from client %s with server @ %s', msg.sender_uid, from_addr)
            self.backend.connect_from(msg.sock, msg.sender_uid, from_addr)
# This was necessary for ZMQ sockets
#            if not self.backend.connected_to(msg.sender_uid):
#                if DEBUG:logger.debug('Initiating reverse connection...')
#                self.backend.connect_to(from_addr, msg.sender_uid)
            if DEBUG:
                logger.debug('Sending hello_reply')
            reply = ('hello_reply', self.root_uid.b, self.backend.get_server_address())
            self.backend.send_msg(msg.sender_uid, [pickle.dumps(reply, PICKLE_PROTO)])
            self.request_client_proxy(msg.sender_uid, async_arg=True)

        elif info[0] == 'hello_reply':
            msg.sender_uid = misc.UID(bytes=info[1])
            from_addr = info[2]
            if DEBUG:
                logger.debug('hello_reply client %s with server @ %s', msg.sender_uid, from_addr)
            self.backend.connect_from(msg.sock, msg.sender_uid, from_addr)
            self.request_client_proxy(msg.sender_uid, async_arg=True)

        elif info[0] == 'goodbye_from':
            if DEBUG:
                logger.debug('Goodbye client %s from %s', msg.sender_uid, info[1])
            forget_uid = self.backend.get_uid_for_addr(info[1])
            if forget_uid in self.clients:
                del self.clients[forget_uid]
                if DEBUG:
                    logger.debug('deleting client %s', forget_uid)
            self.backend.forget_connection(info[1], remote=False)
            if msg.sender_uid in self.clients:
                del self.clients[msg.sender_uid]
                if DEBUG:
                    logger.debug('deleting client %s', msg.sender_uid)

        # Ping - pong to check alive
        elif info[0] == 'ping':
            logger.info('PING from %s', msg.sender_uid)
            msg = pickle.dumps(('pong',), PICKLE_PROTO)
            self.backend.send_msg(msg.sender_uid, [msg])
        elif info[0] == 'pong':
            logger.info('PONG from %s', msg.sender_uid)

        else:
            logger.warning('Unknown msg: %s', info)
Example #43
# -*- coding: utf-8 -*-

from sqlalchemy import create_engine, MetaData, Table
from pickle import dumps as pickle
from binascii import hexlify

from application import SQLALCHEMY_DATABASE_URI, db


if __name__ == '__main__':
    engine = create_engine(SQLALCHEMY_DATABASE_URI)

    for name in ['users', 'lists', 'tasks']:
        meta = MetaData(bind=engine)
        table = Table(name, meta, autoload=True)
        columns = table.columns.keys()

        with file('db.%s.bak' % name, 'wb') as f:
            for record in db.session.query(table).all():
                data = dict(
                    [(str(column), getattr(record, column))
                        for column in columns]
                )

                f.write(hexlify(pickle(data)) + '\n')
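Each backup line is the hex encoding of a pickled column/value dict, one record per line. A hypothetical reader for that format:

from binascii import unhexlify
from pickle import loads as unpickle

def read_backup(path):
    # Yield the record dicts written by the backup script above.
    with open(path, 'rb') as f:
        for line in f:
            line = line.strip()
            if line:
                yield unpickle(unhexlify(line))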
Example #44
        print("Current Content:%s" % obj.content)
        if (obj.verifyIntegrity()):
            print("received content integrity check passed")
            #Now we need to verify integrity of client_get_attestation
            if obj.verifyCloudGetAttestation():
                print("cloud Get attestation verified! will store attestation")
                obj.content = raw_input("Lets Modify content to?:")
                #                obj.setNewkey_block_Version_No()
                obj.block_Version_No = obj.block_Version_No + 1
                #                obj.block_Version_No=int(raw_input("current Block version: %s,Enter New:" %obj.block_Version_No))
                if obj.createClientPutAttestation() == 0:
                    print("User %s has no write privileges for Block:%d" %
                          (obj.username, obj.block_Id))
                else:
                    [returnCode, cloudReply, chain_Hash] = obj.put(
                        p.pickle(obj.clientPutAttestation), obj.block_Id,
                        obj.key_block_Version_No, obj.block_Version_No,
                        obj.block_hashPickled, obj.content,
                        p.pickle(
                            hashAndSign(obj.block_Id, obj.b64, obj.username)))

                    #                [returnCode,cloudReply,chain_Hash]=put(p.pickle(obj.clientPutAttestation),obj.block_Id,obj.key_block_Version_No,obj.block_Version_No,obj.block_hashPickled,obj.content,p.pickle(hashAndSign(obj.block_Id,obj.content,obj.username)),obj.username)

                    if returnCode == 1:
                        cloudPublicKey = p.unpickle(
                            cloudStorage.getPublicKey())
                        obj.hashOfElements = hash(obj.concat)
                        if verifySignature(cloudPublicKey,
                                           p.unpickle(cloudReply),
                                           obj.hashOfElements):
                            print(
Example #45
 def __setitem__(self, id, data):
     db.DB.__setitem__(self, id, pickle.dumps(data))
Example #46
def getPublicKey():
    f = open('cloud.pub_key', 'r').read()
    key = RSA.importKey(f)
    return p.pickle(key)
def save_parameters(params, file_name):
    pickle(params, "params/" + file_name + ".pkl")
def test_dA(learning_rate=0.1, training_epochs=20,
			dataset='mnist.pkl.gz',
			batch_size=20, output_folder='dA_data', mode='test', amount='full'):

	"""
	This demo is tested on MNIST

	:type learning_rate: float
	:param learning_rate: learning rate used for training the Denoising
						  AutoEncoder

	:type training_epochs: int
	:param training_epochs: number of epochs used for training

	:type dataset: string
	:param dataset: path to the pickled dataset

	"""
	datasets = load_data(dataset, mode, amount)
	train_set_x, train_set_y = datasets[0]

	# compute number of minibatches for training, validation and testing
	n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size

	# allocate symbolic variables for the data
	index = T.lscalar()	# index to a [mini]batch
	x = T.matrix('x')  # the data is presented as rasterized images

	######################
	# BUILDING THE MODEL #
	######################

	for noize in [0, 10, 20, 30, 40, 50]:
		print 'noize:', str(noize), '%'

		rng = numpy.random.RandomState(123)
		theano_rng = RandomStreams(rng.randint(2 ** 30))

		da = dA(numpy_rng=rng, theano_rng=theano_rng, input=x,
				n_visible=32 * 32, n_hidden=784) # same as MNIST (28*28=784)

		## load the saved parameters
		learned_params = unpickle('params/dA_' + str(noize) + '.pkl')

		comp_data = da.get_comp_values()

		get_comp_data = theano.function([index], comp_data,
			 givens={
				 x: train_set_x[index * batch_size: (index + 1) * batch_size],
				 da.W: learned_params[0],
				 da.b: learned_params[1]})


		## save compressed data (no corruption)
		print 'creating compressed data...'
		if mode == 'valid':
			data_da = [[0 for j in xrange(28*28)] for i in xrange(n_train_batches*batch_size)]
			
			for batch_index in xrange(n_train_batches):
				comp_x = get_comp_data(batch_index)
			
				for i in xrange(batch_size):
					comp_x[i] = numpy.asarray(comp_x[i], dtype=numpy.float64)
					data_da[batch_index * batch_size + i] = comp_x[i]
			
			data_da = numpy.asarray(data_da)
			pickle(data_da, 'dA_data/' + mode + '_data_da_' + str(noize) + '.pkl')
		else:
			if amount == 'full':
				step_size = 300
			else:
				step_size = 6
			for step in xrange(1,step_size+1):
				print str(step), '/', str(step_size)
				data_da = [[0 for j in xrange(28*28)] for i in xrange(n_train_batches*batch_size/step_size)]
				
				for batch_index in xrange(n_train_batches/step_size):
					comp_x = get_comp_data(batch_index + (n_train_batches / step_size) * (step - 1))
				
					for i in xrange(batch_size):
						comp_x[i] = numpy.asarray(comp_x[i], dtype=numpy.float64)
						data_da[batch_index * batch_size + i] = comp_x[i]
				
				data_da = numpy.asarray(data_da)
				pickle(data_da, 'dA_data/' + mode + '_data_da_' + str(noize) + '_' + str(step) + '.pkl')
def save_parameters(params, file_name):
	pickle(params, 'params/' + file_name + '.pkl')
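These training scripts call `pickle(obj, path)` and `unpickle(path)` without showing their definitions. A minimal sketch of what such path-based wrappers might look like, assuming they simply wrap pickle.dump and pickle.load:

import pickle as _pickle

def pickle(obj, file_path):
    # dump an object to a path, as save_parameters does with params/<name>.pkl
    with open(file_path, 'wb') as f:
        _pickle.dump(obj, f)

def unpickle(file_path):
    # load an object back from a path, as used to read the saved parameters
    with open(file_path, 'rb') as f:
        return _pickle.load(f)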