Example no. 1
    def collect_commit_dependencies(self, key, file_name, share_ID):
        if share_ID not in self.share_machine_dict:
            logging.error('share ID not associated with any machines; call add_share_to_machine() first')
            return

        machine_proxy = self.machine_proxy_dict[(
            self.share_machine_dict[share_ID]
        )[0]]  # use first machine proxy in list.  TODO: use all proxies at once

        bm = local_blob_manager()
        while True:
            files_needed = bm.blobs_to_restore_blob(
                key, os.path.join(self.storage_directory, share_ID), file_name)
            if files_needed is None:
                return
            for need_file_name in files_needed:
                logging.debug('requesting file to fill dependency: %s',
                              need_file_name)
                file_xmlrpc_data = machine_proxy.get_file(
                    share_ID, need_file_name)
                with open(
                        os.path.join(self.storage_directory, share_ID,
                                     need_file_name), 'wb') as f:
                    f.write(file_xmlrpc_data.data)
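The while loop implements dependency pulling: blobs_to_restore_blob reports which blob files are still missing before file_name can be reconstructed (returning None once everything is local), and each pass fetches the missing blobs from the first machine hosting the share. A minimal usage sketch, assuming a hypothetical client instance of the surrounding class; the key, blob name, and paths are placeholder assumptions, not taken from the original code:

    # hypothetical usage sketch: pull every blob a commit needs, then restore it
    key = b'Sixteen byte key'        # demo key used elsewhere in these examples
    commit_file = '_deadbeef'        # placeholder name of the commit blob to fetch
    client.collect_commit_dependencies(key, commit_file, 'test_share')
    bm = local_blob_manager()
    bm.restore_directory(key, '/tmp/restore_target',
                         os.path.join(client.storage_directory, 'test_share'),
                         commit_file)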
Example no. 2
    def commit(self, params, rpc_id, packet):
        """
		"""
        #@TODO:  Ensure only authenticated users can access this command
        [ver, congest, args] = params
        [
            key, working_directory, share_ID, user_name, commit_msg,
            parent_commit_hash, other_parent_commit_hash
        ] = args
        logging.debug(
            '[key, working_directory, share_ID, user_name, commit_msg, parent_commit_hash, other_parent_commit_hash]: %s',
            [
                key, working_directory, share_ID, user_name, commit_msg,
                parent_commit_hash, other_parent_commit_hash
            ])

        #@TODO:  If share_ID is None, look through all shares for the parent_commit_hash and use its share
        bm = local_blob_manager()
        commit_hash = bm.commit_directory(
            key, working_directory,
            os.path.join(self.storage_directory, share_ID), user_name,
            commit_msg, parent_commit_hash, other_parent_commit_hash)

        #@TODO: packet response is to myself for it is stored in shared object json_response_dict for use with autosync.  Allow responding to other machines?
        return_address = ('localhost', self.command_port)
        packet.json_RPC_object = dict(jsonrpc="2.0",
                                      result=commit_hash,
                                      id=rpc_id)
        packet.to_address = return_address
        return packet
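The handler unpacks params as [ver, congest, args] and answers with a JSON-RPC 2.0 result packet addressed back to itself. The request and response shapes implied by the code look roughly like this (all values are placeholders, and the method name is inferred from the handler name, not confirmed by the source):

    # hypothetical JSON-RPC 2.0 shapes for the commit command
    request = dict(jsonrpc='2.0', method='commit', id=42,
                   params=[1, 0, ['Sixteen byte key', '/path/to/working_dir',
                                  'test_share', 'joe.keur', 'commit msg',
                                  None, None]])
    response = dict(jsonrpc='2.0', result='<commit_hash>', id=42)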
Example no. 3
	def on_any_event(self, event):
		logging.debug('auto sync directory event triggered')
		time.sleep(0.1)  # ignore any transient changes
		bm = local_blob_manager()
		bm.commit_directory(self.key, self.monitoring_directory,
				os.path.join(self.storage_directory, self.share_ID),
				self.user_name, 'auto', None, None)  # TODO: real value for parent commit
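This handler follows the watchdog library's event API (on_any_event is the catch-all hook of watchdog.events.FileSystemEventHandler). A self-contained sketch of wiring such a handler to an observer; the watched path and the trivial handler body are placeholders, not the project's actual setup:

    import time
    from watchdog.events import FileSystemEventHandler
    from watchdog.observers import Observer

    class LogHandler(FileSystemEventHandler):
        # minimal stand-in for the autosync handler above
        def on_any_event(self, event):
            print('filesystem event: %s' % event.src_path)

    observer = Observer()
    observer.schedule(LogHandler(), '/tmp/watched', recursive=True)
    observer.start()
    try:
        time.sleep(10)  # watch for ten seconds in this demo
    finally:
        observer.stop()
        observer.join()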
		
Example no. 4
	def test_delta_commit(self):
		logging.debug('Testing committing with a known parent')
		reset_storage()
		setup_test_dirs()
		bm = local_blob_manager()
		commit_hash_1 = bm.commit_directory(key, '../resource/test_dir_3/root',
				os.path.join(peer_A_storage, 'test_share'), 'joe.keur', 'first commit msg')
		commit_hash_2 = bm.commit_directory(key, '../resource/test_dir_1/root',
				os.path.join(peer_A_storage, 'test_share'), 'joe.keur', 'second commit msg', commit_hash_1)
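If the delta chains correctly onto its parent, restoring commit_hash_2 should reproduce test_dir_1. A hedged follow-up step the test could add (the restore target directory is an assumption):

    # hypothetical verification: restore the delta commit into a fresh directory
    bm.restore_directory(key, '../resource/restore_directory_2',
                         os.path.join(peer_A_storage, 'test_share'), commit_hash_2)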
Example no. 5
	def restore(self, params, rpc_id, packet):
		"""
		"""
		#@TODO:  Ensure only authenticated users can access this command
		[ver, congest, args] = params
		[key, working_directory, commit_hash] = args
		bm = local_blob_manager()
		#@TODO:  look for version already in working_directory and do differential update
		bm.restore_directory(key, working_directory, self.storage_directory, commit_hash)
Example no. 6
	def test_initial_commit(self):
		logging.debug('Testing recording a tree')
		reset_storage()
		setup_test_dirs()
		bm = local_blob_manager()
		commit_hash_1 = bm.commit_directory(key, '../resource/test_dir_1/root',
				os.path.join(peer_A_storage, 'test_share'), 'joe.keur', 'first commit msg')
		bm.restore_directory(key,'../resource/restore_directory_1', os.path.join(peer_A_storage, 'test_share'),
					 commit_hash_1)
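A natural extra assertion, not present in the original test, would compare the restored tree against the source with the standard filecmp module (the exact on-disk layout restore_directory produces is an assumption here):

    import filecmp

    # hypothetical check: no files differ or exist on only one side
    cmp = filecmp.dircmp('../resource/test_dir_1/root',
                         '../resource/restore_directory_1')
    assert not (cmp.left_only or cmp.right_only or cmp.diff_files)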
Example no. 7
	def collect_commit_dependencies(self, key, file_name, peer_ID, url = None):
		if peer_ID not in self.peer_dict:
			self.connect_peer(peer_ID, url)
				
		bm = local_blob_manager()
		while True:
			files_needed = bm.blobs_to_restore_blob(key, self.storage_directory, file_name)
			if files_needed is None:
				return
			for need_file_name in files_needed:
				logging.debug('requesting file to fill dependency: %s', need_file_name)
				file_xmlrpc_data = self.peer_dict[peer_ID].get_file(need_file_name)
				with open(os.path.join(self.storage_directory, need_file_name), 'wb') as f:
					f.write(file_xmlrpc_data.data)
Example no. 8
    def sync_new_commit(self, params, rpc_id, packet):
        """
		"""
        [ver, congest, args] = params
        [commit_hash, share_ID] = args
        logging.debug('[commit_hash, share_ID]: %s' %
                      ([commit_hash, share_ID]))
        key = b'Sixteen byte key'  #@TODO:  what do I do about this key?
        #@TODO:  this should sync by updating directory, not overwriting it.
        #@TODO:  should commit this directory before updating it
        if share_ID not in self.autosync_share_working_directory_dict:
            logging.error('No autosync directories with share_ID: %s' %
                          (share_ID))
            return

        if commit_hash[0] != '_':
            commit_hash = '_' + commit_hash

        full_file_name = os.path.join(self.storage_directory, share_ID,
                                      commit_hash)
        if not os.path.isfile(full_file_name):
            logging.error('commit does not exist.  path: %s' %
                          (full_file_name))
            return

        bm = local_blob_manager()
        working_directory_list = self.autosync_share_working_directory_dict[
            share_ID]
        logging.debug('directories to sync: %s' % (working_directory_list))
        for working_directory in working_directory_list:
            file_list, mod_times, last_commit_hash = local_blob_manager.read_commit_meta(
                working_directory)
            logging.debug('parent sync hash: %s' % (last_commit_hash))
            if commit_hash == last_commit_hash:
                logging.debug(
                    'Working directory already contains desired commit')
                continue
            if working_directory in self.autosync_working_directory_dict:  #disable watchdog while writing to directory
                self.autosyncmanager.stop(working_directory)
            (head, tail) = os.path.split(
                working_directory)  #TODO: this seems like a hack
            bm.restore_directory(
                key, head, os.path.join(self.storage_directory, share_ID),
                commit_hash)
            if working_directory in self.autosync_working_directory_dict:
                self.autosyncmanager.start(working_directory)
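sync_new_commit expects params = [ver, congest, [commit_hash, share_ID]], verifies the commit blob exists under the share's storage directory, and then restores it into every registered autosync working directory, stopping the watchdog around the write so the restore does not retrigger an auto-commit. A hypothetical invocation (ver, congest, and the hash are placeholder values, and node stands in for an instance of the surrounding class):

    # hypothetical call shape, inferred from the unpacking above
    params = [1, 0, ['deadbeef0123', 'test_share']]
    node.sync_new_commit(params, rpc_id=7, packet=None)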
Example no. 9
print "************************************************************************"
# encrypt and store the first file blob, then decrypt and load
fb_hash = fb.store(key, os.path.join(peer_A_storage, "test_share"))
fb3 = file_blob()
fb3.my_hash = "3"
fb3.load(key, os.path.join(peer_A_storage, "test_share"), fb_hash)
fb3.display()


print "\n\n"
print "************************************************************************"
print "***Testing loading a whole directory as an initial commit"
logging.debug("Testing loading a whole directory as an initial commit")
print "************************************************************************"
# load a whole directory as an initial commit
bm = local_blob_manager()
commit_hash_1 = bm.commit_directory(
    key, "../resource/test_directory_1/root", os.path.join(peer_A_storage, "test_share"), "joe.keur", "first commit msg"
)
bm.restore_directory(key, "../resource/restore_directory_1", os.path.join(peer_A_storage, "test_share"), commit_hash_1)


print "\n\n"
print "************************************************************************"
print "***Testing adding a second commit"
logging.debug("Testing adding a second commit")
print "************************************************************************"
bm = local_blob_manager()
commit_hash_2 = bm.commit_directory(
    key,
    "../resource/test_directory_2/root",