Example #1
    def _create_database(self):
        db_settings = self.settings['results_database']
        self.database = Database(db_settings['path'],
                                 self.DB_ATTRIBUTES,
                                 db_settings['database_type'],
                                 verbose=self.verbose)
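For orientation, a minimal sketch of the settings fragment this method reads; the path and backend values below are hypothetical, and the accepted 'database_type' values depend on the project's Database class:

# Hypothetical settings fragment consumed by _create_database.
settings = {
    'results_database': {
        'path': './databases/results',  # storage location handed to Database
        'database_type': 'sqlite'       # backend identifier (an assumption)
    }
}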
Example #2
import copy

import numpy as np

# Database and Printer are project-local classes; their module paths are not
# shown in this excerpt.


class ResultsHandler(Printer):

    DB_ATTRIBUTES = {
        'status': 'string',
        'job_id': 'string',
        'repetition': 'integer',
        'work_dir': 'string',
        'exp_identifier': 'string',
        'parameters': 'pickle',
        'objectives': 'pickle',
        'author': 'pickle'
    }
    PROCESSED_JOBS = []

    def __init__(self, settings, verbose=True):
        Printer.__init__(self, 'RESULTS HANDLER', color='yellow')
        self.settings = settings
        self.verbose = verbose

        self._create_database()

    def _create_database(self):
        db_settings = self.settings['results_database']
        self.database = Database(db_settings['path'],
                                 self.DB_ATTRIBUTES,
                                 db_settings['database_type'],
                                 verbose=self.verbose)

    def process_results(self, results_dict):
        results_dict['status'] = 'new'
        self.database.add(results_dict)

    def remove_results(self, identifier):
        self._print('removing results for %s' % identifier)
        condition = {'exp_identifier': identifier}
        self.database.remove_all(condition)

    def get_new_results(self):
        condition = {'status': 'new'}
        new_results_list = self.database.fetch_all(condition)

        # check whether, for a given experiment and a given job_id,
        # all repetitions have been executed
        new_results = {}
        for result in new_results_list:
            exp_identifier = result['exp_identifier']
            job_id = result['job_id']
            new_results.setdefault(exp_identifier, {}).setdefault(
                job_id, []).append(result)

        # get those jobs for which we have all the results
        completed_jobs = []
        for exp_identifier in new_results:
            # look up the settings of the associated experiment
            for experiment in self.settings['experiments']:
                if experiment['name'] == exp_identifier:
                    break
            num_repetitions = experiment['repetitions']
            for job_id in new_results[exp_identifier]:
                if len(new_results[exp_identifier][job_id]) == num_repetitions:
                    completed_jobs.append(job_id)

        return completed_jobs

    def analyze_new_results(self, job_id):
        # fetch all results associated with the given job_id
        condition = {'job_id': job_id}
        results = self.database.fetch_all(condition)

        # copy information to the processed dictionary
        processed = {}
        for att in [
                'job_id', 'work_dir', 'exp_identifier', 'parameters', 'author'
        ]:
            processed[att] = copy.deepcopy(results[0][att])
        processed['loss'] = {}

        # perform operations on results
        exp_identifier = results[0]['exp_identifier']
        for experiment in self.settings['experiments']:
            if experiment['name'] == exp_identifier:
                break

        for objective in experiment['objectives']:
            name = objective['name']
            operation = objective['operation']

            # collect the objective values from all repetitions
            values = np.array(
                [result['objectives'][name] for result in results])
            if operation == 'average':
                value = np.mean(values)
            elif operation == 'std_rel':
                value = np.std(values) / np.mean(values)
            else:
                raise NotImplementedError('unknown operation: %s' % operation)
            processed['loss']['%s_%s' % (name, operation)] = value

        setattr(self, 'info_dict_%s' % job_id, copy.deepcopy(processed))
        self.PROCESSED_JOBS.append(job_id)

    def set_all_to_used(self, job_id):
        condition = {'job_id': job_id, 'status': 'new'}
        update = {'status': 'used'}
        self.database.update(condition, update)
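
A hedged usage sketch of the lifecycle these methods imply; the settings dict and the result payload are illustrative assumptions, not part of the original code:

# Illustrative driver, assuming a settings dict of the shape the class reads.
handler = ResultsHandler(settings, verbose=True)

# a bot reports one repetition of a job
handler.process_results({
    'job_id': 'job-0001',           # hypothetical identifiers
    'repetition': 0,
    'work_dir': '/tmp/job-0001',
    'exp_identifier': 'exp_a',
    'parameters': None,
    'objectives': {'yield': 0.83},  # raw objective values, keyed by name
    'author': None
})

# once all repetitions of a job have arrived, aggregate and mark them used
for job_id in handler.get_new_results():
    handler.analyze_new_results(job_id)
    handler.set_all_to_used(job_id)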
Example #3
import copy
import os
import pickle
import shutil
import subprocess

# Database, Printer, and FileLogger are project-local classes; their module
# paths are not shown in this excerpt.


class BotManager(Printer):

    DB_ATTRIBUTES = {
        'status': 'integer',
        'name': 'string',  # serves as id (i.e. is unique)
        'parameters': 'pickle',  # list of parameters the bot can process
        'possible_experiments': 'pickle',  # list of possible experiments
        'communication': 'dictionary'  # manages communication with the bot
    }
    QUEUED_JOBS = []
    PROCESSED_JOBS = []
    FILE_LOGGERS = []
    RUNNING_EXPS = []
    ROBOT_STATUS = {}

    def __init__(self, settings, verbose=True):
        Printer.__init__(self, 'BOT MANAGER', color='red')
        self.settings = settings
        self.verbose = verbose
        self._create_database()

        # add all bots found in settings
        self._add_all_bots()

    def _create_database(self):
        db_settings = self.settings['bot_database']
        self.database = Database(db_settings['path'],
                                 self.DB_ATTRIBUTES,
                                 db_settings['database_type'],
                                 verbose=self.verbose)

    def _add_all_bots(self):
        for bot_dict in self.settings['bots']:
            bot_dict['status'] = 0
            self.add_bot(bot_dict)

    def add_bot(self, bot_dict):
        # we need to find out which experiments the bot can run
        bot_dict['possible_experiments'] = []
        for experiment in self.settings['experiments']:
            # a bot can run an experiment if it handles all of its variables
            possible = all(variable['name'] in bot_dict['parameters']
                           for variable in experiment['variables'])
            if possible:
                bot_dict['possible_experiments'].append(experiment['name'])
        self.database.add(bot_dict)

    def get_available(self, experiment_name):
        condition = {'status': 0}
        available_bots = self.database.fetch_all(condition)

        # find an available bot that can run the requested experiment
        for bot in available_bots:
            if experiment_name in bot['possible_experiments']:
                break
        else:
            # no available bot matches this experiment
            return None
        return bot

    def _relabel(self, bot_name, status):
        condition = {'name': bot_name}
        update = {'status': status}
        return self.database.update(condition, update)

    def label_busy(self, bot_info):
        if isinstance(bot_info, dict):
            return self._relabel(bot_info['name'], 1)
        elif isinstance(bot_info, str):
            return self._relabel(bot_info, 1)

    def label_available(self, bot_info):
        if isinstance(bot_info, dict):
            return self._relabel(bot_info['name'], 0)
        elif isinstance(bot_info, str):
            return self._relabel(bot_info, 0)

    def process_evaluated_params(self, file_name):
        # file logger finds the file it is looking for
        # and triggers this method
        # which collects and processes all information
        self._print('found processed job in %s' % file_name)

        # see if this is a loadable and comprehensible pickle file
        # (missing keys raise KeyError, not AttributeError)
        try:
            with open(file_name, 'rb') as content:
                data = pickle.load(content)
            job_id = data['job_id']
            bot_name = data['bot_name']
            exp_name = data['exp_identifier']
        except (KeyError, pickle.UnpicklingError, EOFError):
            self._print('could not process file %s' % file_name)
            return None

        # check if we want to use the file
        if self.ROBOT_STATUS[exp_name] == 'usable':

            # find the settings of the experiment this result belongs to
            for experiment in self.settings['experiments']:
                if exp_name == experiment['name']:
                    break

            data['objectives'] = {}
            for objective in experiment['objectives']:
                try:
                    data['%s_raw' % objective['name']] = copy.deepcopy(
                        data[objective['name']])
                except KeyError:
                    self._print('could not process file %s' % file_name)
                    return None

                if objective['type'] == 'minimum':
                    data['objectives'][objective['name']] = copy.deepcopy(
                        data[objective['name']])
                elif objective['type'] == 'maximum':
                    data['objectives'][objective['name']] = -copy.deepcopy(
                        data[objective['name']])
                else:
                    raise NotImplementedError(
                        'unknown objective type: %s' % objective['type'])

            # store data dict as attribute and append to processed jobs
            setattr(self,
                    'info_dict_%s_%d' % (data['job_id'], data['repetition']),
                    data)
            self.QUEUED_JOBS.append('%s_%d' %
                                    (data['job_id'], data['repetition']))

        else:
            self._print('found only trash results')

        file_logger = self.FILE_LOGGERS[0]
        file_logger.stop()
        del self.FILE_LOGGERS[0]

        if len(self.FILE_LOGGERS) > 0:
            file_logger = self.FILE_LOGGERS[0]
            file_logger.start()


        # release the bot once all repetitions have been collected
        if len(self.FILE_LOGGERS) == 0:
            self.label_available(bot_name)
            self.PROCESSED_JOBS.extend(self.QUEUED_JOBS)
            self.QUEUED_JOBS = []

    def kill_running_robots(self, exp_identifier):
        self._print('killing parameter generation for %s' % exp_identifier)
        # just need to stop the thread
        self.ROBOT_STATUS[exp_identifier] = 'trash'

    def submit(self, bot_dict, experiment):
        self._print('submitting job %s to bot %s' %
                    (experiment['job_id'], bot_dict['name']))

        # set bot to busy
        self.label_busy(bot_dict)
        experiment['bot_name'] = bot_dict['name']

        # find settings for the experiment to be run
        for exp_settings in self.settings['experiments']:
            if exp_settings['name'] == experiment['exp_identifier']:
                num_reps = exp_settings['repetitions']
                break

        # prepare and initialize the file logger
        for rep in range(num_reps):
            file_logger = FileLogger(
                action=self.process_evaluated_params,
                path=bot_dict['communication']['pick_up_path'])
            # if no file logger running, start this one
            if len(self.FILE_LOGGERS) == 0:
                file_logger.start()
            self.FILE_LOGGERS.append(file_logger)

            self.ROBOT_STATUS[experiment['exp_identifier']] = 'usable'

            # prepare and submit all possible experiments
            file_name = '%s/%s_rep_%d.pkl' % (self.settings['scratch_dir'],
                                              experiment['job_id'], rep)
            experiment['repetition'] = rep
            pickle.dump(experiment, open(file_name, 'wb'))

            # submit experiment to bot
            if 'host' in bot_dict['communication']:
                # need to run scp
                subprocess.call(
                    'scp %s %s@%s:%s' %
                    (file_name, bot_dict['communication']['username'],
                     bot_dict['communication']['host'],
                     bot_dict['communication']['dump_path']),
                    shell=True)
            else:
                # can just copy file (e.g. to dropbox folder)
                shutil.copy2(file_name, bot_dict['communication']['dump_path'])

            # clean up
            os.remove(file_name)

    def _write_status(self, bot_dict, status):
        # update (or create) the status file used to communicate with the bot
        status_file = bot_dict['communication']['status_file']
        if os.path.isfile(status_file):
            with open(status_file, 'rb') as content:
                data = pickle.load(content)
        else:
            data = {}
        data['status'] = status
        with open(status_file, 'wb') as content:
            pickle.dump(data, content)

    def boot_bots(self):
        for bot_dict in self.settings['bots']:
            self._write_status(bot_dict, 'running')

    def shutdown(self, bot_dict=None):
        # shut down a single bot if bot_dict is given, otherwise all bots
        if isinstance(bot_dict, dict):
            self._write_status(bot_dict, 'shutdown')
        else:
            for bot_dict in self.settings['bots']:
                self._write_status(bot_dict, 'shutdown')
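
For reference, a sketch of the result pickle a bot is expected to drop in its pick-up path; the field names follow what process_evaluated_params reads, while the concrete values are assumptions:

# Hypothetical result payload written by a bot after one repetition.
result = {
    'job_id': 'job-0001',      # matches the submitted experiment
    'bot_name': 'bot_alpha',   # used to release the bot afterwards
    'exp_identifier': 'exp_a',
    'repetition': 0,
    'yield': 0.83              # one entry per objective name in the settings
}
with open('result_job-0001_rep_0.pkl', 'wb') as content:
    pickle.dump(result, content)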
Example #4
import copy
from threading import Thread

import numpy as np

# Database and Printer are project-local classes; their module paths are not
# shown in this excerpt.


class ParamGenerator(Printer):

    BUSY = {}
    DB_ATTRIBUTES = {
        'exp_identifier': 'string',
        'status': 'integer',
        'parameters': 'pickle',
        'sampling_parameter_value': 'integer'
    }
    OPTIMIZER = None
    PARAM_STATUS = {}
    TARGET_SPECS = {}

    # generated parameters are added as attributes of the parameter generator
    # under a unique attribute name; chemOS then picks them up and dumps them
    # in the respective database; make sure to always destroy the attributes
    # after they have been dumped

    def __init__(self, settings, verbose=True):
        Printer.__init__(self, 'PARAMETER GENERATOR')
        self.settings = settings
        self.verbose = verbose

        self.settings['algorithm']['scratch_dir'] = self.settings[
            'scratch_dir']

        # importing the wrappers here allows using one optimization algorithm
        # without having the others installed

        if self.settings['algorithm']['name'] == 'phoenics':
            from ParamGenerator.Phoenics.phoenics_wrapper import PhoenicsWrapper
            self.optimization_algorithm = PhoenicsWrapper(
                self.settings['algorithm'])
        elif self.settings['algorithm']['name'] == 'smac':
            from ParamGenerator.SMAC.smac_wrapper import SmacWrapper
            self.optimization_algorithm = SmacWrapper(
                self.settings['algorithm'])
        elif self.settings['algorithm']['name'] == 'spearmint':
            from ParamGenerator.Spearmint.spearmint_wrapper import SpearmintWrapper
            self.optimization_algorithm = SpearmintWrapper(
                self.settings['algorithm'])
        elif self.settings['algorithm']['name'] == 'random_search':
            from ParamGenerator.RandomSearch.random_search_wrapper import RandomsearchWrapper
            self.optimization_algorithm = RandomsearchWrapper(
                self.settings['algorithm'])
        else:
            raise NotImplementedError(
                'unknown algorithm: %s' % self.settings['algorithm']['name'])

        self.BUSY = {
            experiment['name']: False
            for experiment in self.settings['experiments']
        }
        self.number_proposed_parameters = {
            experiment['name']: 0
            for experiment in self.settings['experiments']
        }

        self._create_database()
        self.number_proposed_recipes = {}

    def _create_database(self):
        db_settings = self.settings['param_database']
        self.database = Database(db_settings['path'],
                                 self.DB_ATTRIBUTES,
                                 db_settings['database_type'],
                                 verbose=self.verbose)

    def _get_experiment(self, identifier):
        for experiment in self.settings['experiments']:
            if experiment['name'] == identifier:
                break
        return experiment

    def _get_random_parameters(self, identifier):
        # get experiment settings
        experiment = self._get_experiment(identifier)

        optimizer = self.optimization_algorithm.get_instance(
            identifier, experiment)
        normalized_parameter = optimizer.choose()
        del optimizer

        # rescale the normalized parameters to the experiment's variable ranges
        parameter = self._rescale_parameters(normalized_parameter, identifier)
        return parameter

    def _get_sampling_parameter(self, identifier):
        if identifier not in self.number_proposed_parameters:
            self.number_proposed_parameters[identifier] = 0
        return self.number_proposed_parameters[identifier] % self.settings[
            'algorithm']['batch_size']

    def select_parameters(self, identifier):
        sampling_parameter = self._get_sampling_parameter(identifier)

        condition = {
            'exp_identifier': identifier,
            'sampling_parameter_value': sampling_parameter,
            'status': 0
        }
        target = 'parameters'
        parameter = self.database.fetch(condition, target)

        # check if we got parameters from the database
        retrain = False
        if not isinstance(parameter, np.ndarray):
            # no valid parameter set was fetched; generate a random set instead
            parameter = self._get_random_parameters(identifier)
            retrain = True
        else:
            # update the status
            update = {'status': 1}
            self.database.update(condition, update)

        wait = retrain and self.BUSY[identifier]

        parameter = np.squeeze(parameter)

        if not wait:
            self.number_proposed_parameters[identifier] += 1
        return parameter, wait, retrain

    def remove_parameters(self, identifier):
        condition = {'exp_identifier': identifier}
        self.database.remove_all(condition)

    def _normalize_observations(self, observations, exp_identifier):
        # get experiment
        experiment = self._get_experiment(exp_identifier)
        var_names = [variable['name'] for variable in experiment['variables']]
        var_lows = [variable['low'] for variable in experiment['variables']]
        var_highs = [variable['high'] for variable in experiment['variables']]

        rescaled_observations = []
        for observation in observations:
            rescaled_observation = copy.deepcopy(observation)
            for var_index, var_name in enumerate(var_names):
                value = rescaled_observation[var_name]['samples']
                # FIXME: for now, only linear rescaling
                rescaled_observation[var_name]['samples'] = (
                    value - var_lows[var_index]) / (var_highs[var_index] -
                                                    var_lows[var_index])
            rescaled_observations.append(rescaled_observation)
        return rescaled_observations

    def _rescale_parameters(self, normalized_parameters, exp_identifier):
        experiment = self._get_experiment(exp_identifier)
        var_names = [variable['name'] for variable in experiment['variables']]
        var_lows = [variable['low'] for variable in experiment['variables']]
        var_highs = [variable['high'] for variable in experiment['variables']]
        var_sizes = [variable['size'] for variable in experiment['variables']]

        parameters = []
        for norm_param in normalized_parameters:
            start_index = 0
            param = []
            for var_index, var_name in enumerate(var_names):

                values = norm_param[start_index:start_index +
                                    var_sizes[var_index]]
                # FIXME: for now, only linear rescaling
                rescaled = (var_highs[var_index] -
                            var_lows[var_index]) * values + var_lows[var_index]
                param.extend(rescaled)
                start_index += var_sizes[var_index]
            parameters.append(np.copy(param))
        parameters = np.array(parameters)
        return parameters

    def kill_running_instances(self, exp_identifier):
        self._print('killing parameter generation for %s' % exp_identifier)
        # just need to stop the thread
        self.PARAM_STATUS[exp_identifier] = 'trash'

    def _parameter_generation(self):
        optimizer = self._optimizer
        exp_identifier = self._exp_identifier
        self._print('initializing learning procedure')

        # NOTE: the observations could be normalized here via
        # _normalize_observations before being passed to the optimizer

        normalized_parameters = optimizer.choose(
            observations=self.TARGET_SPECS[exp_identifier])
        parameters = self._rescale_parameters(normalized_parameters,
                                              exp_identifier)

        if self.PARAM_STATUS[exp_identifier] == 'usable':

            # updating database
            self._print('updating parameter database')
            for parameter in parameters:
                print('\t', parameter, np.linalg.norm(parameter))
            condition = {'exp_identifier': exp_identifier}
            new_entries = [{
                'exp_identifier': exp_identifier,
                'status': 0,
                'parameters': parameters[index],
                'sampling_parameter_value': index
            } for index in range(len(parameters))]
            self.database.replace(condition, new_entries)

        else:
            self._print('found only trash results')

        # reset
        del self.TARGET_SPECS[exp_identifier]
        del self.PARAM_STATUS[exp_identifier]
        self.BUSY[exp_identifier] = False
        del self._optimizer

    def generate_new_parameters(self, exp_identifier):
        for experiment in self.settings['experiments']:
            if experiment['name'] == exp_identifier:
                break

        # check if a generation run is already in progress for this experiment
        busy = self.BUSY.get(exp_identifier, False)
        if busy:
            return None
        self.BUSY[exp_identifier] = True

        self._print('starting parameter generation process for %s' %
                    exp_identifier)
        self._print('getting optimizer instance')
        self._optimizer = self.optimization_algorithm.get_instance(
            exp_identifier, experiment)
        self._exp_identifier = exp_identifier
        self._print('submitting training process')

        # running the parameter generation locally
        # FIXME: CHANGE CODE HERE TO IMPLEMENT TRAINING ON OTHER COMPUTING RESOURCES!
        generation_thread = Thread(target=self._parameter_generation)
        self.PARAM_STATUS[exp_identifier] = 'usable'
        generation_thread.start()
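
A sketch of how the generator is meant to be driven, based on the return values of select_parameters; the experiment name is a placeholder:

# Illustrative consumption of the generator.
generator = ParamGenerator(settings)

parameter, wait, retrain = generator.select_parameters('exp_a')
if retrain:
    # the database held no fresh parameters; kick off a new generation run
    generator.generate_new_parameters('exp_a')
if wait:
    pass  # a generation run is already in flight; hold off on dispatching
else:
    pass  # dispatch `parameter` to an available bot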
Example #5
import copy

import numpy as np

# Database and Printer are project-local classes; their module paths are not
# shown in this excerpt.


class FeedbackHandler(Printer):

    DB_ATTRIBUTES = {
        'status': 'string',
        'job_id': 'string',
        'repetition': 'integer',
        'work_dir': 'string',
        'exp_identifier': 'string',
        'parameters': 'pickle',
        'loss': 'pickle',
        'author': 'pickle'
    }

    def __init__(self, settings, verbose=True):
        Printer.__init__(self, 'FEEDBACK HANDLER', color='yellow')
        self.settings = settings
        self.verbose = verbose

        self._create_database()

    def _create_database(self):
        db_settings = self.settings['feedback_database']
        self.database = Database(db_settings['path'],
                                 self.DB_ATTRIBUTES,
                                 db_settings['database_type'],
                                 verbose=self.verbose)

    def process_feedback(self, feedback_dict):
        feedback_dict['status'] = 'new'
        self.database.add(feedback_dict)

    def remove_feedback(self, identifier):
        self._print('removing feedback for %s' % identifier)
        condition = {'exp_identifier': identifier}
        self.database.remove_all(condition)

    def get_new_feedback(self):
        condition = {'status': 'new'}
        new_feedback_list = self.database.fetch_all(condition)
        # group the new feedback entries by experiment identifier
        new_feedbacks = {}
        for feedback in new_feedback_list:
            new_feedbacks.setdefault(feedback['exp_identifier'],
                                     []).append(feedback)
        return new_feedbacks

    def _construct_observation_dict(self, exp_identifier, parameters, feedback,
                                    work_dirs):
        # get experiment information
        for experiment in self.settings['experiments']:
            if experiment['name'] == exp_identifier:
                break
        # collect observations
        observations = []
        for index, parameter in enumerate(parameters):
            start_index = 0
            new_obs = {'loss': feedback[index], 'work_dir': work_dirs[index]}
            for variable in experiment['variables']:
                if len(parameter.shape) == 1:
                    new_obs[variable['name']] = {
                        'samples':
                        parameter[start_index:start_index + variable['size']]
                    }
                elif len(parameter.shape) == 2:
                    new_obs[variable['name']] = {
                        'samples':
                        parameter[0,
                                  start_index:start_index + variable['size']]
                    }
                start_index += variable['size']
            observations.append(copy.deepcopy(new_obs))
        return observations

    def get_observations(self, exp_identifier):
        condition = {'exp_identifier': exp_identifier}
        entry_list = self.database.fetch_all(condition)
        parameters, feedbacks, work_dirs = [], [], []
        for entry in entry_list:
            work_dirs.append(entry['work_dir'])
            parameters.append(entry['parameters'])
            feedbacks.append(entry['loss'])
        parameters = np.array(parameters)
        feedbacks = np.array(feedbacks)

        # observations: list of dictionaries with 'loss' and var_name being the key for another dict
        observations = self._construct_observation_dict(
            exp_identifier, parameters, feedbacks, work_dirs)

        return observations

    def set_all_to_used(self, exp_identifier):
        condition = {'exp_identifier': exp_identifier, 'status': 'new'}
        update = {'status': 'used'}
        self.database.update(condition, update)
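
The observation dictionaries returned by get_observations have roughly the following shape; the variable name and the values are illustrative:

# One observation per database entry; each experiment variable maps to its samples.
observation = {
    'loss': {'yield_average': 0.83},              # the pickled loss entry
    'work_dir': '/tmp/job-0001',
    'temperature': {'samples': np.array([0.5])}   # one key per experiment variable
}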
Example #6
import uuid

# Database and Printer are project-local classes; their module paths are not
# shown in this excerpt.


class RequestHandler(Printer):

    DB_ATTRIBUTES = {
        'status': 'string',
        'job_id': 'string',
        'exp_identifier': 'string',
        'parameters': 'pickle',
        'author': 'pickle'
    }

    def __init__(self, settings, verbose=True):
        Printer.__init__(self, 'REQUEST HANDLER', color='yellow')
        self.settings = settings
        self.verbose = verbose
        self._create_database()

    def _create_database(self):
        db_settings = self.settings['request_database']
        self.database = Database(db_settings['path'],
                                 self.DB_ATTRIBUTES,
                                 db_settings['database_type'],
                                 verbose=self.verbose)

    def process_request(self, request_dict):
        # label the request with a unique job_id before storing it
        job_id = str(uuid.uuid4())
        request_dict['job_id'] = job_id
        request_dict['status'] = 'pending'

        # dump entry in database
        self.database.add(request_dict)

    def remove_requests(self, identifier):
        self._print('removing requests for %s' % identifier)
        condition = {'exp_identifier': identifier}
        self.database.remove_all(condition)

    def _relabel(self, job_id, status):
        condition = {'job_id': job_id}
        update = {'status': status}
        self.database.update(condition, update)
        return 0

    def label_processing(self, request_dict):
        return self._relabel(request_dict['job_id'], 'processing')

    def label_feedbackless(self, request_dict):
        return self._relabel(request_dict['job_id'], 'feedbackless')

    def label_done(self, request_dict):
        return self._relabel(request_dict['job_id'], 'done')

    def dump_parameters(self, request, parameters):
        self._print('dumping parameter set')
        print('\t\t', parameters)
        condition = {'job_id': request['job_id']}
        update = {'parameters': parameters}
        self.database.update(condition, update)

    def get_pending_request(self):
        condition = {'status': 'pending'}
        requests = self.database.fetch_all(condition)
        return requests
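
A sketch of the request lifecycle these labels imply; the request payload and the parameters value are assumptions:

# Illustrative request lifecycle.
handler = RequestHandler(settings)
handler.process_request({'exp_identifier': 'exp_a', 'author': None})

for request in handler.get_pending_request():
    handler.label_processing(request)
    parameters = None  # placeholder: parameters produced by the ParamGenerator
    handler.dump_parameters(request, parameters)
    handler.label_done(request)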