Exemple #1
0
def main():
    """Fetch all macro-eligible solutions for one domain from the database.

    For every problem of the given domain, looks up the solution flagged
    with 'use_for_macros' and writes its raw text to '<problem>.pddl.soln'
    in the current working directory.
    """
    arg_parser = argparse.ArgumentParser(
        description="Fetch solutions from the database")
    arg_parser.add_argument('--host',
                            type=str,
                            default='enterprise',
                            help='Host to connect to')
    arg_parser.add_argument('-u',
                            '--user',
                            type=str,
                            default='planner',
                            help='database user')
    arg_parser.add_argument('-p',
                            '--password',
                            type=str,
                            help='password of the database user')
    arg_parser.add_argument('domain',
                            type=str,
                            help='the domain to fetch the solutions for')
    args = arg_parser.parse_args()
    database = db.auth(args)
    print('Fetching results for domain {}.'.format(args.domain))
    for problem in database.problems.find({'domain': args.domain}):
        name = problem['name']
        solution = database.solutions.find_one({
            'problem': problem['_id'],
            'use_for_macros': True
        })
        if solution:
            # Context manager guarantees the file is closed even if the
            # write fails (the original leaked the handle on error).
            with open(name + '.pddl.soln', 'w') as solution_file:
                solution_file.write(solution['raw'])
Exemple #2
0
def story(s="default story"):
	"""Story view: on GET render the story's lines; on POST either append
	a submitted line ("Add") or go back to the home page ("Back").

	s -- the story title to display (defaults to "default story").
	"""
	db.auth()
	if request.method=='GET':
		lines=db.getLines(s)
		# Fixed: 'print lines' was a Python 2 print statement and a
		# syntax error under Python 3, which the rest of this file uses.
		print(lines)
		return render_template("story.html",lines=lines,title=s)
	else:
		button=request.form['button']
		if button=="Add":
			nline=str(request.form.get("line",""))
			# Only store non-empty submissions.
			if len(nline)>0:
				db.add_line(s, nline)
			return redirect(url_for('story', s=s))
		elif button=="Back":
			return redirect(url_for("home"))
		# Unknown button: just re-render the story page.
		lines=db.getLines(s)
		return render_template("story.html",lines=lines,title=s)
Exemple #3
0
def login():
    """Sign-in view: on POST check the submitted credentials and redirect
    to the 'online' page on success; otherwise (or on GET) render the
    sign-in form, with an error message after a failed attempt."""
    error = None
    if request.method == 'POST':
        # NOTE(review): sig1 is kept in a module-level global, presumably so
        # other views can read the signed-in user's signature — confirm.
        global sig1
        sig1 = request.form['signature']
        # NOTE(review): builtin hash() is not a cryptographic hash and is
        # salted per-process for str in Python 3; auth() apparently expects
        # exactly this value, so it is preserved here — verify before changing.
        if auth(sig1, hash(request.form['password'])) == 0:
            return redirect(url_for('online'))
        error = 'Invalid Credentials. Please try again.'
    return render_template('signin.html', error=error)
Exemple #4
0
def home():
	"""Home view: on GET list all story titles; on POST dispatch on the
	pressed button to create, open, or drop a story, then redirect."""
	db.auth()
	if request.method == 'GET':
		titles=db.getTitles()
		return render_template("home.html",titles=titles)
	else:
		button=request.form["button"]
		if button == "Create!":
			newname=str(request.form["newtitle"])
			# Only create a story for a non-empty title.
			if len(newname)>0:
				db.add_story(newname)
			# (Removed an unused db.getTitles() fetch: this path redirects
			# and never rendered the titles.)
			return redirect(url_for('home'))
		elif button=='Read!':
			selected=request.form.get("otitle","")
			return redirect(url_for('story',s=selected)) #story.html TBC
		elif button=='Drop Story':
			selected=request.form.get("drop","")
			db.remove_story(selected)
			return redirect(url_for('home'))
		# Unknown button: fall back to the home page. A second, identical
		# return after this if/else was unreachable and has been removed.
		return redirect(url_for('home'))
Exemple #5
0
def logar():
    """Login endpoint: expects a JSON body with 'nome' and 'senha' fields.

    Authenticates the payload against the database; returns the original
    JSON payload on success, or a generic error message otherwise.
    """
    dados = request.get_json(force=True)

    if dados['nome'] != '' and dados['senha'] != '':

        cur = conn.cursor()
        try:
            user = db.auth(dados, cur)
        finally:
            # Fixed: the cursor was never released; close it even if
            # authentication raises.
            cur.close()

        print(user)

        if user is not None:
            return jsonify(dados)

    return jsonify({"message": "dados inválidos."})
Exemple #6
0
def user():
    """
    Auth controller exposing the standard web2py user actions:
    http://..../[app]/default/user/login
    http://..../[app]/default/user/logout
    http://..../[app]/default/user/register
    http://..../[app]/default/user/profile
    http://..../[app]/default/user/retrieve_password
    http://..../[app]/default/user/change_password
    http://..../[app]/default/user/bulk_register

    Use @auth.requires_login(),
        @auth.requires_membership('group name'),
        @auth.requires_permission('read','table name',record_id)
    to decorate functions that need access control.
    Administrators can manage users via
    http://..../[app]/appadmin/manage/auth
    """
    # auth() builds the form appropriate for the requested action.
    return dict(form=auth())
Exemple #7
0
def main():
    """Upload a MacroFF macro file into the database.

    Parses the command line, authenticates against the database, resolves
    the (non-augmented) domain entry by name, and inserts the macro file's
    raw contents as a new document in the macros collection.
    """
    parser = argparse.ArgumentParser(
        description="Upload the given MacroFF macro file to the database")
    parser.add_argument(
        '--host', type=str, default='enterprise', help='Host to connect to')
    parser.add_argument(
        '-u', '--user', type=str, default='planner', help='database user')
    parser.add_argument(
        '-p', '--password', type=str,
        help='password of the database user')
    parser.add_argument(
        '-t', '--type', type=str,
        help='the type of the macro (e.g., macroff)')
    parser.add_argument('domain', type=str, help='the domain name')
    parser.add_argument('macro_file', type=argparse.FileType('r'))
    args = parser.parse_args()
    database = db.auth(args)
    # Only the original (non-augmented) domain entry may own the macro.
    query = {'name': args.domain, 'augmented': {'$ne': True}}
    domain_entry = database.domains.find_one(query)
    assert domain_entry, \
            'Could not find domain with name {} in database'.format(args.domain)
    domain_id = domain_entry['_id']
    print('Uploading macro file "{}" for domain "{}"'.format(
        args.macro_file.name, domain_id))
    macro_doc = {
        'name': 'macroff-' + str(domain_id),
        'domain': domain_id,
        'type': args.type,
        'raw': args.macro_file.read(),
    }
    database.macros.insert_one(macro_doc)
Exemple #8
0
 def process(self):
     """Route the decoded request to registration or login handling.

     self.data is expected to be bytes of the form
     "<command>/0<field>/0..." — TODO confirm the field layout against
     the sender. Returns True/False for "register", the auth() result
     for "login" (the string "True" signals success), and None when the
     command is unrecognized or decoding fails.
     """
     try:
         credentials = self.data.decode().split("/0")
         if (credentials[0] == "register"):
             try:
                 add_cred(credentials)
                 print("Successfully registered!")
             except Exception:
                 # Narrowed from a bare 'except:' so SystemExit and
                 # KeyboardInterrupt are no longer swallowed; any other
                 # failure still counts as "not registered".
                 return False
             return True
         elif (credentials[0] == "login"):
             try:
                 result = auth(credentials)
                 print(result)
                 if (result == "True"):
                     print("Logged in successfully")
                 else:
                     print("Error in logging in")
             except Exception:
                 # Narrowed from a bare 'except:' — any auth failure is
                 # reported as a failed login.
                 return False
             return result
     except Exception as e:
         print("Exception : Decoding error in process:", e)
Exemple #9
0
def main():
    """Parse raw planner solutions stored in the database.

    Collects solution documents selected via --all, --solution-id, or
    positional problem names, parses each raw solution (translating
    macro actions back to primitive actions first for augmented
    domains), and writes the parsed result back into the solutions
    collection.
    """
    parser = argparse.ArgumentParser(
        description='Upload a PDDL domain or problem file to the database,'
        ' and optionally start a Kubernetes job.')
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        default=0,
                        help='verbose output; '
                        '-v: print errors; -vv: print solutions')
    parser.add_argument('-c',
                        '--config-file',
                        help='config file to read database info from')
    parser.add_argument('-H', '--db-host', help='the database hostname')
    parser.add_argument('-u', '--db-user', help='the database username')
    parser.add_argument('-p', '--db-passwd', help='the database password')
    parser.add_argument('-i',
                        '--solution-id',
                        action='append',
                        dest='solution_ids',
                        help='an ID of a solution to parse')
    parser.add_argument('-f',
                        '--force',
                        action='store_true',
                        help='Update db entry even parsed solution already'
                        'exists')
    parser.add_argument('-a',
                        '--all',
                        action='store_true',
                        help='check the database for any unparsed solutions '
                        'and parse them')
    parser.add_argument('solutions',
                        metavar='problem_name',
                        nargs='*',
                        help='a name of a problem/solution to parse')

    args = parser.parse_args()
    database = db.auth(args)
    coll = database.solutions
    # Gather the solution documents to process; the three selection
    # mechanisms below are additive, so duplicates are possible if the
    # same solution is selected more than once.
    solution_entries = []
    if args.all:
        if args.force:
            solution_entries = list(coll.find())
        else:
            # 'actions' missing means the solution was never parsed.
            solution_entries = list(coll.find({'actions': {'$exists': False}}))
    if args.solution_ids:
        for solution_id in args.solution_ids:
            res = coll.find_one({'_id': bson.objectid.ObjectId(solution_id)})
            if res:
                solution_entries.append(res)
            else:
                print('Could not find ID {}, skipping!'.format(solution_id))
    if args.solutions:
        for solution in args.solutions:
            res = coll.find_one({'problem': solution})
            if res:
                solution_entries.append(res)
            else:
                print('Could not find problem {}, skipping!'.format(solution))
    for solution in solution_entries:
        # Skip failed planner runs; they have no raw plan to parse.
        if 'error' in solution:
            if args.verbose >= 1:
                print('Planner failed on problem {} with error "{}", '
                      'skipping!'.format(solution['problem'],
                                         solution['error']))
            continue
        if 'actions' in solution and not args.force:
            print('Solution {} is already parsed, '
                  'skipping!'.format(solution['_id']))
            continue
        try:
            raw_solution = solution['raw']
        except KeyError:
            print('ERROR: Solution for problem "{}" with ID {} has no "raw" '
                  'entry!'.format(solution['problem'], solution['_id']))
            continue
        domain = database.domains.find_one({'_id': solution['domain']})
        assert domain, \
                'No domain for solution {} found!'.format(solution['_id'])
        is_augmented = False
        if 'augmented' in domain and domain['augmented'] == True:
            is_augmented = True
        if is_augmented:
            # Augmented domains contain macro actions; translate the plan
            # back into primitive actions before parsing it.
            extractor = dbmp.MacroExtractor()
            domain_string = domain['raw']
            extractor.extract_macros_from_string(domain_string)
            try:
                translated_solution = extractor.translate_solution(
                    raw_solution)
            except Exception as e:
                print('ERROR translating the solution of {}: "{}". '
                      'Skipping!'.format(solution['_id'], str(e)))
                continue
            parsed_solution = parse_solution(translated_solution)
        else:
            parsed_solution = parse_solution(raw_solution)
        if args.verbose >= 2:
            pp = pprint.PrettyPrinter()
            print('Result for ID {} (problem "{}"):'\
                    .format(solution['_id'], solution['problem']))
            pp.pprint(parsed_solution)
        # NOTE(review): Collection.update() is deprecated in newer pymongo
        # releases; update_one() filtering on '_id' looks like the modern
        # equivalent — confirm the installed driver version before changing.
        coll.update(solution, {'$set': parsed_solution})
Exemple #10
0
def main():
    """Upload PDDL problems for a domain and optionally start planner jobs.

    Resolves the domain (from a domain file, a domain name, or a domain
    ID), uploads the given problem files (unless --skip-upload), collects
    the set of problems to run (explicit, --all, --all-missing, or
    --all-failed), and starts a Kubernetes job per problem if --start-job
    is given.
    """
    parser = argparse.ArgumentParser(
        description='Upload a PDDL domain or problem file to the database,'
        ' and optionally start a Kubernetes job.')
    parser.add_argument('-c',
                        '--config-file',
                        help='config file to read database info from')
    parser.add_argument('-H', '--db-host', help='the database hostname')
    parser.add_argument('-u', '--db-user', help='the database username')
    parser.add_argument('-p', '--db-passwd', help='the database password')
    parser.add_argument('--start-job',
                        action='store_true',
                        help='start a Kubernetes job for the given problem')
    parser.add_argument('-a',
                        '--all',
                        action='store_true',
                        help='start jobs for all problems in the domain')
    parser.add_argument('--all-missing',
                        action='store_true',
                        help='start jobs for all problems without a solution')
    parser.add_argument('--all-failed',
                        action='store_true',
                        help='start jobs for all problems that failed before')
    parser.add_argument('-t',
                        '--kubernetes-template',
                        help='the job template for the Kubernetes job')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--training',
                       dest='phase',
                       default='training',
                       action='store_const',
                       const='training',
                       help='mark uploaded problems as training problems')
    group.add_argument('--validation',
                       dest='phase',
                       action='store_const',
                       const='validation',
                       help='mark uploaded problems as validation problems')
    group.add_argument('--test',
                       dest='phase',
                       action='store_const',
                       const='test',
                       help='mark uploaded problems as test problems')
    parser.add_argument('--skip-upload',
                        action='store_true',
                        help='do not upload the problem')
    dom_group = parser.add_mutually_exclusive_group(required=True)
    dom_group.add_argument('--domainfile', help='the domain file to add')
    dom_group.add_argument(
        '--domain', help='the name of the domain the problems belong to')
    dom_group.add_argument('--domain-id',
                           help='the ID of the domain the problems belong to')
    parser.add_argument('--problem',
                        action='append',
                        dest='problems',
                        help='Additional problem to start a job for')
    parser.add_argument('--planner', default='ff', help='the planner to use')
    parser.add_argument('problemfiles',
                        metavar='problemfile',
                        nargs='*',
                        help='the problem files to add')
    args = parser.parse_args()
    if args.problems:
        problems = set(args.problems)
    else:
        problems = set()
    database = db.auth(args)
    domain_coll = database.domains
    problem_coll = database.problems
    solution_coll = database.solutions
    if args.domainfile:
        # Context manager closes the file even on read errors.
        with open(args.domainfile, 'r') as domainfile:
            domain_string = domainfile.read()
        domain_name = get_domainname(domain_string)
        # Fix: initialize 'domain' so the fallback lookup below also runs
        # when --skip-upload is given (previously this path raised a
        # NameError at 'if not domain:').
        domain = None
        if not args.skip_upload:
            assert domain_coll.find({ 'name': domain_name }).count() == 0, \
                'Domain "{}" already exists in the database.'.format(
                    domain_name)
            domain = domain_coll.insert({
                'name': domain_name,
                'raw': domain_string
            })
        if not domain:
            # Domain was not uploaded: look up the existing, non-augmented
            # entry to get its ID.
            domain = domain_coll.find_one({
                'name': domain_name,
                'augmented': {
                    '$ne': True
                }
            })['_id']
    elif args.domain_id:
        domain = args.domain_id
        domain_entry = domain_coll.find_one(
            {'_id': bson.objectid.ObjectId(domain)})
        assert(domain_entry), \
                'Could not find domain with ID "{}"'.format(domain)
        domain_name = domain_entry['name']
    else:
        domain_name = args.domain
        domain_entry = domain_coll.find_one({'name': domain_name})
        assert domain_entry, \
                'Could not find domain with name "{}"'.format(domain_name)
        domain = domain_entry['_id']
    for problempath in args.problemfiles:
        with open(problempath, 'r') as problemfile:
            problem_string = problemfile.read()
        problem_name = get_problemname(problem_string)
        problem_domain = get_domain_of_problem(problem_string)
        assert problem_domain == domain_name, \
            'Domain "{}" in problem "{}" does not match given domain name ' \
            '"{}".'.format(problem_domain, problem_name, domain_name)
        if not args.skip_upload:
            assert problem_coll.find({ 'name': problem_name }).count() == 0, \
                'Problem "{}" already exists in database.'.format(problem_name)
            problem_id = problem_coll.insert({
                'name': problem_name,
                'domain': domain_name,
                'raw': problem_string,
                'phase': args.phase
            })
        else:
            problem_id = problem_coll.find_one({'name': problem_name})['_id']
        problems.add(problem_id)
    if args.all or args.all_missing or args.all_failed:
        all_problems = list(
            problem_coll.find({'domain': domain_name}, {'name': True}))
    if args.all:
        for problem in all_problems:
            problems.add(problem['_id'])
    if args.all_missing:
        # Problems with no solution entry for this planner at all.
        for problem in all_problems:
            if not solution_coll.find_one({
                    'domain_id': domain,
                    'problem': problem['_id'],
                    'planner': args.planner
            }):
                problems.add(problem['_id'])
    if args.all_failed:
        # A solution entry without a 'raw' plan marks a failed run.
        for problem in all_problems:
            if solution_coll.find_one({
                    'domain_id': domain,
                    'problem': problem['_id'],
                    'raw': {
                        '$exists': False
                    }
            }):
                problems.add(problem['_id'])
    if args.start_job:
        for problem in problems:
            start_job(args.planner, args.kubernetes_template, domain, problem)
            print('---')
Exemple #11
0
def main():
    """Generate, evaluate, and store PDDL macro actions.

    Macros are built either from frequent action sequences in the
    database (--from-db/--all) or from an action/parameter list given on
    the command line. They can be evaluated with a family of evaluators,
    filtered to the best ones, saved to the database (--save), combined
    into augmented domains (--augment-domain), or re-evaluated in place
    (--re-evaluate).
    """

    parser = argparse.ArgumentParser(
        description='Read frequent action patterns from the database and '
        'generate PDDL macro actions for those action patterns.')
    parser.add_argument('--domain', help='the domain the problems belong to')
    parser.add_argument('--domainfile',
                        help='path to the domain this macro belongs to')
    parser.add_argument('-c',
                        '--config-file',
                        help='config file to read database info from')
    parser.add_argument('-H', '--db-host', help='the database hostname')
    parser.add_argument('-u', '--db-user', help='the database username')
    parser.add_argument('-p', '--db-passwd', help='the database password')
    parser.add_argument('-s',
                        '--save',
                        action='store_true',
                        help='upload the resulting macro into the database')
    parser.add_argument('--from-db',
                        action='store_true',
                        help='fetch domain and actions from the database')
    parser.add_argument('-a',
                        '--all',
                        action='store_true',
                        help='generate macros for all action sequences in the '
                        'database for the given domain')
    parser.add_argument('--best',
                        type=int,
                        default=0,
                        help='limit to the the n most occurring sequences')
    parser.add_argument('-l',
                        '--occurrence-threshold',
                        type=int,
                        default=1,
                        help='the minimal number of occurrences of the action '
                        'sequence such that a macro is generated from it')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='print the generated macro actions')
    parser.add_argument(
        '-g',
        '--augment-domain',
        action='store_true',
        help='augment the domain with the macro and upload the '
        'resulting domain macro')
    parser.add_argument('-m',
                        '--max-num-macros',
                        type=int,
                        default=1,
                        help='the maximum number of macros to add to a domain')
    parser.add_argument('--min-actions',
                        type=int,
                        help='the minimum number of actions in the macro')
    parser.add_argument('--max-actions',
                        type=int,
                        help='the maximum number of actions in the macro')
    parser.add_argument('--num-actions',
                        type=int,
                        help='the exact number of actions in the macro')
    parser.add_argument('-e',
                        '--evaluate',
                        action='store_true',
                        help='evaluate the resulting macros for their '
                        'usefulness')
    parser.add_argument('--resource-file',
                        help='write resource usage to this file')
    parser.add_argument('--re-evaluate',
                        action='store_true',
                        help='re-evaluate the macros in the database')
    parser.add_argument('--best-evaluated',
                        type=int,
                        default=0,
                        help='only use the best n macros according to '
                        'the given evaluator')
    parser.add_argument('--evaluator',
                        type=str,
                        help='the macro evaluator to use for filtering')
    parser.add_argument('--store-best',
                        type=int,
                        default=0,
                        help='only store a macro if it is one of the best n'
                        ' domains, according to one domain evaluator')
    parser.add_argument(
        'action',
        nargs='*',
        help='an action and its parameters to include into the '
        'macro, e.g. "unstack 1,2"')
    args = parser.parse_args()
    # --- Argument validation -------------------------------------------
    # Fix: the failure message referenced args.actions (no such attribute,
    # the argparse dest is 'action'), which raised AttributeError instead
    # of printing the intended message.
    assert(len(args.action) % 2 == 0), \
            'You need to specify parameters for each action, given actions: ' \
            + str(args.action)
    assert(args.domain or args.domainfile), \
            'Please specify a domain name or a domain file'
    assert(args.max_num_macros > 0), \
            'Max number of macros needs to be higher than 0'
    if args.max_actions and args.min_actions:
        assert(args.max_actions >= args.min_actions), \
                'Maximum number of actions must be at least the minimum'
    if args.num_actions:
        assert(not args.min_actions), \
                'Conflicting arguments --num-actions and --min-actions'
        assert(not args.max_actions), \
                'Conflicting arguments --num-actions and --max-actions'
        args.min_actions = args.num_actions
        args.max_actions = args.num_actions
    if args.re_evaluate:
        # Re-evaluation implies reading macros from the database.
        args.from_db = True
        args.evaluate = True
        assert (not args.action), 'Cannot re-evaluate locally'
        assert (not args.save), 'Cannot save macros when re-evaluating'
        assert((not args.all) and (not args.augment_domain)), \
                'Cannot generate macro or augment domain when re-evaluating'
    if args.best_evaluated:
        args.evaluate = True
        assert (args.evaluator), 'Need evaluator to filter by score'
        assert(args.best_evaluated <= args.best or not args.best), \
                '--best must be >= --best-evaluated'
    if not args.domain:
        dfile = open(args.domainfile, 'r')
        domain_string = dfile.read()
        args.domain = get_domainname(domain_string)
    # --- Macro generation ----------------------------------------------
    macros = set()
    total_num_actions = 1
    if args.from_db:
        database = db.auth(args)
        action_seqs_coll = database['action_sequences_' + args.domain]
        domain_coll = database.domains
        macros_coll = database.macros
        if not args.domainfile:
            # Materialize the domain from the database into a temp file so
            # MacroAction.generate() can read it from disk.
            domain_entry = domain_coll.find_one({'name': args.domain})
            assert(domain_entry), \
                'Could not find domain {} in the database!'.format(args.domain)
            tmpfile = tempfile.NamedTemporaryFile(mode='w')
            tmpfile.write(domain_entry['raw'])
            tmpfile.flush()
            args.domainfile = tmpfile.name
        if not args.domain:
            args.domain = get_domainname(args.domainfile)
        total_num_actions = 0
        domain_id = domain_coll.find_one({
            'name': args.domain,
            'augmented': {
                '$ne': True
            }
        })['_id']
        # Count all actions of the macro-eligible solutions; used below to
        # normalize the frequency term of the evaluators.
        for sol in database.solutions.find({
                'domain': domain_id,
                'use_for_macros': True
        }):
            if 'actions' in sol:
                total_num_actions += len(sol['actions'])
        if args.all:
            query = {'value.domain': domain_id}
            # Restrict sequence length via existence checks on the i-th
            # element of the stored action list.
            if args.min_actions:
                query['value.actions.'+str(args.min_actions-1)] = \
                        { '$exists': True }
            if args.max_actions:
                query['value.actions.'+str(args.max_actions)] = \
                        { '$exists': False }
            for sequence in action_seqs_coll.find(query).sort([
                ('value.totalCount', -1)
            ]).limit(args.best):
                for parameters in sequence['value']['parameters']:
                    if parameters['count'] < args.occurrence_threshold:
                        continue
                    actions = sequence['value']['actions']
                    parameter_list = []
                    for params in parameters['assignment']:
                        parameter_list.append([int(param) for param in params])
                    m = MacroAction()
                    m.generate(args.domainfile, actions, parameter_list)
                    if m.initialized and m.macro:
                        m.count = int(parameters['count'])
                        m.domain = args.domain
                        macros.add(m)
                    else:
                        print('Failed to initialize macro with actions '
                              '{} and parameters {}.'.format(
                                  actions, parameter_list))
    if args.action:
        # Command-line actions come as alternating <name> <params> pairs.
        actions = args.action[0::2]
        parameters = []
        for params in args.action[1::2]:
            if params == 'none':
                parameters.append([])
            else:
                parameters.append([int(param) for param in params.split(',')])
        assert (args.domainfile), 'Domain was not specified'
        m = MacroAction()
        m.generate(args.domainfile, actions, parameters)
        if m.initialized:
            m.domain = args.domain
            # We don't really know the count, so assume it is 1.
            m.count = 1
            macros.add(m)
        else:
            print('Failed to initialize macro with actions '
                  '{} and parameters {}.'.format(actions, parameters))
    # --- Evaluation ----------------------------------------------------
    # Build a grid of weighted evaluators plus the PR^2 evaluator.
    evaluators = []
    for weight in range(0, 101, 10):
        for lweight in range(0, 11):
            for cweight in range(0, 11):
                evaluators.append(
                    macro_evaluator.MCWithLengthWeightedFPEvaluator(
                        frequency_weight=weight,
                        frequency_normalizer=total_num_actions,
                        complementarity_weight=cweight,
                        length_weight=lweight))
    evaluators.append(macro_evaluator.PRSquaredEvaluator())
    if args.evaluate:
        evaluation_scores = []
        for macro in macros:
            evaluation = {}
            for evaluator in evaluators:
                evaluation[evaluator.name()] = evaluator.evaluate(macro)
            macro.evaluation = evaluation
            if args.best_evaluated:
                assert args.evaluator in evaluation, \
                        '{} not a valid evaluator. Evaluators: {}'.format(\
                            args.evaluator, list(evaluation.keys()))
                evaluation_scores.append(evaluation[args.evaluator])
        evaluation_scores.sort(reverse=True)
        if args.best_evaluated:
            assert args.evaluator, \
                    'Need an evaluator to check for the best macros'
            # Keep only macros scoring at least as high as the n-th best.
            best_macros = set()
            for macro in macros:
                if macro.evaluation[args.evaluator] >= \
                   evaluation_scores[args.best_evaluated - 1]:
                    best_macros.add(macro)
            macros = best_macros
    for macro in macros:
        if args.save:
            # NOTE(review): macros_coll is only bound under --from-db;
            # --save without --from-db raises NameError here — confirm
            # whether that combination should be rejected up front.
            macro._id = macros_coll.find_one_and_replace(
                {
                    'type': 'dbmp',
                    'actions': macro.actions,
                    'parameters': macro.parameters
                },
                macro.__dict__,
                upsert=True,
                return_document=pymongo.ReturnDocument.AFTER)['_id']
        if args.verbose:
            print(macro.__dict__)
    # --- Domain augmentation -------------------------------------------
    if args.augment_domain:
        assert(args.save), \
            'You must provide --save if you want to augment the domain'
        num_domains = 0
        best_evaluator_scores = {}
        for evaluator in evaluators:
            best_evaluator_scores[evaluator] = []
        for num_macros in range(1, args.max_num_macros + 1):
            for macro_combination in itertools.combinations(
                    macros, num_macros):
                if not unique_actions(macro_combination):
                    print('Skipping macro set due to non-unique action sets: '
                          '{}'.format([m.__str__()
                                       for m in macro_combination]))
                    continue
                # If we are not storing only the best domains, directly select
                # this one.
                selected = args.store_best <= 0
                evaluation = {}
                for evaluator in evaluators:
                    score = evaluator.evaluate_list(list(macro_combination))
                    evaluation[evaluator.name()] = score
                    best_scores = best_evaluator_scores[evaluator]
                    if len(best_scores) < args.store_best or \
                       -best_scores[args.store_best-1] < score:
                        if args.verbose:
                            print('Inserting {} with {} score {},'
                                  'best: {}'.format(
                                      [m.__str__() for m in macro_combination],
                                      evaluator, score, best_scores))
                        selected = True
                        # Store -score because bisect expects an increasing list
                        bisect.insort_left(best_scores, -score)
                if not selected:
                    continue
                domain_entry = domain_coll.find_one({
                    'name': args.domain,
                    'augmented': {
                        '$ne': True
                    }
                })
                assert(domain_entry), \
                        'Could not find domain {}'.format(args.domain)
                augmented_domain_entry = domain_entry
                # remove the ID so we can upload the domain as a new document
                augmented_domain_entry['base_domain'] = domain_entry['_id']
                del augmented_domain_entry['_id']
                augmented_domain_entry['macros'] = \
                        [ macro._id for macro in macro_combination ]
                augmented_domain_entry['augmented'] = True
                domain_string = domain_entry['raw']
                for macro in macro_combination:
                    domain_string = augment_domain(domain_string, macro.macro)
                augmented_domain_entry['raw'] = domain_string
                augmented_domain_entry['evaluation'] = evaluation
                if args.verbose:
                    print('Inserting {}'.format(augmented_domain_entry))
                    print('Evaluation: {}'.format(evaluation))
                updated_domain_id = domain_coll.find_one_and_replace(
                    {
                        'name': augmented_domain_entry['name'],
                        'macros': augmented_domain_entry['macros']
                    },
                    augmented_domain_entry,
                    upsert=True,
                    return_document=pymongo.ReturnDocument.AFTER)['_id']
                num_domains += 1
                if args.verbose:
                    print('Updated domain {}.'.format(updated_domain_id))
    # --- Re-evaluation of stored macros and augmented domains ----------
    if args.re_evaluate:
        for db_macro in macros_coll.find({'type': 'dbmp'}):
            macro = MacroAction()
            macro.from_db(db_macro)
            evaluation = {}
            for evaluator in evaluators:
                evaluation[evaluator.name()] = evaluator.evaluate(macro)
            macros_coll.update_one({'_id': db_macro['_id']},
                                   {'$set': {
                                       'evaluation': evaluation
                                   }})
        for domain in domain_coll.find({
                'augmented': True,
                'name': args.domain
        }):
            domain_macros = []
            for macro_id in domain['macros']:
                macro = MacroAction()
                macro.from_db(macros_coll.find_one({'_id': macro_id}))
                domain_macros.append(macro)
            evaluation = {}
            for evaluator in evaluators:
                evaluation[evaluator.name()] = \
                    evaluator.evaluate_list(domain_macros)
            domain_coll.update_one({'_id': domain['_id']},
                                   {'$set': {
                                       'evaluation': evaluation
                                   }})
    if args.verbose:
        print('Total number of macros: {}'.format(len(macros)))
        if args.augment_domain:
            print('Total number of augmented domains: {}.'.format(num_domains))
    if len(macros) > 0 and args.resource_file:
        # Record average child CPU time per generated macro.
        with open(args.resource_file, 'a') as rfile:
            assert (args.num_actions)
            rfile.write('{} {}\n'.format(
                args.num_actions,
                resource.getrusage(resource.RUSAGE_CHILDREN)[0] / len(macros)))
Exemple #12
0
	def authdev(self, imei, imsi, key):
		"""Check the (imei, imsi, key) triple against the auth backend.

		Remembers the key on self.key and returns whatever
		auth().checkauth() reports for the device.
		"""
		self.key = key
		checker = auth()
		return checker.checkauth(imei, imsi, key)
Exemple #13
0
def main():
    """Compute statistics and generate plots to analyze planner performance.

    All behavior is selected via command-line flags: descriptives, LaTeX
    tables, and various comparison plots.  Database credentials come from
    -H/-u/-p or a config file and are passed to db.auth().
    """
    parser = argparse.ArgumentParser(
        description='Compute statistics and generate plots to analyze planner'
        'performance.')
    parser.add_argument('-H', '--db-host', help='the database hostname')
    parser.add_argument('-u', '--db-user', help='the database username')
    parser.add_argument('-p', '--db-passwd', help='the database password')
    parser.add_argument('-c',
                        '--config-file',
                        help='config file to read database info from')
    parser.add_argument('-a',
                        '--all',
                        action='store_true',
                        help='evaluate all domains')
    phase_group = parser.add_mutually_exclusive_group(required=True)
    phase_group.add_argument('--validation',
                             dest='phase',
                             action='store_const',
                             const='validation',
                             help='get stats for the validation group')
    phase_group.add_argument('--test',
                             dest='phase',
                             action='store_const',
                             const='test',
                             help='get stats for the test group')
    parser.add_argument('-d',
                        '--descriptives',
                        action='store_true',
                        help='get descriptives for the given domain and'
                        'planners')
    parser.add_argument('--best',
                        type=int,
                        default=0,
                        help='only get descriptives for the best n'
                        ' configurations')
    parser.add_argument('-t',
                        '--times-table',
                        action='store_true',
                        help='generate a latex table showing the results')
    parser.add_argument('--score-table',
                        action='store_true',
                        help='generate a latex table showing the scores')
    parser.add_argument('--config-table',
                        action='store_true',
                        help='generate a latex table showing the macro'
                        'configurations')
    parser.add_argument('--fit',
                        action='store_true',
                        help='add a linear fit to evaluator plots')
    parser.add_argument('--plot-evaluators',
                        action='store_true',
                        help='create plots to analyze evaluators')
    parser.add_argument('--plot-weights',
                        action='store_true',
                        help='create plots that show the performance change by'
                        ' changing one particular weight')
    parser.add_argument('--plot-against-planner',
                        action='store_true',
                        help='create a comparison plot between the best '
                        'DBMP domain and the original domains with the '
                        'given planners')
    parser.add_argument(
        '--plot-evaluator-heatmap',
        action='store_true',
        help='plot a heatmap showing the score depending on the'
        ' evaluator')
    parser.add_argument('-3',
                        '--plot-three',
                        action='store_true',
                        help='compare DBMP to two other planners')
    parser.add_argument('--meta',
                        action='store_true',
                        help='create a plot completion vs planning time '
                        'with all given domains in one plot')
    # default=[] so the loops below iterate an empty list when the flag is
    # never given; 'append' actions otherwise default to None, which raises
    # TypeError on iteration (the -e/--evaluator arg already does this).
    parser.add_argument('--planner',
                        action='append',
                        default=[],
                        help='the planner to evaluate')
    parser.add_argument('--dbmp-planner',
                        action='append',
                        default=[],
                        help='the planner used for DBMP macros')
    parser.add_argument('-e',
                        '--evaluator',
                        action='append',
                        default=[],
                        help='the evaluator to use')
    parser.add_argument('--evaluator-from-validation',
                        action='store_true',
                        help='use best evaluator from validation')
    parser.add_argument('--dbmp-domain',
                        type=str,
                        default='',
                        help='Domain ID of the DBMP domain to use instead of '
                        'the domain with the best evaluation score')
    parser.add_argument('domains',
                        metavar='domain',
                        nargs='*',
                        help='the name of the domain to evaluate')
    args = parser.parse_args()
    database = db.auth(args)
    assert(not (args.all and args.domains)), \
            'You cannot specify domain with --all'
    printer = pprint.PrettyPrinter()
    if args.all:
        domains = database.domains.distinct('name')
    else:
        domains = args.domains

    if args.plot_evaluator_heatmap:
        for planner in args.planner:
            evaluator_heatmap.plot_heatmap(database, planner, domains,
                                           args.phase)
    if args.plot_weights:
        for planner in args.planner:
            evaluator_plot.plot_weight_factors(database, planner, domains,
                                               args.phase)
    descriptives = {}
    for domain in domains:
        domain_descriptives = []
        if args.descriptives:
            for planner in args.planner:
                # original domain
                d = get_descriptives(database, domain, planner, args.phase)
                if d:
                    domain_descriptives.append(d)
                for evaluator in args.evaluator:
                    d = get_descriptives(database, domain, planner, args.phase,
                                         evaluator)
                    if d:
                        domain_descriptives.append(d)
            for planner in args.dbmp_planner:
                planner_descriptives = []
                if args.evaluator_from_validation:
                    # Pick the evaluator that scored best during validation.
                    best_evaluators = get_best_evaluators(
                        database, domain, planner)
                    print('best evaluator for {}, {}: {}'.format(
                        domain, planner, best_evaluators[0]))
                    evaluator = best_evaluators[0][0]
                    d = get_descriptives(database, domain, planner, args.phase,
                                         evaluator)
                    planner_descriptives.append(d)
                if args.best:
                    # Keep only the n highest-scoring configurations.
                    planner_descriptives = sorted(planner_descriptives,
                                                  key=itemgetter('score'),
                                                  reverse=True)
                    planner_descriptives = planner_descriptives[0:args.best]
                domain_descriptives += planner_descriptives
            descriptives[domain] = sorted(domain_descriptives,
                                          key=itemgetter('score'),
                                          reverse=True)

        if args.plot_evaluators:
            for evaluator in args.evaluator:
                plot_evaluation_vs_planning_time(database, domain, evaluator,
                                                 args.fit)
                plot_evaluation_vs_num_completions(database, domain, evaluator,
                                                   args.fit)
        if args.plot_against_planner:
            for planner in args.planner:
                for evaluator in args.evaluator:
                    plot_best_vs_other_planner(database, domain, planner,
                                               evaluator)
        if args.plot_three:
            assert (len(args.planner) == 2), 'Need two other planners.'
            for evaluator in args.evaluator:
                plot_three(database, domain, evaluator, args.planner[0],
                           args.planner[1])

    printer.pprint(descriptives)
    if args.times_table or args.score_table or args.config_table:
        results = {}
        for domain in domains:
            results[domain] = {}
            best_score = 0
            for planner in args.planner:
                # descriptives is only filled when -d was given; fall back to
                # an empty list instead of raising KeyError.
                for d in descriptives.get(domain, []):
                    best_score = max(best_score, d['score'])
                    if d['planner'] == planner:
                        if d['config'] == 'original':
                            results[domain][planner] = d
                        else:
                            results[domain]['dbmp' + planner] = d
            results[domain]['best_score'] = best_score
        if args.times_table:
            generate_table(args.planner, domains, results, 'times')
        if args.score_table:
            generate_table(args.planner, domains, results, 'score')
        if args.config_table:
            generate_table(args.dbmp_planner, domains, results, 'config')

    if args.meta:
        if args.dbmp_domain:
            # An explicit domain ID replaces evaluator-based selection.
            assert(len(args.evaluator) == 0), \
                    'Conflicting arguments, cannot use evaluator if domain ' \
                    'is given!'
            evaluator = 'custom'
            assert(len(args.domains) == 1), \
                    'Can only plot one domain if domain ID is given!'
        else:
            assert (len(args.evaluator) == 1), 'Expected exactly one evaluator'
            evaluator = args.evaluator[0]
        plot_meta(database, args.domains, args.planner, evaluator, True,
                  args.dbmp_domain)
Exemple #14
0
 def auth(self, username, password):
     """Authenticate *username*/*password* against the database backend."""
     result = db.auth(username, password)
     return result
Exemple #15
0
def main():
    """ Main program.

    Run planning tasks for all given macros and problems in the domain. All
    parameters are given on the command line or in a config file.
    """
    parser = argparse.ArgumentParser(
        description='Run planners on domains augmented with macros.')
    parser.add_argument('-c',
                        '--config-file',
                        help='config file to read database info from')
    parser.add_argument('-H', '--db-host', help='the database hostname')
    parser.add_argument('-u', '--db-user', help='the database username')
    parser.add_argument('-p', '--db-passwd', help='the database password')
    parser.add_argument('--dry-run',
                        action='store_true',
                        help='dry-run, do not create jobs')
    parser.add_argument('--planner', default='ff', help='the planner to use')
    parser.add_argument('-t',
                        '--kubernetes-template',
                        help='the job template for the Kubernetes job')
    parser.add_argument('--domain', help='the name of the domain to run')
    parser.add_argument('--augmented-domain',
                        action='append',
                        help='an ID of an augmented domain to run')
    parser.add_argument('-a',
                        '--all',
                        action='store_true',
                        help='run all macros that fit the given criteria')
    parser.add_argument('--missing',
                        action='store_true',
                        help='run all problems without a solution')
    parser.add_argument('-o',
                        '--original-domain',
                        action='store_true',
                        help='also run the original domain')
    parser.add_argument('--macro-evaluator',
                        help='the name of the evaluation function '
                        'to use for filtering macros')
    parser.add_argument(
        '--best',
        type=int,
        default=0,
        help='limit to the n highest scoring macros or domains')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--validation',
                       dest='phase',
                       default='validation',
                       action='store_const',
                       const='validation',
                       help='run benchmarks on the validation set')
    group.add_argument('--test',
                       dest='phase',
                       action='store_const',
                       const='test',
                       help='run benchmarks on the test set')
    parser.add_argument('--standard-evaluators',
                        action='store_true',
                        help='use the standard set of evaluators')
    parser.add_argument('domain_evaluators',
                        metavar='domain-evaluator',
                        nargs='*',
                        help='the evaluators to use for domain selection')
    args = parser.parse_args()
    # Connect using credentials from the CLI args / config file.
    database = db.auth(args)
    domain_coll = database.domains
    macro_coll = database.macros
    problem_coll = database.problems
    solutions_coll = database.solutions

    if args.standard_evaluators:
        args.domain_evaluators += evaluators.get_standard_evaluators()

    # Set of augmented-domain IDs to run; explicitly given IDs are seeded
    # first, selection logic below may add more.
    if args.augmented_domain:
        domains = set(args.augmented_domain)
    else:
        domains = set()
    if args.all or args.missing:
        if args.macro_evaluator:
            # Select the top-scoring macros for this evaluator, then map each
            # macro back to the augmented domain that contains it.
            # NOTE(review): with the default --best 0, limit(0) means "no
            # limit" in MongoDB, i.e. all macros are taken.
            query = {'domain': args.domain}
            sorter = [('evaluation.' + args.macro_evaluator, -1)]
            for macro in \
                    macro_coll.find(query).sort(sorter).limit(args.best):
                domain = domain_coll.find_one({'macros': macro['_id']})
                if domain:
                    domains.add(domain['_id'])
                else:
                    print('Warning: Could not find a domain with macro {}. '
                          'Did you generate the augmented domain?'.format(
                              macro['_id']),
                          file=sys.stderr)
        # All augmented variants of the requested domain, used by both phases.
        domain_entries = list(
            domain_coll.find({
                'name': args.domain,
                'augmented': True
            }))
        if args.phase == 'validation':
            # Validation: for each evaluator, run the --best top-ranked
            # augmented domains (or all of them when --best is 0).
            for evaluator in args.domain_evaluators:
                if args.best:
                    best_domain_entries = sorted(
                        domain_entries,
                        key=lambda x: x['evaluation'][evaluator],
                        reverse=True)[0:args.best]
                else:
                    best_domain_entries = domain_entries
                for domain in best_domain_entries:
                    domains.add(domain['_id'])
        else:
            # Test: use only the single evaluator that performed best during
            # validation, and that evaluator's single best domain.
            assert args.phase == 'test', 'Unknown phase {}'.format(args.phase)
            assert args.best == 1, 'Test phase must run with args.best == 1'
            best_evaluators = stats.get_best_evaluators(
                database, args.domain, args.planner)[0:args.best]
            for evaluator, score in best_evaluators:
                print('Running evaluator {} with score {}'.format(
                    evaluator, score))
                domain = sorted(domain_entries,
                                key=lambda d: d['evaluation'][evaluator],
                                reverse=True)[0]
                domains.add(domain['_id'])

    if args.original_domain:
        # Also benchmark the unaugmented base domain for comparison.
        original_domain = domain_coll.find_one({
            'name': args.domain,
            'augmented': {
                '$ne': True
            }
        })
        assert(original_domain), \
                'Could not find unaugmented domain {}.'.format(args.domain)
        domains.add(original_domain['_id'])
    # Start one job per (domain, problem) pair of the selected phase.
    for domain in domains:
        for problem in problem_coll.find({
                'domain': args.domain,
                'phase': args.phase
        }):
            if not args.all and solutions_coll.find_one(
                {
                    'domain': bson.objectid.ObjectId(domain),
                    'problem': bson.objectid.ObjectId(problem['_id']),
                    'planner': args.planner,
                    'use_for_macros': {
                        '$ne': True
                    }
                }):
                # solution already exists, skip this problem
                continue
            if args.dry_run:
                print('Job: {}-{}-{}'.format(args.planner, domain,
                                             problem['_id']))
            else:
                start_job(args.planner, args.kubernetes_template, domain,
                          problem['_id'])
Exemple #16
0
 def auth(self, username, password):
     """Check the given credentials against the database backend."""
     verdict = db.auth(username, password)
     return verdict
 def authenticate(self):
     """Verify this user's signature and password, printing the outcome.

     A return value of 0 from auth() indicates success.
     """
     status = auth(self.sig, self.password)
     if status != 0:
         print("Signature or Password Invalid")
     else:
         print("Welcome  " + self.sig)
Exemple #18
0
def main():
    """ Connect to the database and check all plans that are not validated. """
    parser = argparse.ArgumentParser(
        description='Connect to the database and check all plans that are not'
        ' validated.')
    parser.add_argument('-c',
                        '--config-file',
                        help='config file to read database info from')
    parser.add_argument('-f',
                        '--force',
                        action='store_true',
                        help='also validate solutions which have already been'
                        ' validated')
    args = parser.parse_args()
    database = db.auth(args)
    domain_coll = database.domains
    problem_coll = database.problems
    solution_coll = database.solutions
    # Only consider solutions that completed without a planner error.
    query = {'error': {'$exists': False}}
    if not args.force:
        query['validated'] = {'$ne': True}
    for solution in solution_coll.find(query):
        try:
            # Dump domain, problem, and solution into temp files so the
            # external validator binary can read them.
            domain = domain_coll.find_one({'_id': solution['domain']})['raw']
            domain_file = tempfile.NamedTemporaryFile(mode='w')
            domain_file.write(domain)
            domain_file.flush()
            problem = problem_coll.find_one({'_id':
                                             solution['problem']})['raw']
            problem_file = tempfile.NamedTemporaryFile(mode='w')
            problem_file.write(problem)
            problem_file.flush()
            solution_file = tempfile.NamedTemporaryFile(mode='w')
            # Translate macro actions in the plan back into primitive actions
            # before handing the plan to the validator.
            macro_extractor = dbmp.MacroExtractor()
            solution_file.write(
                macro_extractor.translate_solution(solution['raw']))
            solution_file.flush()
            val_res = subprocess.run([
                'pddl-validate', domain_file.name, problem_file.name,
                solution_file.name
            ],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT,
                                     universal_newlines=True)
            # The validator prints 'Plan valid' on its own line on success.
            plan_valid = False
            for line in val_res.stdout.splitlines():
                if line == 'Plan valid':
                    plan_valid = True
                    break
            # update_one instead of the deprecated Collection.update (removed
            # in pymongo 4); matches the update_one usage elsewhere in this
            # project.
            solution_coll.update_one({'_id': solution['_id']}, {
                '$set': {
                    'validated': True,
                    'validation_success': plan_valid,
                    'validation_log': val_res.stdout
                }
            })
            if not plan_valid:
                print('Error validating solution {}! VAL output:\n{}'.format(
                    solution['_id'], val_res.stdout))
        except Exception as e:
            # Best-effort: log and continue with the remaining solutions.
            logging.error('Failed to validate solution {}: {}\n{}'.format(
                solution['_id'], e, traceback.format_exc()))