Example No. 1
    def _modify_problem(f_problem, f_out, tr_outfiles):
        """Modify the problem by writing it to a new file.
        Return the name of the new problem file."""

        f_init_states = tr_outfiles["init_states"]
        f_axioms = tr_outfiles["axioms"]

        # the problem
        problem = get_contents(f_problem)
        problem_tree = LispParser.get_tree(problem)

        # the preferences
        init_states = get_contents(f_init_states)
        init_states_tree = LispParser.get_tree(init_states)
        TreeHanger.add_init_state_prefs(problem_tree, init_states_tree)

        # the axioms
        axioms = get_contents(f_axioms)
        axiom_tree = LispParser.get_tree(axioms)
        TreeHanger.add_metric_functions(problem_tree, axiom_tree)

        # write to the new problem file
        fp = open(f_out, "w")
        fp.write(problem_tree.to_lisp())
        fp.close()

        return f_out
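In the PDDL and Lisp examples here, get_contents is evidently a small helper that reads a whole file into a string: it is called with a file name and its result is fed straight into a parser. A minimal sketch under that assumption, not the project's actual implementation:

def get_contents(fname):
    # Assumed helper: return the entire contents of a text file as one string.
    with open(fname, "r") as f:
        return f.read()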
Example No. 3
    def _modify_domain(f_domain, f_out, tr_outfiles):
        """Create a new domain file. Return the name of the new file."""

        f_add_effects = tr_outfiles["add_effects"]
        f_del_effects = tr_outfiles["del_effects"]
        f_axioms = tr_outfiles["axioms"]

        # the domain
        domain = get_contents(f_domain)
        domain_tree = LispParser.get_tree(domain)

        # add and del effects
        add_effects_tree = LispParser.get_tree(get_contents(f_add_effects))
        del_effects_tree = LispParser.get_tree(get_contents(f_del_effects))
        TreeHanger.add_add_del_effects(domain_tree, add_effects_tree, del_effects_tree)

        # domain prefs
        axioms = get_contents(f_axioms)
        axiom_tree = LispParser.get_tree(axioms)
        TreeHanger.add_domain_prefs(domain_tree, axiom_tree)

        # write to the new domain file
        fp = open(f_out, "w")
        fp.write(domain_tree.to_lisp())
        fp.close()

        return f_out
Example No. 5
    def get(request):
        resp = Response()
        contents = get_contents(request)
        if contents:
            acct_id = contents.get('id', None)
            acct_email = contents.get('email', None)
            try:
                if acct_id:
                    AccountProfile.objects.get(id=acct_id)
                    resp.passed()
                    resp.add_param('result', True)
                elif acct_email:
                    AccountProfile.objects.get(company_email__icontains=acct_email)
                    resp.passed()
                    resp.add_param('result', True)

            except AccountProfile.DoesNotExist:
                resp.passed()
                resp.add_param('result', False)

            except AccountProfile.MultipleObjectsReturned:
                resp.passed()
                resp.add_param('result', False)
        else:
            resp.failed()
            resp.add_message('Missing parameters!')

        return HttpResponse(resp.get_response(), content_type="application/json")
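In the Django view examples (No. 5, 6, 9 through 12, and 15), get_contents takes the request object instead of a file name and returns something dict-like, so it presumably deserializes the JSON request body. A minimal sketch under that assumption:

import json

def get_contents(request):
    # Assumed helper: parse the JSON body of a Django request into a dict.
    # Returning None on a missing or malformed body lets callers branch on
    # "if contents:" exactly as the views above do.
    try:
        return json.loads(request.body.decode("utf-8"))
    except (ValueError, AttributeError):
        return None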
Example No. 6
    def get(request, qry_type):
        resp = Response()
        contents = get_contents(request)
        if contents:
            qry_id = contents.get('id', None)
            qry_email = contents.get('email', None)
            try:
                accnt = None
                if qry_type == 'id':
                    accnt = AccountProfile.objects.get(id=qry_id)
                elif qry_type == 'email':
                    accnt = AccountProfile.objects.get(company_email__icontains=qry_email)

                a_obj = {'id': accnt.id, 'name': accnt.name, 'email': accnt.company_email,
                         'mobile': str(accnt.company_phone), 'address_info': accnt.company_address.address_info,
                         'country': accnt.company_address.country.name, 'city': accnt.company_address.city.name}

                resp.passed()
                resp.add_param('result', a_obj)

            except AccountProfile.DoesNotExist:
                resp.failed()
                resp.add_message('AccountProfile with ID/Email does not exist')

            except AccountProfile.MultipleObjectsReturned:
                resp.failed()
                resp.add_message('Multiple AccountProfiles match the given ID/Email')

        return HttpResponse(resp.get_response(), content_type="application/json")
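These views also share a small Response wrapper with passed(), failed(), add_param(), add_message() and get_response(). Its definition is not shown in any of the examples; what follows is a plausible reconstruction that matches how it is used, not the project's confirmed code:

import json

class Response:
    # Hypothetical reconstruction of the response wrapper used by these views.
    def __init__(self):
        self._data = {'status': False, 'messages': [], 'params': {}}

    def passed(self):
        self._data['status'] = True

    def failed(self):
        self._data['status'] = False

    def add_param(self, key, value):
        self._data['params'][key] = value

    def add_message(self, message):
        self._data['messages'].append(message)

    def get_response(self):
        return json.dumps(self._data)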
Example No. 7
    def create(fname):
        """Create a PDDL Tree out of the given PDDL file."""

        pddl_list = PDDL_Tree._get_pddl_list(get_contents(fname))
        pddl_tree = PDDL_Tree._make_tree(pddl_list)
        PDDL_Tree._alter_tree(pddl_tree)
        return pddl_tree
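Typical use of this factory would be a single call on a file path; the path below is only illustrative, and the print_tree() call is assumed by analogy with the action nodes in Example No. 16, not confirmed for this class:

# Hypothetical usage of the factory above; the path is a placeholder.
tree = PDDL_Tree.create("samples/gripper-domain.pddl")
tree.print_tree()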
Example No. 8
 def compare(f_baseline, f_generated):
     '''Compare the given generated file to the baseline file.'''
     
     baseline_tokens = LispParser().get_tokens(get_contents(f_baseline))
     generated_tokens = LispParser().get_tokens(get_contents(f_generated))
 
     i = j = 0
     len_match = token_match = True
     
     if len(baseline_tokens) != len(generated_tokens):
         print "different number of tokens"
         len_match = False
     
     while i < len(baseline_tokens) and j < len(generated_tokens):
         if baseline_tokens[i] != generated_tokens[j]:
             token_match = False
             
             print "==> difference"
             
             print "Context for baseline:"
             print " ".join(baseline_tokens[max(0, i - LispDiff.CONTEXT_WINDOW) : i + LispDiff.CONTEXT_WINDOW])
             
             print "Context for generated:"
             print " ".join(generated_tokens[max(0, j - LispDiff.CONTEXT_WINDOW) : j + LispDiff.CONTEXT_WINDOW])
         
             # here we need a way to try to line them up again
             for ii in range(-1 - LispDiff.ALIGN_WINDOW, 1 + LispDiff.ALIGN_WINDOW):
                 if ii == 0: continue
                 
                 if baseline_tokens[i + ii : i + ii + LispDiff.MATCH_WINDOW + 1] == generated_tokens[j : j + LispDiff.MATCH_WINDOW + 1]:
                     if ii > 0:
                         print "===> roll forward %d chars" % ii
                     else:
                         print "===> roll back %d chars" % (-1 * ii)
                     
                     i += ii
                     break
             
         i += 1
         j += 1
         
     if token_match and not len_match:
         print "Tokens all match even though lengths are different"
         print len(baseline_tokens)
         print len(generated_tokens)
      
     return len_match and token_match
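A typical invocation of this diff helper, assuming it is exposed as a static method of the LispDiff class whose window constants it references (the file names are placeholders):

# Hypothetical usage of the comparison above.
if LispDiff.compare("baseline/problem.pddl", "generated/problem.pddl"):
    print("files are token-equivalent")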
Example No. 9
    def post(request):
        resp = Response()
        contents = get_contents(request)
        validation = validate_signup_composite(contents, resp)
        if validation:
            # Update Account
            update_client_account(contents, resp)

        return HttpResponse(resp.get_response(), content_type="application/json")
Example No. 10
    def post(request, accnt_type):
        resp = Response()
        contents = get_contents(request)
        validation = validate_signup_composite(contents, resp)
        if validation:
            # Create signup composite
            create_signup_composite(contents, accnt_type, resp)

        return HttpResponse(resp.get_response(), content_type="application/json")
Example No. 11
    def post(request):
        resp = Response()
        contents = get_contents(request)
        enquiry_form = SiteEnquiryForm(contents)
        if enquiry_form.is_valid():
            enquiry_form.save()
            resp.passed()
            resp.add_message("Thank you. We received your enquiry and we'll get in touch soonest.")
        else:
            resp.failed()
            resp.add_param('errors', compile_form_errors(enquiry_form))

        return HttpResponse(resp.get_response(), content_type="application/json")
Example No. 12
    def get(request):
        resp = Response()
        contents = get_contents(request)
        if contents:
            search_string = contents.get('pattern', None)
            if search_string:
                try:
                    results = UserProfile.objects.filter(active=True, account_manager=True,
                                                         name__icontains=search_string).order_by('id').values('id', 'name')
                    matches = [x for x in results]
                    resp.passed()
                    resp.add_param('matches', matches)

                except Exception as err:
                    resp.failed()
                    resp.add_message(str(err))
        else:
            resp.failed()
            resp.add_message('Missing request parameters.')

        return HttpResponse(resp.get_response(), content_type="application/json")
Example No. 13
def benchmark_seek_all():
    tree = Parser().get_tree(get_contents('samples/gripper-domain.pddl'))

    print "==> generator:"
    start = time.time()
    #for i in xrange(1):
    list(tree.seek_all([':action']))  # consume the generator so the traversal itself is timed
    print(time.time() - start)  #* 1000

    print "==> list"
    start = time.time()
    #for i in xrange(1):
    tree.seek_all_list([':action'])
    print(time.time() - start)  #* 1000
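The same comparison can also be made with timeit for more stable numbers. The sketch below assumes a tree parsed as in the function above, and wraps seek_all() in list() because it appears to return a generator:

import timeit

# Illustrative alternative to the ad-hoc timing above.
gen_time = timeit.timeit(lambda: list(tree.seek_all([':action'])), number=10)
list_time = timeit.timeit(lambda: tree.seek_all_list([':action']), number=10)
print(gen_time)
print(list_time)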
Example No. 15
    def post(self, request, **kwargs):
        customer_type = kwargs.get('account_type', None)
        contents = get_contents(request)
        resp = Response()
        if contents:
            _pageSize = contents.get('pageSize', 10)
            _pageIndex = contents.get('pageIndex', 1)
            _filter = contents.get('filters', [])
            _sort = contents.get('sort', None)

            kwargs = build_query_filter(_filter)
            pager = build_pagination_markers(_pageIndex, _pageSize)

            # Run Query
            accounts = None
            if customer_type == 'pod':
                kwargs['account_type__name__iexact'] = 'pod'
            elif customer_type == 'streamz':
                kwargs['account_type__name__iexact'] = 'streamz'

            result_size = AccountProfile.objects.filter(**kwargs).count()
            results = AccountProfile.objects.filter(**kwargs)[pager['lm']:pager['um']]

            try:
                accounts = [{'id': x.id, 'name': x.name, 'account_code': x.account_code,
                             'company_email': x.company_email,
                             'account_officer': {'id': x.account_officer.id, 'label': x.account_officer.name}
                             if x.account_officer is not None else {'id': "",'label': ""},
                             'industry': {'id': x.industry.id, 'label': x.industry.name},
                             'company_address': {'address_info': x.company_address.address_info,
                                                 'city': {'id': x.company_address.city.id,
                                                          'label': x.company_address.city.name},
                                                 'country': {'id': x.company_address.country.id,
                                                             'label': x.company_address.country.name}
                                                 },
                             'company_phone': {'country_code': x.company_phone.country_code,
                                               'phone_number': x.company_phone.phone_number},
                             'contact_person': {'first_name': x.contact_person.first_name,
                                                'last_name': x.contact_person.last_name}} for x in results]

                resp.passed()
                resp.add_param('result', accounts)
                resp.add_param('result_size', result_size)

            except Exception as err:
                resp.failed()
                resp.add_message(str(err))

        return HttpResponse(resp.get_response(), content_type="application/json")
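build_pagination_markers is not shown, but from its use above it evidently turns a 1-based page index and a page size into the slice bounds pager['lm'] and pager['um']. A sketch under that assumption:

def build_pagination_markers(page_index, page_size):
    # Assumed helper: convert a 1-based page index and page size into
    # queryset slice bounds (lower marker 'lm', upper marker 'um').
    page_index = max(int(page_index), 1)
    page_size = max(int(page_size), 1)
    lower = (page_index - 1) * page_size
    return {'lm': lower, 'um': lower + page_size}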
Example No. 16
def profile_tree(fname):
    '''Run the PDDL parser on the given file.
    Extract and show meaningful information.'''
    
    contents = get_contents(fname)
    parser = Parser()
    tree = parser.get_tree(contents)
    
    print "==> tree-type:",
    t = tree.get_type()
    print "'%s'" % t
    
    print "==> domain: '%s'" %  tree.get_domain()
    
    if t == "problem":
        print "==> problem: '%s'" % tree.get_problem()
    
        print "==> objects:"
        print tree.get_objects()
        
        print "==> init state:"
        print tree.get_init_state()
        
        print "==> goal:"
        print tree.get_goal()
        
    else:
        print "==> predicates:"
        print tree.get_predicates()
        
        print "==> actions:"
        for a in tree.get_actions():
            print "==> action: '%s'" % a.get_action_name()
            #a.print_tree()
            
            print "==> parameters: "
            print a.get_parameters()
            
            print "==> preconditions: "
            print a.get_preconditions()
            
            print "==> effects"
            print a.get_effects()
Example No. 18
def main():
    title = sys.argv[1]
    client = HttpClient()
    params = {
        'action': 'query',
        'format': 'json',
        'prop': 'revisions',
        'rvprop': 'content',
        'titles': title,
    }
    data = client.get(URL, params)
    if data is not None:
        contents = get_contents(data)
        entities = get_entities(contents)
        pprint(entities)
        print('=' * 72)

        print('\nカテゴリのみを出力')  # "Print only the categories"
        for category in filter(lambda x: x.startswith('Category:'), entities):
            print(category)
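Here get_contents receives the parsed API response rather than a file name or request object, so it presumably digs the wikitext of the first returned page out of the MediaWiki query result. A sketch under that assumption (the layout shown is the legacy rvprop=content response format):

def get_contents(data):
    # Assumed helper: extract the latest revision's wikitext from the first
    # page in a parsed MediaWiki API "query" response.
    pages = data['query']['pages']
    page = next(iter(pages.values()))
    return page['revisions'][0]['*']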
Example No. 19
 def synthesizeAllImages(self, image_number):
     self.bg_img_list = utils.getBackgroundListFromDir(
         self.args['background_image_dir'])
     self.font_list = utils.getFontListFromDir(self.args['fonts_dir'])
     start_index = self.restoreFromPartImageDir()
     for i in tqdm.tqdm(range(start_index, image_number)):
         content, content_index = utils.get_contents(
             self.id_character_dict, self.args['characters_length_tuple'])
         background_image_path, font_path = map(
             utils.getRandomOneFromList, [self.bg_img_list, self.font_list])
         image, points = self.putContent2Image(content,
                                               background_image_path,
                                               font_path,
                                               self.args['add_rectangle'])
         if self.args['save_full_image']:
             self.saveImage(image, i)
         part_images, roi_points = utils.cropImageByPoints(image, points)
         self.saveImage(part_images, i, is_part=1)
         self.saveAnnotation(content_index, points, i)
     return
Example No. 20
  def Iop_Add64(self, left, right): return utils.mask(left + right)
  def Iop_Add32(self, left, right): return utils.mask(left + right, 32)
  def Iop_Add8(self, left, right):  return utils.mask(left + right, 8)

  def Iop_Sub64(self, left, right): return utils.mask(left - right)
  def Iop_Sub32(self, left, right): return utils.mask(left - right, 32)

  def Iop_Shl64(self, left, right): return utils.mask(left << right)
  def Iop_Shl32(self, left, right): return utils.mask(left << right, 32)

  def Iop_CmpEQ64(self, left, right): return 1 if utils.mask(left, 64) == utils.mask(right, 64) else 0
  def Iop_CmpEQ32(self, left, right): return 1 if utils.mask(left, 32) == utils.mask(right, 32) else 0

  def Iop_CmpNE64(self, left, right): return 1 if utils.mask(left, 64) != utils.mask(right, 64) else 0
  def Iop_CmpNE32(self, left, right): return 1 if utils.mask(left, 32) != utils.mask(right, 32) else 0

if __name__ == "__main__":
  import sys
  if len(sys.argv) < 3:
    print "Usage: classifier.py architecture filename [-v]"
    sys.exit(1)

  arch = archinfo.arch_from_id(sys.argv[1]).__class__
  code = utils.get_contents(sys.argv[2])

  classifier = GadgetClassifier(arch, log_level = logging.DEBUG if len(sys.argv) > 3 else logging.WARNING)
  gadgets = classifier.create_gadgets_from_instructions(code, 0x40000)
  for g in gadgets:
    print g
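Every one of these IR op handlers funnels through utils.mask, which, judging by the default call and the explicit 32- and 8-bit variants, truncates a Python integer to a fixed bit width. A sketch under that assumption:

def mask(value, width=64):
    # Assumed helper: truncate an arbitrary-precision Python int to `width` bits,
    # emulating fixed-width machine arithmetic (e.g. 64-bit wrap-around in Iop_Add64).
    return value & ((1 << width) - 1)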
Example No. 21
            for v_index, vocab_count in enumerate(vocab_count_list):
                score += vocab_count * math.log(
                    vocab_count_matrix[c_index][v_index])
            score_list.append(score)

        class_result.append(score_list.index(max(score_list)) + 1)

    return class_result


file_indices = get_file_indices('training.txt')
stopword_list = get_stopword_list('stopwords.txt')

contents_list = []
for i in range(CLASSES):
    content_by_class = get_contents(file_indices[i])
    contents_list.append(content_by_class)  # length is 13

# further processing for contents
updated_contents_list = []
for contents in contents_list:
    updated_contents = process_content(stopword_list, contents)
    updated_contents_list.append(updated_contents)

vocab_list = get_vocab_list('vocabs.txt')
vocab_prob_matrix = train_multinomial_model(vocab_list, updated_contents_list)

doc_list = get_contents(list(range(1, 1096)))
updated_doc_list = process_content(stopword_list, doc_list)
class_result = test_multinomial_model(vocab_prob_matrix, vocab_list,
                                      updated_doc_list)
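The scoring loop at the top of this snippet is the multinomial Naive Bayes decision rule: each class score sums, over the vocabulary, the document's term count times the log of that term's class-conditional probability, and the highest-scoring class (1-based) wins. A self-contained sketch of that rule, with names chosen here for illustration:

import math

def classify_multinomial(doc_counts, class_cond_probs):
    # doc_counts: term counts for one document, aligned with the vocabulary.
    # class_cond_probs: per-class list of P(term | class), same alignment.
    # Returns the 1-based index of the best-scoring class, as in the snippet above.
    scores = []
    for probs in class_cond_probs:
        scores.append(sum(count * math.log(p) for count, p in zip(doc_counts, probs)))
    return scores.index(max(scores)) + 1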
Example No. 22
    # Here we load just one pair of images: one style image and one content image.
    content_img = image.imread(os.path.join(args.image_root, "tubingen.jpg"))
    style_img = image.imread(os.path.join(args.image_root, "the_scream.jpg"))
    # image.imread() loads an image and returns an NDArray in HWC layout (RGB channel order by default).
    # imshow
    '''
    plt.imshow(style_img.asnumpy())  # asnumpy() converts the NDArray to a NumPy array.
    plt.show()
    plt.imshow(content_img.asnumpy())
    plt.show()
    '''
    # generate content image and features.
    content_x, content_y = utils.get_contents(
        net=net,
        ctx=ctx,
        content_img=content_img,
        image_shape=args.image_shape,
        content_layers=args.content_layers,
        style_layers=args.style_layers)
    # content_x is the preprocessed content image (an NDArray); content_y is the
    # list of content features extracted from it. What we need here is content_y.

    # generate style image and features.
    style_x, style_y = utils.get_styles(net=net,
                                        ctx=ctx,
                                        style_img=style_img,
                                        image_shape=args.image_shape,
                                        content_layers=args.content_layers,
                                        style_layers=args.style_layers)
    # style_x is the preprocessed style image; style_y is the list of style features extracted from it.
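utils.get_contents and utils.get_styles evidently share one shape: preprocess the image for the network, run a single forward pass, and return both the preprocessed NDArray and the per-layer feature list. A rough sketch of that pattern; preprocess and extract_features are assumed helper names, not the library's confirmed API:

def get_contents(net, ctx, content_img, image_shape, content_layers, style_layers):
    # Assumed structure: preprocess, copy to the target device, run one forward
    # pass, and keep only the content-layer features.
    content_x = preprocess(content_img, image_shape).copyto(ctx)
    content_y, _ = extract_features(net, content_x, content_layers, style_layers)
    return content_x, content_y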
Example No. 23
    final_vocabs = []
    for vocabs in vocab_list:
        for vocab in vocabs:
            final_vocabs.append(vocab[0])

    final_vocabs = list(set(final_vocabs))[:500]
    with open(vocabs_file, 'w') as f:
        for vocab in final_vocabs:
            f.write(vocab + '\n')


file_indices = get_file_indices('training.txt')
stopword_list = get_stopword_list('stopwords.txt')

contents_list = []
terms_list = []
for i in range(CLASSES):
    content_by_class = get_contents(file_indices[i])
    contents_list.append(content_by_class)  # length is 13
    terms_by_class = generate_terms(content_by_class, stopword_list)
    terms_list.append(terms_by_class)  # length is 13

# further processing for contents
updated_contents_list = []
for contents in contents_list:
    updated_contents = process_content(stopword_list, contents)
    updated_contents_list.append(updated_contents)

total_LLR_list = feature_selection(updated_contents_list, terms_list)
generate_vocabs(total_LLR_list, 'vocabs.txt')
Example No. 24
        return utils.mask(left >> right, 8)

    def Iop_CmpEQ64(self, left, right):
        return 1 if utils.mask(left, 64) == utils.mask(right, 64) else 0

    def Iop_CmpEQ32(self, left, right):
        return 1 if utils.mask(left, 32) == utils.mask(right, 32) else 0

    def Iop_CmpNE64(self, left, right):
        return 1 if utils.mask(left, 64) != utils.mask(right, 64) else 0

    def Iop_CmpNE32(self, left, right):
        return 1 if utils.mask(left, 32) != utils.mask(right, 32) else 0


if __name__ == "__main__":
    import sys
    if len(sys.argv) < 3:
        print "Usage: classifier.py architecture filename [-v]"
        sys.exit(1)

    arch = archinfo.arch_from_id(sys.argv[1]).__class__
    code = utils.get_contents(sys.argv[2])

    classifier = GadgetClassifier(
        arch,
        log_level=logging.DEBUG if len(sys.argv) > 3 else logging.WARNING)
    gadgets = classifier.create_gadgets_from_instructions(code, 0x40000)
    for g in gadgets:
        print g