コード例 #1
0
def memory_search_api():
    """Flask view: run a Search over the request's query parameters.

    Reads ``source``, ``target`` and ``project`` from the query string and
    returns the search result serialized as a JSON response.
    """
    params = [request.args.get(key) for key in ('source', 'target', 'project')]
    search = Search(*params)
    return Response(search.get_json(), mimetype='application/json')
コード例 #2
0
  def run(self):
    """Main controller loop: step the simulation, read sensors, and act.

    Runs until the simulator reports the run is over, each cycle reading the
    proximity sensors and delegating the movement decision to Search.
    """
    while True:
      # Step the simulation by 64 ms; -1 means the simulation is over.
      if self.step(64) == -1:
        break

      # Read the proximity sensors; Search decides what to do with them.
      prox = self.get_proximities()
      search = Search()
      decision = search.determine_action(prox)

      # decision is (should_move, (left_speed, right_speed)).
      if decision[0]:
        speeds = decision[1]
        self.move_wheels(speeds[0], speeds[1], 1.0)
      else:
        self.stop()

      # Debug trace of the chosen action (single-arg print() is valid in
      # both Python 2 and 3, unlike the original bare print statement).
      print(decision)
コード例 #3
0
ファイル: manage.py プロジェクト: actank/zmon
 def GET(self):
     """Handle GET requests for the management page.

     Renders the management UI when no operation is requested, or dispatches
     the ``oper`` parameter: ``updatelist`` triggers the background update
     job via a flag file, ``search`` runs a search over the input.
     """
     logging.debug(self.input)
     userName = auth.uuap_sso()
     products = auth.getProcByName(userName)
     # User has no products/permissions: kill the session and refuse access.
     if products is None or len(products) == 0:
         web.config.session.kill()
         return render.forbidden(userName)
     if self.input is None or len(self.input) == 0:
         return render.manage(userName, products, web.ctx.homedomain)
     # Refresh the tree menu.
     if self.input['oper'] == 'updatelist':
         # Create the flag directory if it does not exist yet.
         dirPath = 'data/updateflag'
         product = self.input['product']
         updateflag = '%s/%s' % (dirPath, product)
         if not os.path.exists(dirPath):
             os.makedirs(dirPath)
         # Touch a flag file to trigger updatelist.sh's update task.
         # (was open()/close(); 'with' guarantees the handle is closed)
         with open(updateflag, 'w'):
             pass
         # Busy-poll until the background job consumes the flag file.
         while os.path.exists(updateflag):
             time.sleep(0.1)
         return json.dumps({'code':200,'txt':u'更新成功'})
     elif self.input['oper'] == 'search':
         s = Search(self.input)
         return s.search()
コード例 #4
0
class BookSearchTests(WLTestCase):
    """Integration tests for exact-match book search on author and title."""

    def setUp(self):
        WLTestCase.setUp(self)

        # Wipe the whole search index so each test starts from a clean slate.
        index = Index()
        self.search = Search()
        index.delete_query(self.search.index.query(uid="*"))
        index.index.commit()

        with self.settings(NO_SEARCH_INDEX=False):
            self.do_doktora = Book.from_xml_file(
                get_fixture('do-doktora.xml', opds))
            self.do_anusie = Book.from_xml_file(
                get_fixture('fraszka-do-anusie.xml', catalogue))

    def _assert_single_hit(self, query, expected):
        # Shared check: the query must return exactly one book, the expected one.
        hits = self.search.search_books(query)
        assert len(hits) == 1
        assert hits[0].id == expected.id

    def test_search_perfect_book_author(self):
        self._assert_single_hit(
            self.search.index.query(authors=u"sęp szarzyński"), self.do_anusie)

    def test_search_perfect_book_title(self):
        # NOTE: slop (word-distance) matching is not covered here.
        self._assert_single_hit(
            self.search.index.query(title=u"fraszka do anusie"), self.do_anusie)
コード例 #5
0
        def searchAction(event):
            """Tk event handler: run a search for the entry-box text.

            Mutates the module-level globals ``query``, ``mTime`` and
            ``title_url`` so the results screen can read them afterwards.
            """
            try:
                global query
                global mTime
                # Pre-seed the globals as Tk variables; both are immediately
                # overwritten with plain values below.
                mTime = StringVar(None)
                query = StringVar(None)
                search_ = Search()
                query = self.searchEntry1.get()

                if query not in (None, '', ' '):
                    # Time the fetch + parse round trip for display.
                    start_time = time.time()
                    global title_url
                    title_url = [None]
                    raw = search_.fetch_url(query)
                    title_url = search_.process_url(raw)
                    self.masters.withdraw()
                    mTime = (time.time() - start_time)
                    self.guiresults()
                    self.timeLabelres = Label(self.masters, text="--- %s seconds ---" % (mTime), font=('arial',8))
                    self.timeLabelres.place(x=150,y=630)
                    
                else:
                    tkMessageBox.showinfo('Info', 'You must put a keyword')
            # NOTE(review): this broad handler assumes every failure is a
            # connectivity problem and exits the app — it also hides real bugs.
            except Exception as e:
                    tkMessageBox.showinfo('Info', 'No Internet Connection Try Again Later')
                    exit()
コード例 #6
0
ファイル: actions.py プロジェクト: AnithaT/eden
 def searchMatchesFound(self, searchString=None):
     """Return the number of search results for *searchString*.

     Wraps the delegated call in start/end coverage tracking.
     """
     self.startCoverage("searchMatchesFound")
     match_count = Search(self).searchMatchesFound(searchString)
     self.endCoverage()
     return match_count
コード例 #7
0
ファイル: views.py プロジェクト: mtpoutanen/lbog
def search_projects(request):
    """Render the project search results page for GET requests.

    Non-GET requests fall through and return None, as in the original.
    """
    if request.method != 'GET':
        return
    search = Search(request)
    ctx = search.get_search_context(search.SEARCH_PROJECTS)
    return render_to_response('project_search_results.html', ctx)
コード例 #8
0
ファイル: application.py プロジェクト: yiyizhai/zone_blog
def search_json():
    """Search endpoint: read a JSON body with a 'keyword' key and fetch results.

    Returns the fetch result, or the literal string 'NONE' when no body was
    posted.
    """
    keyword = request.data
    # BUG FIX: the None guard used to sit *after* json.loads(keyword), so a
    # missing body crashed before the guard could ever fire.
    if keyword is None:
        return 'NONE'
    para = json.loads(keyword)['keyword']
    # Persist the search term for analytics before querying.
    c_log('search', para).save()
    target = Search()
    return target.fetch(para, 3, 1)  # keyword limit isjson
コード例 #9
0
ファイル: xwlists.py プロジェクト: roshow/xwlists
def get_search_results():
    """Run a search from the posted JSON and render results or an error page.

    Returns (rendered results page, 200) on success; renders the error page
    when Search rejects the input with a ValueError.
    """
    try:
        search_text = request.json['search-text']
        s = Search( search_text )
        results = s.search()
        return render_template( 'search_results.html', results=results), 200
    # FIX: 'except ValueError, e' is Python-2-only syntax; 'as' works in 2.6+.
    except ValueError as e:
        return render_template( 'search_error.html', errortext=str(e))
コード例 #10
0
ファイル: views.py プロジェクト: ialibekov/infoproject
def go_search(query, need_suggest=True):
    """Run a search for *query*; optionally compute a suggestion string.

    Returns a (result, suggest) pair; suggest is "" when disabled.
    """
    search = Search()
    suggest = search.generate_suggest(query) if need_suggest else ""
    return search.go(query), suggest
コード例 #11
0
ファイル: views.py プロジェクト: mtpoutanen/lbog
def search_developers(request):
    """Render the developer search results page for GET requests.

    Non-GET requests fall through and return None, as in the original.
    """
    if request.method != 'GET':
        return
    search = Search(request)
    ctx = search.get_search_context(search.SEARCH_DEVELOPERS)
    return render_to_response('developer_search_results.html', ctx)
コード例 #12
0
ファイル: downloads.py プロジェクト: MaximeCheramy/pyrex
 def ask_for_URLs(self):
     """1) Launches a background search for similar files.
     2) When the URLs arrive, set_URLs updates the URL list and then calls
        manage_download.
     3) _ftp.Manage_download adds URLs to help the download."""
     print "Asking for urls"
     search = Search(self.file_share.name, self.file_share.protocol)
     # Wire the Qt signal so incoming results reach set_URLs asynchronously.
     self.resultsReceived.connect(self.set_URLs)
     # do_search runs in the background and emits resultsReceived when done.
     search.do_search(self.resultsReceived.emit)
コード例 #13
0
def autocomplete_api(word):
    """Autocomplete API: wildcard-suffix the word and return JSON matches.

    An optional ``lang`` query parameter restricts the search language.
    """
    word += u"*"
    lang = request.args.get('lang')
    search = Search(word) if lang is None else Search(word, lang)
    search.AutoComplete = True
    return json_answer(search.get_json())
コード例 #14
0
ファイル: ea_progress.py プロジェクト: joeracker/jiraTracker
def get_issues_with_label():
    """Print per-label issue/point counts and the grand totals.

    Iterates the module-level ``labels`` list, querying JIRA for each label.
    """
    total_points = 0
    total_issues = 0
    for label in labels:
        # NOTE(review): assumes search_query ends with a space before "AND" —
        # confirm, otherwise the JQL concatenation is malformed.
        issues = Search(search_query + "AND labels = '%s'" % label)
        print(label)
        # FIX: was `print("...") % (...)`, which only works as a Python 2
        # print *statement*; formatting now happens inside the call.
        print("%s total issues. %s total points." % (
            issues.get_issue_count(), issues.get_point_sum()))
        total_points += issues.get_point_sum()
        total_issues += issues.get_issue_count()
    print("Total issues: %s Total Points: %s" % (total_issues, total_points))
コード例 #15
0
ファイル: mdserver.py プロジェクト: liuchaofeng1230/mdserver
def search_files():
    """Search markdown files under the working directory for a keyword.

    Reads the keyword from the 'w' query parameter and returns a dict with
    the matches (paths made relative to the working directory).
    """
    # BUG FIX: a missing 'w' parameter used to hit len(None) and crash;
    # fall back to the empty string and strip unconditionally.
    keyword = (request.GET.get('w') or '').strip()

    s = Search(os.getcwd(), keyword.decode("utf-8"), ("*.markdown", "*.md"))
    cwd_len = len(os.getcwd())
    # Drop files without a match and strip the cwd prefix from each path.
    result = [[path[cwd_len:], match]
              for path, match in s.walk() if match is not None]
    return dict(results=result, keyword=keyword, request=request)
コード例 #16
0
def dict_index(lletra):
    """Render the dictionary index view for letter *lletra*.

    Any query-string suffix ('?...') is stripped before searching.
    """
    qpos = lletra.find('?')
    if qpos != -1:
        lletra = lletra[:qpos]

    search = Search(lletra)
    search.Index = True
    search.Duplicates = False
    return IndexView(lletra).do(search)
コード例 #17
0
	def run(self):
		"""Execute the search and open an HTML report in the default browser."""
		searcher = Search(DATABASE_FILE_NAME)
		matches = searcher.search(self.term)

		# Render the matches to an HTML file and open it.
		report_path = self.generate_html_file(matches)
		webbrowser.open('file://' + report_path)

		# Search finished: re-enable the search button.
		self.app.button.config(text='Search', state=ACTIVE)
コード例 #18
0
def search_api(word):
    """Word-search API; records usage stats unless the 'it' flag is present.

    An optional ``lang`` query parameter restricts the search language.
    """
    lang = request.args.get('lang')
    internal = request.args.get('it')
    search = Search(word) if lang is None else Search(word, lang)

    # Only track external requests, and only when tracking is switched on.
    if internal is None and is_tracking_enabled():
        save_stats(word, lang)

    return json_answer(search.get_json())
コード例 #19
0
ファイル: tests.py プロジェクト: darora/cs3245
class TestSkipListMerging(unittest.TestCase):
    """Tests for merging skip lists with AND/OR operations."""

    def get_skipList(self, length):
        """Build a SkipList of *length* sorted random ints in [0, length*4]."""
        lst = SkipList()
        data = sorted([random.randint(0, length * 4) for i in range(0, length)])
        for i in range(0, length):
            lst.append(data[i])  # TODO: swap out with SkipList(data)
        return lst

    def setUp(self):
        self.la = self.get_skipList(10)
        self.lb = self.get_skipList(20)
        self.lc = self.get_skipList(20)
        postings_file = "dev_postings.data"
        dictionary_file = "dev_dict.data"
        self.search = Search(postings_file, dictionary_file)

    def list_equality(self, lsta, lstb):
        """Assert both lists have equal length and contents; log on failure."""
        try:
            self.assertEqual(len(lsta), len(lstb))
            self.assertEqual(lsta.get_list(), lstb.get_list())
        except AssertionError as e:
            logging.info(lsta)
            logging.info(lstb)
            # FIX: were Python-2-only print statements; single-argument
            # print() behaves identically on both 2 and 3.
            print(lsta)
            print(lstb)
            raise e

    def test_mergingSingleListShouldReturnTheList(self):
        """
        A single skip list should just be casted into a simple list and returned
        """
        results = self.search.merge_results(Operation.AND, self.la)
        self.list_equality(self.la, results)

    def test_mergingTwoListsORshouldReturnUnion(self):
        results = self.search.merge_results(Operation.OR, self.la, self.lb)
        # Expected: sorted, de-duplicated union of both lists.
        la = self.la.get_list()
        la.extend(self.lb.get_list())
        la = list(set(la))
        la.sort()
        self.list_equality(results, SkipList(la))

    def test_mergingTwoListsOverANDshouldReturnIntersection(self):
        results = self.search.merge_results(Operation.AND, self.la, self.lb)
        # Expected: sorted intersection of both lists.
        la = set(self.la.get_list())
        lb = set(self.lb.get_list())
        ls = la & lb
        ls = list(ls)
        ls.sort()
        self.list_equality(results, SkipList(ls))
コード例 #20
0
    def get_next_redo():
        """Fetch the next 'redo' transaction-log entry for the current
        user and project, ordered by timestamp."""
        from pyasm.biz import Project
        namespace = Project.get_project_code()
        user_name = Environment.get_user_name()

        search = Search("sthpw/transaction_log")
        for column, value in (("login", user_name),
                              ("namespace", namespace),
                              ("type", "redo")):
            search.add_filter(column, value)
        search.add_order_by("timestamp")
        return search.get_sobject()
コード例 #21
0
ファイル: views.py プロジェクト: kenluck2001/Wikoid_v2
def results(search_text):
    """Build a link graph seeded from the Wikipedia page for *search_text*.

    Returns a list of {'source': ..., 'target': ...} edge dicts.
    """
    # Cleanup: removed a no-op self-assignment and the unused page_title local.
    WIKIPEDIA_BASE = 'https://en.wikipedia.org'
    seed = WIKIPEDIA_BASE + '/wiki/' + search_text.capitalize()
    search = Search(seed)
    graph = search.create_graph()
    return [{"source": source, "target": dest} for source, dest in graph]
コード例 #22
0
ファイル: main.py プロジェクト: farhan0581/CBIR
def call_search():
    """Run a content-based image search for the name entered in the UI.

    Dispatches on the module-level mode flags: ORB feature search when
    par_orb is set, color-histogram search when par_color is set.
    """
    global par_orb, par_color
    info = v.get()
    path = z.get()  # NOTE(review): unused/overwritten below; kept for the widget read
    if info == '':
        tkMessageBox.showinfo('ERROR','Please enter image name!!!')
        return
    # Cleanup: was `elif info != '':`, which is just an always-true else.
    if par_orb == 1:
        s = Searcher(info)
        s.search_image()
    elif par_color == 1:
        path = '/home/farhan/project/CBIR/my_contrib/index.csv'
        obj = Search(info, path)
        obj.main_search()
コード例 #23
0
    def get_last(type=None):
        """Fetch the most recent transaction-log entry for the current user
        and project, optionally filtered by *type*."""
        from pyasm.biz import Project
        namespace = Project.get_project_code()
        user_name = Environment.get_user_name()

        search = Search("sthpw/transaction_log")
        search.add_filter("login", user_name)
        search.add_filter("namespace", namespace)
        if type:
            search.add_filter("type", type)
        # Newest entry first, so get_sobject() returns the latest one.
        search.add_order_by("timestamp desc")
        return search.get_sobject()
コード例 #24
0
def api_organisations():
    """Organisation-search API endpoint.

    Builds a search filter from the query string, requires at least one
    textual criterion, and returns paginated JSON results with timing info.
    """
    timer = Timer()
    timer.start()

    app.logger.debug(request.args)

    # Cleanup: local was named `filter`, shadowing the builtin.
    text_keys = ("handelsnaam", "kvknummer", "straat", "huisnummer",
                 "postcode", "plaats")
    criteria = {key: check_args(key) for key in text_keys}
    criteria["hoofdvestiging"] = check_args_boolean("hoofdvestiging", True, False)
    criteria["nevenvestiging"] = check_args_boolean("nevenvestiging", True, False)
    criteria["rechtspersoon"] = check_args_boolean("rechtspersoon", True, False)
    criteria["vervallen"] = check_args_boolean("vervallen", False, True)
    criteria["uitgeschreven"] = check_args_boolean("uitgeschreven", False, True)

    app.logger.debug(criteria)

    # Refuse requests with no textual criterion at all (was a long == "" chain).
    if all(criteria[key] == "" for key in text_keys):
        return unprocessable_entity()

    startpage = int(request.args.get('startpage', 1))
    maxpages = int(request.args.get('maxpages', 1))

    try:
        search = Search(criteria, startpage, maxpages)
    except NoResultsError:
        return not_found()

    results = search.run()
    timer.stop()

    results["total_exectime"] = timer.exectime()
    results["api_version"] = "v1"
    results["release"] = release

    resp = jsonify(results)
    resp.status_code = 200
    return resp
コード例 #25
0
ファイル: app.py プロジェクト: xyhuang/streamsense
def logreduce():
    """Upload a log file, index its lines into elastic search, and render
    the logreduce page showing the last processed filename."""
    if request.method == 'POST':
        file = request.files['file']
        if file and allowed_file(file.filename):
            # Sanitize the client-supplied name before building the save path.
            filename = secure_filename(file.filename)
            inputfilename = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            print "in logreduce and about to save file", inputfilename
            file.save(inputfilename)
            datastore = read_lines_from_file(inputfilename)
            #insert into elastic search
            # NOTE(review): jsonify() builds a Flask Response object, not a
            # JSON string — Search.insert likely wants json.dumps(line); confirm.
            for line in datastore:
                Search.insert(jsonify(line))
            # Cache the parsed lines and filename in app config for later views.
            app.config['DATA'] = datastore
            app.config['LAST_FILENAME'] = inputfilename
    return render_template('pages/logreduce.html', dataLoaded=app.config['LAST_FILENAME'])
コード例 #26
0
ファイル: transaction_state.py プロジェクト: 0-T-0/TACTIC
    def get_by_ticket(ticket):
        """Look up the TransactionState for *ticket*, creating an empty one
        when none exists yet."""
        search = Search(TransactionState)
        search.add_filter("ticket", ticket)
        state = search.get_sobject()
        if state:
            return state

        # No state stored yet: create one holding an empty <state/> document.
        state = SObjectFactory.create(TransactionState.SEARCH_TYPE)
        state.set_value("ticket", ticket)
        data = Xml()
        data.create_doc("state")
        state.set_value("data", data.to_string())
        state.commit()
        return state
コード例 #27
0
def hint(request):
    """Autocomplete endpoint: suggest tags and books matching the 'term'
    GET parameter. Returns JSON (optionally JSONP via 'callback')."""
    prefix = request.GET.get('term', '')
    # Too short to be a useful prefix — return no suggestions.
    if len(prefix) < 2:
        return JSONResponse([])

    prefix = remove_query_syntax_chars(prefix)

    search = Search()
    # Tags will do the limiting here,
    # but tags may apply to a book or to fragments:
    # if a tag applies only to the book, the new ones must be in the same book;
    # if it concerns themes, they must be in the same fragment.

    tags = search.hint_tags(prefix, pdcounter=True)
    books = search.hint_books(prefix)

    def is_dupe(tag):
        # NOTE(review): relies on Python 2's filter() returning a list —
        # under Python 3 a filter object is always truthy, so these checks
        # would always succeed; confirm target interpreter before porting.
        if isinstance(tag, PDCounterAuthor):
            if filter(lambda t: t.slug == tag.slug and t != tag, tags):
                return True
        elif isinstance(tag, PDCounterBook):
            if filter(lambda b: b.slug == tag.slug, tags):
                return True
        return False

    # Drop PDCounter entries that duplicate a real tag (same slug).
    tags = filter(lambda t: not is_dupe(t), tags)

    def category_name(c):
        # Strip the 'pd_' prefix from PDCounter categories before translation.
        if c.startswith('pd_'):
            c = c[len('pd_'):]
        return _(c)

    callback = request.GET.get('callback', None)
    data = [{'label': t.name,
              'category': category_name(t.category),
              'id': t.id,
              'url': t.get_absolute_url()}
              for t in tags] + \
              [{'label': b.title,
                'category': _('book'),
                'id': b.id,
                'url': b.get_absolute_url()}
                for b in books]
    # JSONP when a callback was supplied, plain JSON otherwise.
    if callback:
        return HttpResponse("%s(%s);" % (callback, json.dumps(data)),
                            content_type="application/json; charset=utf-8")
    else:
        return JSONResponse(data)
コード例 #28
0
ファイル: app.py プロジェクト: jbgage/Portfolio
def search(term=''):
    """Query the elastic-search index for *term* and return the result JSON.

    Returns an empty dict when the term is empty or the lookup fails
    (failures are logged, not raised).
    """
    fileConfig('config/logging.conf')
    prop = PropertyUtil('config/application.conf')
    logger = logging.getLogger(__name__)
    json_data = {}
    try:
        logger.info('Term = {0}'.format(term))
        if prop is not None:
            logger.info('Index Name = {0}'.format(prop.elasticSearchIndexName))
            search = Search(prop , logger )
            if term != '':
                data = search.get(term)
                json_data = json.dumps(data)
    # FIX: 'except Exception , error' is Python-2-only syntax.
    except Exception as error:
        logger.error('Exception occurred - {0}'.format(str(error)))
    # BUG FIX: json_data was computed but never returned.
    return json_data
コード例 #29
0
ファイル: searchem.py プロジェクト: agrawal-git/csce670
def search():
  """Run a hard-coded tweet search and return one tweet as JSON.

  NOTE(review): the lines that assigned `tweet` are commented out, so the
  jsonify(...) call below references an undefined name and will raise
  NameError at runtime — confirm the intended data source.
  """
  #rand  = random.randrange(0, db.session.query(Tweet).count())
  #tweet = db.session.query(Tweet)[rand]
  username = "******"
  query = "south" #get it from the index page text field
  search_obj = Search(username,query)
  # NOTE(review): `search_result` is never used below; jsonify reads fields
  # from the undefined `tweet` instead (see note above).
  search_result = search_obj.search()
  # ??? how to send a list using jsonfy
  return jsonify(
    id=tweet.tweet_id,
    text=tweet.text,
    link=tweet.link,
    user_id=tweet.user_id,
    screen_name=tweet.screen_name,
    retweet_count=tweet.retweet_count,
  )
コード例 #30
0
ファイル: views.py プロジェクト: davedash/mealadvisor
def search(request):
    """Search for restaurants or locations matching the 'q' GET parameter
    and render the shared results template."""
    query = request.GET.get('q', '')
    engine = Search(query)

    results = engine.get_results()

    # The engine decides which kind of results it produced; the template
    # picks the matching context key.
    context = {'query': query}
    bucket = 'restaurants' if engine.result_type == 'Restaurant' else 'locations'
    context[bucket] = results

    return render_to_response("common/search.html", context,
    context_instance=RequestContext(request))
コード例 #31
0
 def occurence_not_unique(self, element, mineral_dict):
     """Count minerals matching *element* when its symbol is not a unique
     prefix among the element symbols.

     WARNING(review): eval() is run on the value looked up from
     periodic().element_with_not_unique_starting() — if that table is ever
     user-controlled this executes arbitrary code; consider
     ast.literal_eval or storing real objects instead of strings.
     """
     return len(
         Search.search_spec(
             element,
             eval(periodic().element_with_not_unique_starting().get(
                 element)), mineral_dict))
コード例 #32
0
from datetime import datetime
from flask import Flask, render_template, request
from search import Search
from naivebayes import NaiveBayes
from imagesearch import ImageSearch
from imagecaption import ImageCaption
from sklearn.model_selection import train_test_split
from zipfile import ZipFile

# Unpack the preprocessed data bundle before constructing any components.
with ZipFile('pre_processed_data.zip', 'r') as zipObj:
    zipObj.extractall()

# Module-level singletons: each component is built and initialised once at
# import time, so importing this module has significant side effects.
search = Search()
search.init()

naive_bayes = NaiveBayes()
naive_bayes.init()

imagesearch = ImageSearch()
imagesearch.init()

image_caption = ImageCaption()
image_caption.init()

app = Flask(__name__)


@app.route("/")
def hello():
    """Landing page: plain-HTML links to the three demo endpoints."""
    endpoints = [('/search', 'Search'),
                 ('/classify', 'Classify'),
                 ('/image_search', 'Image Search')]
    return '<br>'.join('<a href="%s">%s</a>' % (url, label)
                       for url, label in endpoints)
コード例 #33
0
from crawler import TaskManager
from search import Search
from storage import Directory, Option, Task, User
from storage import LocalStorage, DuplicateDirectoryException, DuplicateUserException
from thumbnail import ThumbnailGenerator

# Application-wide singletons, constructed once at import time.
app = Flask(__name__)
# NOTE(review): hard-coded secret key — load from config/env in production.
app.secret_key = "A very secret key"
storage = LocalStorage(config.db_path)

# Disable flask logging
flaskLogger = logging.getLogger('werkzeug')
flaskLogger.setLevel(logging.ERROR)

tm = TaskManager(storage)
search = Search(config.elasticsearch_index)


def get_dir_size(path):
    """Return the total size in bytes of all files under *path*, recursively."""
    total = 0
    for root, _dirs, filenames in os.walk(path):
        total += sum(os.path.getsize(os.path.join(root, name))
                     for name in filenames)
    return total


@app.route("/user/<user>")
def user_manage(user):
コード例 #34
0
    def Search(self,
               lang,
               cond,
               headcond=None,
               finheaddepcond=None,
               depcond2=None,
               headdepcond=None,
               prevcond=None,
               nextcond=None,
               secondnextcond=None,
               prevornext=False,
               samesentencecond=None,
               secondpreviouscond=None,
               limited=None,
               broad=False):
        """Build, run and record a monolingual Search over *lang*'s corpus.

        Returns the executed Search object; it is also appended to
        self.searches for later inspection.
        """
        con = self.cons[lang]['db']

        thisSearch = Search(con=con)
        thisSearch.queried_table = self.cons[lang]['table']
        thisSearch.isparallel = False
        thisSearch.toplevel = "sentence_id"
        # NOTE(review): the 'limited' parameter is accepted but ignored —
        # the search is always unlimited here; confirm intent.
        thisSearch.limited = False
        thisSearch.ConditionColumns.append(cond)
        thisSearch.broad = broad
        thisSearch.non_db_data = None

        # Contextual conditions on surrounding tokens / dependency structure.
        thisSearch.headcond = headcond
        thisSearch.secondpreviouscond = secondpreviouscond
        thisSearch.prevcond = prevcond
        thisSearch.nextcond = nextcond
        if prevornext:
            # It is enough for either surrounding word to fulfil a criterion.
            thisSearch.prevornext['ison'] = True
        thisSearch.samesentencecond = samesentencecond
        thisSearch.secondnextcond = secondnextcond
        thisSearch.headdepcond = headdepcond
        thisSearch.finheaddepcond = finheaddepcond
        thisSearch.depcond2 = depcond2

        thisSearch.Run(False)

        # BUG FIX: the search used to be appended to self.searches twice.
        self.searches.append(thisSearch)

        # Store the results in a more comfortable way.
        SimplifyResultSet(thisSearch)

        logging.info(thisSearch.absolutematchcount)

        return thisSearch
コード例 #35
0
                        action='store_true',
                        help="Delete existing listing.")
    parser.add_argument(
        '--print',
        nargs='?',
        const=1,
        type=int,
        help=
        "Print 10 listings per page, pass integer argument for print select page"
    )
    return parser.parse_args()  # Return parsed arguments.


# Below is the part of main.py that will execute.
if __name__ == "__main__":
    searcher = Search()
    editor = ListingEditor()
    raw_args = parse_args()
    Sorter = QuickSort()
    arg_list = {}

    path_active = "data_files/active_listings.json"
    path_matched = "data_files/matched_listings.json"
    path_sorted = "data_files/price_sorted_listings.json"

    for key in vars(raw_args):  # Key refers to item being detailed by user.
        value = getattr(raw_args, key)  # Value of item, this is users input.
        arg_list[key] = value

    #Check if the argument for print is 0, or an error will occur
    if arg_list['print'] == 0:
コード例 #36
0
class QueryReofrmulatorEnv(Env):
    def __init__(self, DATA_DIR, dset, is_train, verbose, reward='RECALL'):
        # this method returns simulator, state/action vocabularies, and the maximum number of actions
        n_words = 100  # 374000 # words for the vocabulary
        vocab_path = os.path.join(
            DATA_DIR, 'data/D_cbow_pdw.pkl'
        )  # Path to the python dictionary containing the vocabulary.
        wordemb_path = os.path.join(
            DATA_DIR, 'data/D_cbow_pdw.pkl'
        )  # Path to the python dictionary containing the word embeddings.
        dataset_path = os.path.join(
            DATA_DIR, 'data/msa_dataset.hdf5'
        )  # path to load the hdf5 dataset containing queries and ground-truth documents.
        docs_path = os.path.join(
            DATA_DIR,
            'data/msa_corpus.hdf5')  # Path to load the articles and links.
        docs_path_term = os.path.join(
            DATA_DIR,
            'data/msa_corpus.hdf5')  # Path to load the articles and links.
        ############################
        # Search Engine Parameters #
        ############################
        n_threads = 1  # 20 # number of parallel process that will execute the queries on the search engine.
        index_name = 'index'  # index name for the search engine. Used when engine is 'lucene'.
        index_name_term = 'index_terms'  # index name for the search engine. Used when engine is 'lucene'.
        use_cache = False  # If True, cache (query-retrieved docs) pairs. Watch for memory usage.
        max_terms_per_doc = 15  # Maximum number of candidate terms from each feedback doc. Must be always less than max_words_input .
        self.vocab = utils.load_vocab(vocab_path, n_words)
        vocabinv = {}
        for k, v in self.vocab.items():
            vocabinv[v] = k
        self.reward = reward
        self.is_train = is_train
        self.search = Search(engine=lucene_search.LuceneSearch(
            DATA_DIR, self.vocab, n_threads, max_terms_per_doc, index_name,
            index_name_term, docs_path, docs_path_term, use_cache))

        t0 = time()
        dh5 = dataset_hdf5.DatasetHDF5(dataset_path)
        self.qi = dh5.get_queries(dset)
        self.dt = dh5.get_doc_ids(dset)
        print("Loading queries and docs {}".format(time() - t0))
        self.reset()
        '''for _, train_index in kf:
            qi, qi_i, qi_lst, D_gt_id, D_gt_url = self.get_samples(qi, dt, vocab, train_index, self.search.engine, max_words_input=self.search.max_words_input)
            # share the current queries with the search engine.
            current_queries = qi_lst
            i=3
            print 'Input Query:       ', qi[i].replace('\n', '\\n')
            print 'Target Docs: ', str(D_gt_url[i])
            print 'Input Query Vocab: ', utils.idx2text(qi_i[i], vocabinv)
            n_iterations = 2 # number of query reformulation iterations.
            for n_iter in range(n_iterations):
                print("current_queries", len(current_queries), current_queries)
                if n_iter < self.search.q_0_fixed_until:
                    ones = np.ones((len(current_queries), self.search.max_words_input))
                    if n_iter > 0:
                        # select everything from the original query in the first iteration.
                        reformulated_query = np.concatenate([ones, ones], axis=1)
                    else:
                        reformulated_query = ones
                print 'reformulated_query', reformulated_query.shape
                #reformulated_query is our action!!!
                metrics, D_i_, D_id_, D_gt_m_ = self.search.perform(reformulated_query, D_gt_id, self.is_train, current_queries)
                print "D_id_", D_id_
                print 'Iteration', n_iter
                print '  '.join(self.search.metrics_map.keys())
                print metrics.mean(0)
                print
                print 'Retrieved Docs:    ', str([self.search.engine.id_title_map[d_id] for d_id in D_id_[i]])
                print
                print 'Reformulated Query:', self.search.reformulated_queries[n_iter][i]
                print
                print 'Query ANS:         ',
                for kk, word in enumerate(current_queries[i][:reformulated_query.shape[1]]):
                    if word not in vocab and word != '':
                        word += '<unk>'
                    if reformulated_query[0, kk] == 1:
                        word = word.upper()
                    print str(word),
                print
                print'''

    def get_samples(self,
                    input_queries,
                    target_docs,
                    vocab,
                    index,
                    engine,
                    max_words_input=200):
        qi = [utils.clean(input_queries[t].lower()) for t in index]
        D_gt_title = [target_docs[t] for t in index]

        D_gt_id_lst = []
        for j, t in enumerate(index):
            #print("j",j)
            D_gt_id_lst.append([])
            for title in D_gt_title[j]:
                #print("title", title)
                if title in engine.title_id_map:
                    D_gt_id_lst[-1].append(engine.title_id_map[title])
                #else:
                #    print 'ground-truth doc not in index:', title

        D_gt_id = utils.lst2matrix(D_gt_id_lst)

        qi_i, qi_lst_ = utils.text2idx2(qi, vocab, max_words_input)
        #print("qi_i", qi_i)
        #print("qi_lst_", qi_lst_)

        qi_lst = []
        for qii_lst in qi_lst_:
            # append empty strings, so the list size becomes <dim>.
            qi_lst.append(qii_lst +
                          max(0, max_words_input - len(qii_lst)) * [''])
        return qi, qi_i, qi_lst, D_gt_id, D_gt_title

    def _seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def step(self, action):
        done = False
        reformulated_query, current_queries, D_gt_id = action
        metrics, D_i_, D_id_, D_gt_m_ = self.search.perform(
            reformulated_query, D_gt_id, self.is_train, current_queries)
        print "D_id_", D_id_
        i = 3
        print "ALALALA ", [
            self.search.engine.id_title_map[d_id] for d_id in D_id_[i]
        ]
        text = [[self.search.engine.id_title_map[d_id] for d_id in D_id_[i]]
                for i in range(D_id_.shape[0])]
        actions = current_queries
        metric_idx = self.search.metrics_map[self.reward.upper()]
        reward = metrics[metric_idx]
        if (len(actions) == 0):  # or self.counsteps > 10):
            done = True
        return [text, actions], reward, done, {}

    def reset(self):
        """
        Resets the state of the environment, returning an initial observation.
        Outputs
        -------
        observation : the initial observation of the space. (Initial reward is assumed to be 0.)
        """
        t0 = time()
        #for now lets get one sample with all.
        kf = utils.get_minibatches_idx(len(self.qi),
                                       len(self.qi),
                                       shuffle=True)
        _, train_index = kf[
            0]  #iterate if len(kf)>1 --> for _, train_index in kf:
        print "kf", kf, len(self.qi)
        print("Got minibatch index {}".format(time() - t0))

        qi, qi_i, qi_lst, D_gt_id, D_gt_url = self.get_samples(
            self.qi,
            self.dt,
            self.vocab,
            train_index,
            self.search.engine,
            max_words_input=self.search.max_words_input)

        current_queries = qi_lst
        n_iterations = 1  # number of query reformulation iterations.
        if n_iterations < self.search.q_0_fixed_until:
            ones = np.ones((len(current_queries), self.search.max_words_input))
            reformulated_query = ones
            if n_iterations > 0:
                # select everything from the original query in the first iteration.
                reformulated_query = np.concatenate([ones, ones], axis=1)

        print 'reformulated_query', reformulated_query.shape
        # reformulated_query is our action!!!

        actions = reformulated_query, current_queries, D_gt_id
        [text, actions], reward, done, found = self.step(action)
        print "text", text
        print "actions", actions
        print "rew", reward
        return [text, actions]

    def __del__(self):
        pass

    def get_tokenizers(self):
        state_tokenizer = nltk.word_tokenize
        action_tokenizer = nltk.word_tokenize
        return state_tokenizer, action_tokenizer
コード例 #37
0
def main():
    """Interactive entry point for the GitHub search helper.

    Parses the ``--type`` flag first (so ``--help`` and invalid flags exit
    before any prompting), then asks the user for the repository query and
    the code snippet to search for, and runs the requested stage(s) of the
    pipeline.
    """
    parser = argparse.ArgumentParser(
        description="GitHub search helper. Program will ask for a input query. "
        "e.g., 'UnsupportedEncodingException UTf-8 in:file"
        "language:java' and will replace ' ' with '+'")
    parser.add_argument("--type",
                        "-t",
                        help="How much of the process do you want to run? "
                        "(1) Full "
                        "(2) Only find potential repos "
                        "(3) Search repos for code",
                        type=int,
                        default=1)
    # BUG FIX: previously parse_args() ran only after both input() prompts,
    # so `--help` or an invalid flag surfaced after the user had typed
    # everything. Parse up front instead.
    run_type = parser.parse_args().type

    query = input("What is your search query? ")
    query = query.replace(" ", "+")
    search = input("What is the code you are searching for? ")
    search = prepare_search(search)
    pipeline = Search("query_config.ini", query, search)

    if run_type == 1:
        pipeline.find_repos()
        pipeline.find_code_in_repo()
    elif run_type == 2:
        pipeline.find_repos()
    elif run_type == 3:
        pipeline.find_code_in_repo()
    else:
        # Any other value: just report the remaining API search quota.
        pipeline.check_search_rate()
from node import Node
from problem import Problem
from search import Search

p = Problem()
s = Search(p)

# BFS returns the route goal-first; flip it to start-to-goal before printing.
path = list(reversed(s.BFS().path))
print(path)
コード例 #39
0
class alltubefilmy:
    """XBMC/Kodi service scraper for alltubefilmy.pl / alltube.tv.

    Builds plugin directory listings (main menu, categories, search
    results, episode lists) by scraping the site's HTML, and hands
    playable URLs to the shared mrknow player.  Relies on module-level
    globals defined elsewhere in this file: ``mainUrl``, ``catUrl``,
    ``HOST``, ``MENU_TAB``, ``sort_asc``, ``log`` and the ``mrknow_*``
    helper modules.  NOTE(review): the file mixes print() calls with a
    bare ``print match`` statement, so this is Python 2 code.
    """

    def __init__(self):
        # Wire up the shared mrknow helpers and the generic Search service;
        # free-text searches are delegated back to listsItemsOther().
        log.info('Starting alltubefilmy.pl')
        self.cm = mrknow_pCommon.common()
        self.parser = mrknow_Parser.mrknow_Parser()
        self.pp = mrknow_Pageparser.mrknow_Pageparser()
        self.up = mrknow_urlparser.mrknow_urlparser()
        self.player = mrknow_Player.mrknow_Player()
        self.search = Search(url='%(quoted)s',
                             service='alltubefilmy',
                             listItemsFun=self.listsItemsOther)

    def listsMainMenu(self, table):
        """Emit one top-level directory entry per value in *table*."""
        # The dict key (num) is unused; only the label (val) matters here.
        for num, val in table.items():
            self.add('alltubefilmy', 'main-menu', val, 'None', 'None', 'None',
                     'None', 'None', True, False)
        xbmcplugin.endOfDirectory(int(sys.argv[1]))

    def listsCategoriesMenu(self):
        """Scrape the category filter list and emit one entry per category."""
        HEADER = {
            'Accept-Language': 'pl,en-US;q=0.7,en;q=0.3',
            'Referer': mainUrl,
            'User-Agent': HOST
        }
        query_data = {
            'url': catUrl,
            'use_host': False,
            # NOTE(review): duplicate 'use_host' key -- the second wins;
            # one of these was presumably meant to be a different flag.
            'use_host': False,
            'use_header': True,
            'header': HEADER,
            'use_cookie': False,
            'use_post': False,
            'return_data': True
        }
        link = self.cm.getURLRequestData(query_data)
        soup = BeautifulSoup(link)
        linki_ost = soup.find('ul', {"class": "filter-list filter-category"})
        #print("link",link)
        print("M1", linki_ost)
        if linki_ost:
            linki_all = linki_ost.findAll('li')
            for mylink in linki_all:
                print("m", mylink.text, mylink['data-id'])
                #murl = catUrl + match1[i][0].replace('.html','')
                self.add('alltubefilmy', 'categories-menu', mylink.text,
                         mylink.text, 'None',
                         catUrl + 'kategoria[' + mylink['data-id'] + ']+',
                         'None', 'None', True, False, str(1),
                         mylink['data-id'])
        xbmcplugin.endOfDirectory(int(sys.argv[1]))

    def getSearchURL(self, key):
        """Build the site search URL for *key*; return False when key is None."""
        if key != None:
            url = mainUrl + '/search?search_query=' + urllib.quote_plus(
                key) + '&x=0&y=0'
            return url
        else:
            return False
        #req = urllib2.Request(url)
        #req.add_header('User-Agent', HOST)
        #openURL = urllib2.urlopen(req)
        #readURL = openURL.read()

    def listsItemsOther(self, key):
        """Search handler: POST *key* to alltube.tv and list film results.

        When the response also contains a series section, only the film
        part (between the two h4 headers) is parsed.
        """
        log(key)
        query_data = {
            'url': 'http://alltube.tv/szukaj',
            'use_host': False,
            'use_cookie': False,
            'use_post': True,
            'return_data': True
        }
        post_data = {'search': key}
        link = self.cm.getURLRequestData(query_data, post_data)
        #log(link)
        if 'Seriale:' in link:
            link = re.compile('<h4>Filmy:</h4>(.*?)<h4>Seriale:</h4>',
                              re.DOTALL).findall(link)[0]
        log(link)
        soup = BeautifulSoup(link)
        linki_ost = soup.findAll('div', {"class": "item-block clearfix"})
        log("link %s" % link)
        if linki_ost:
            for mylink in linki_ost:
                log("Link2 %s" % mylink.a.h3.text)
                self.add('alltubefilmy', 'playSelectedMovie', 'None',
                         mylink.a.h3.text, mylink.a.img['src'],
                         mylink.a['href'], 'aaaa', 'None', False, True)
        xbmcplugin.endOfDirectory(int(sys.argv[1]))

    def GetImage(self, url):
        """Scrape the poster image URL from a film page; '' when not found."""
        query_data = {
            'url': url,
            'use_host': False,
            'use_cookie': False,
            'use_post': True,
            'return_data': True
        }
        link = self.cm.getURLRequestData(query_data)
        # The regex matches the site's exact whitespace layout, so it is
        # brittle against markup changes.
        match2 = re.compile(
            '<div class="span2">\n                       <img src="(.*?)" alt=""/>\n                       \n                    </div>',
            re.DOTALL).findall(link)
        if len(match2) > 0:
            return match2[0]
        else:
            return ""

    def listsItems(self, url, strona='', kategoria=''):
        """List one results page from *url* plus a 'next page' entry.

        strona is the current page number (defaults to 1); kategoria is
        the category id used when building the next-page URL.
        """
        if strona == '':
            strona = 1
        nowastrona = int(strona) + 1

        # Build the URL for the *next* page; the current page is fetched
        # from the url argument below.
        if kategoria == 'None':
            myurl = catUrl + 'strona[' + str(nowastrona) + ']+'
        else:
            myurl = catUrl + 'kategoria[' + kategoria + ']+' + 'strona[' + str(
                nowastrona) + ']+'

        HEADER = {
            'Accept-Language': 'pl,en-US;q=0.7,en;q=0.3',
            'Referer': mainUrl,
            'User-Agent': HOST
        }
        query_data = {
            'url': url,
            'use_host': False,
            'use_header': True,
            'header': HEADER,
            'use_cookie': False,
            'use_post': False,
            'return_data': True
        }
        link = self.cm.getURLRequestData(query_data)

        soup = BeautifulSoup(link)
        linki_ost = soup.findAll('div',
                                 {"class": "col-xs-12 col-sm-6 col-lg-4"})
        #print("link",link)
        if linki_ost:
            #linki_all = soup.findAll('div', {"class": "series"})
            for mylink in linki_ost:
                #print("m",mylink)
                #print("M2",mylink.a['href'])
                #print("M3",mylink.img['src'])
                print("M4", url, strona, mylink.h3.text)
                #add(self, service, name,               category, title,     iconimage, url, desc, rating, folder = True, isPlayable = True):
                self.add('alltubefilmy', 'playSelectedMovie', 'None',
                         mylink.h3.text, mylink.img['src'], mylink.a['href'],
                         'aaaa', 'None', False, True)
        # add(self, service, name,                   category,        title,       iconimage,          url,                           desc, rating, folder = True, isPlayable = True,strona=''):
        self.add('alltubefilmy', 'categories-menu', 'Następna strona',
                 'Następna strona', 'None', myurl, 'None', 'None', True, False,
                 str(nowastrona), kategoria)

        xbmcplugin.endOfDirectory(int(sys.argv[1]))

    def listsItemsA(self, url):
        """List the alphabetical letter index scraped from *url*."""
        query_data = {
            'url': url,
            'use_host': False,
            'use_cookie': False,
            'use_post': True,
            'return_data': True
        }
        link = self.cm.getURLRequestData(query_data)
        match = re.compile('<li class="letter">(.*?)</li>',
                           re.DOTALL).findall(link)
        print(match)
        if len(match) > 0:
            for i in range(len(match)):
                self.add('alltubefilmy', 'page-menu', 'None', match[i], 'None',
                         mainUrl, 'aaaa', 'None', True, False)
        xbmcplugin.endOfDirectory(int(sys.argv[1]))

    def listsItemsS(self, url, strona):
        """List titles under the letter *strona* of the alphabetical index."""
        query_data = {
            'url': url,
            'use_host': False,
            'use_cookie': False,
            'use_post': True,
            'return_data': True
        }
        link = self.cm.getURLRequestData(query_data)
        match0 = re.compile(
            '<li data-letter="' + strona + '"><a href="(.*?)">(.*?)</a></li>',
            re.DOTALL).findall(link)
        print("Match", match0)
        #match1 = re.compile('<li><a href="(.*?)">(.*?)</a></li>\n', re.DOTALL).findall(match[0])
        #print match1
        if len(match0) > 0:
            for i in range(len(match0)):
                title = match0[i][1]

                self.add('alltubefilmy', 'items-menu', 'None',
                         self.cm.html_special_chars(title), 'None',
                         match0[i][0], 'aaaa', 'None', True, False)
        xbmcplugin.endOfDirectory(int(sys.argv[1]))

    def listsItemsOst(self, url):
        """List the 'recently added episodes' section scraped from *url*."""
        HEADER = {
            'Accept-Language': 'pl,en-US;q=0.7,en;q=0.3',
            'Referer': mainUrl,
            'User-Agent': HOST
        }
        query_data = {
            'url': url,
            'use_host': False,
            # NOTE(review): duplicate 'use_host' key, same as in
            # listsCategoriesMenu -- the second one wins.
            'use_host': False,
            'use_header': True,
            'header': HEADER,
            'use_cookie': False,
            'use_post': False,
            'return_data': True
        }
        link = self.cm.getURLRequestData(query_data)
        soup = BeautifulSoup(link)
        linki_ost = soup.find('div', {"class": "col-sm-9"})
        #print("link",link)
        if linki_ost:
            linki_all = soup.findAll('div', {"class": "series"})
            for mylink in linki_all:
                print("m", mylink)
                print("M2", mylink.a['href'])

                myimage = mylink.img['src']
                mytitle = mylink.contents[1].text
                myhref = mylink.a['href']
                #myseries = mylink.contents[2].findAll('li')
                #for myitem in myseries:
                self.add('alltubefilmy', 'playSelectedMovie', 'None', mytitle,
                         myimage, myhref, 'aaaa', 'None', False, True, '')

        # NOTE(review): this duplicates the last loop entry, and raises
        # NameError (mytitle unbound) when no results were found -- it
        # looks like leftover code that should be removed.
        self.add('alltubefilmy', 'playSelectedMovie', 'None', mytitle, myimage,
                 myhref, 'aaaa', 'None', False, True, '')

        xbmcplugin.endOfDirectory(int(sys.argv[1]))

    def listsItemsTop(self, url):
        """List the 'popular today' table scraped from *url*."""
        query_data = {
            'url': url,
            'use_host': False,
            'use_cookie': False,
            'use_post': True,
            'return_data': True
        }
        link = self.cm.getURLRequestData(query_data)
        match = re.compile(
            '<!-- popularne dzisiaj -->\n(.*?)<!-- /popularne dzisiaj -->',
            re.DOTALL).findall(link)
        # Python 2 print statement (see class docstring).
        print match
        #                    <td class="title" tyle="width:200px;"><a href="     ">     </a></td>\n                       <td class="episode">\n                          <a href="     "><span class="w">     </span>     </a>\n                       </td>
        match1 = re.compile(
            '<td class="title" tyle="width:200px;"><a href="(.*?)">(.*?)</a></td>\n                       <td class="episode">\n                          <a href="(.*?)"><span class="w">(.*?)</span>(.*?)</a>\n                       </td>',
            re.DOTALL).findall(match[0])
        if len(match1) > 0:
            for i in range(len(match1)):
                #print ("M",match1[i])
                self.add(
                    'alltubefilmy', 'playSelectedMovie', 'None',
                    self.cm.html_special_chars(match1[i][1].strip() + ' ' +
                                               match1[i][3].strip() + ' ' +
                                               match1[i][4].strip()), 'None',
                    mainUrl[:-1] + match1[i][2], 'aaaa', 'None', False, False,
                    '')
        xbmcplugin.endOfDirectory(int(sys.argv[1]))

    def listsItemsPage(self, url):
        """Emit one 'Lista N' entry per result page of the listing at *url*."""
        if not url.startswith("http://"):
            url = mainUrl + url
        # Page count = ceil(total items / items per page); both helpers are
        # queried twice here (once for the guard, once for the division).
        if self.getSizeAllItems(url) > 0 and self.getSizeItemsPerPage(url) > 0:
            a = math.ceil(
                float(self.getSizeAllItems(url)) /
                float(self.getSizeItemsPerPage(url)))
            for i in range(int(a)):
                num = i + 1
                title = 'Lista ' + str(num)
                destUrl = url + sort_asc + '&page=' + str(num)
                self.add('alltubefilmy', 'items-menu', 'None', title, 'None',
                         destUrl, 'None', 'None', True, False)
        xbmcplugin.endOfDirectory(int(sys.argv[1]))

    def listsSeasons(self, url, img):
        """List the season buttons scraped from a series page at *url*."""
        query_data = {
            'url': url,
            'use_host': False,
            'use_cookie': False,
            'use_post': True,
            'return_data': True
        }
        link = self.cm.getURLRequestData(query_data)
        match = re.compile(
            '<button data-action="scrollTo" data-scroll="(.*?)" class="btn btn-new cf sezonDirect" style="width:85px; font-size:13px;margin: 3px;" href="#" rel="1">(.*?)</button>',
            re.DOTALL).findall(link)
        #print match
        if img == '' or img == None:
            img = 'None'
        for i in range(len(match)):
            self.add('alltubefilmy', 'items-menu', 'None', match[i][1], img,
                     url, 'None', 'None', True, False, match[i][0])
        xbmcplugin.endOfDirectory(int(sys.argv[1]))

    def searchInputText(self):
        """Prompt with the XBMC keyboard; return the text or None if cancelled."""
        text = None
        k = xbmc.Keyboard()
        k.doModal()
        if (k.isConfirmed()):
            text = k.getText()
        return text

    def add(self,
            service,
            name,
            category,
            title,
            iconimage,
            url,
            desc,
            rating,
            folder=True,
            isPlayable=True,
            strona='',
            kategoria=''):
        """Append one directory/playable item to the plugin listing.

        All parameters are serialised into the plugin callback URL so
        handleService() can dispatch on them later; url/icon/strona/
        kategoria are percent-encoded.
        """
        u = sys.argv[
            0] + "?service=" + service + "&name=" + name + "&category=" + category + "&title=" + title + "&url=" + urllib.quote_plus(
                url) + "&icon=" + urllib.quote_plus(
                    iconimage) + "&strona=" + urllib.quote_plus(
                        strona) + "&kategoria=" + urllib.quote_plus(kategoria)
        #log.info(str(u))
        if name == 'main-menu' or name == 'categories-menu':
            title = category
        if iconimage == '':
            iconimage = "DefaultVideo.png"
        liz = xbmcgui.ListItem(title,
                               iconImage="DefaultFolder.png",
                               thumbnailImage=iconimage)
        if isPlayable:
            liz.setProperty("IsPlayable", "true")
        liz.setInfo(type="Video", infoLabels={"Title": title})
        xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
                                    url=u,
                                    listitem=liz,
                                    isFolder=folder)

    def handleService(self):
        """Dispatch the plugin callback encoded by add() to a handler.

        Reads the name/category/url/... parameters from the invocation
        URL and routes to the matching lists*()/player method.
        """
        params = self.parser.getParams()
        name = self.parser.getParam(params, "name")
        category = self.parser.getParam(params, "category")
        url = self.parser.getParam(params, "url")
        title = self.parser.getParam(params, "title")
        icon = self.parser.getParam(params, "icon")
        strona = self.parser.getParam(params, "strona")
        kategoria = self.parser.getParam(params, "kategoria")
        img = self.parser.getParam(params, "img")
        print("DANE", kategoria, strona, url, title, name, icon)

        if name == None:
            self.listsMainMenu(MENU_TAB)
        elif name == 'main-menu' and category == 'Ostatnio dodane':
            log.info('Jest Ostatnio dodane: ')
            self.listsItems(catUrl, 1, 'None')
        elif name == 'main-menu' and category == 'Kategorie':
            log.info('Jest Kategorie: ')
            self.listsCategoriesMenu()

        elif name == 'categories-menu':
            log.info('Jest categories-menu: ')
            self.listsItems(url, strona, kategoria)
        elif name == 'page-menu' and category == 'None':
            log.info('Jest Alfabetycznie Litera: ' + title)
            self.listsItemsS(catUrl, title)
        elif name == 'serial-menu' and category == 'None':
            log.info('Jest Serial Menu: ')
            self.listsSeasons(url, img)
        elif name == 'items-menu' and category == 'None':
            log.info('Jest Sezon: ')
            self.listsItems(url, strona)
        elif name == 'main-menu' and category == 'Top dzisiaj':
            log.info('Jest Top 30: ')
            self.listsItemsTop(catUrl)
#        elif name == 'main-menu' and category == 'Ostatnie dodane seriale':
#            self.listsItemsTop(catUrl,'Ostatnie dodane seriale', 'Ostatnie dodane odcinki')
        elif name == 'main-menu' and category == 'Ostatnio dodane odcinki':
            log.info('Jest Gorące: ')
            self.listsItemsOst(catUrl)

    #elif name == 'main-menu' and category == "Szukaj":
    #    key = self.searchInputText()
    #    if key != None:
    #        self.listsItemsOther(key)
        elif self.search.handleService(
                force=(name == 'main-menu' and category == 'Szukaj')):
            return
        elif name == 'categories-menu' and category != 'None':
            log.info('url: ' + str(url))
            # NOTE(review): `filtrowanie` is not defined anywhere visible;
            # this branch would raise NameError unless it is a module
            # global defined elsewhere -- confirm.
            self.listsItems(url, strona, filtrowanie)
        if name == 'playSelectedMovie':
            self.player.LOAD_AND_PLAY_VIDEO(url, title, '')
コード例 #40
0
import numpy as np
from asm1.model.hex_board import HexBoard
import trueskill as ts

"""EXTRA TIME FOR DIJKSTRA EVALUATION"""
def play_game_and_time(player1, player2, board_size=3):
    """Play one full game of Hex between *player1* and *player2*, timing turns.

    player1 keeps its own colour; player2 is forced onto the opposite
    colour, and the BLUE player moves first.  Returns a tuple of
    (mean seconds per turn for player1, mean seconds per turn for player2).
    """
    # BUG FIX: `time` was used below but never imported at module level
    # (only numpy, HexBoard and trueskill are); import it locally so the
    # function is self-contained.
    import time

    board = HexBoard(board_size)
    player2.color = HexBoard.get_opposite_color(player1.color)
    current_player = player1 if player1.color == HexBoard.BLUE else player2
    turn_time = {player1: [], player2: []}
    while not board.game_over:
        start_time = time.time()
        next_move = current_player.generate_move(board)
        turn_time[current_player].append(time.time() - start_time)
        board.place(next_move, current_player.color)
        # Alternate turns: hand over to whichever player owns the other colour.
        current_player = player1 if player1.color == HexBoard.get_opposite_color(current_player.color) else player2
    return np.mean(turn_time[player1]), np.mean(turn_time[player2])

# AI player line-up for the turn-timing comparison.
player_random_3 = Search(species='ai', color='red', method='alphabeta',
                         evaluation='random', search_depth=3)
player_dijkstra_3 = Search(species='ai', color='blue', method='alphabeta',
                           evaluation='dijkstra', search_depth=3)
player_dijkstra_4 = Search(species='ai', color='blue', method='alphabeta',
                           evaluation='dijkstra', search_depth=4)
player_mcts = Search(species='ai', color='blue', method='mcts',
                     evaluation='mcts', search_depth=4, rating=ts.Rating(),
                     search_iterations=100, Cp=2)
player_tt_random_3 = Search(species='ai', color='red', method='alphabeta',
                            evaluation='random', search_depth=3,
                            transposition=True)

# Time two head-to-head games on a size-4 board.
player_random_3_time, player_dijkstra_3_time = play_game_and_time(player_random_3, player_dijkstra_3, board_size=4)
player_mcts_time, player_dijkstra_4_time = play_game_and_time(player_mcts, player_dijkstra_4, board_size=4)

print("Taking a turn with Dijkstra 3 evaluation function takes: ", player_dijkstra_3_time," seconds for board size of 4")
print("Taking a turn with random evaluation function takes: ", player_random_3_time," seconds")
print("Taking a turn with dijikstra 4 takes: ", player_dijkstra_4_time," seconds")
print("Taking a turn with mcts: ", player_mcts_time," seconds")
コード例 #41
0
 def test_cache_usage(self):
     """Searching a term should add exactly one entry to the LRU cache."""
     searcher = Search()
     print("\nInitial cache length: ", len(searcher.lru_cache))
     searcher.string_search("power")
     self.assertEqual(1, len(searcher.lru_cache),
                      "Cache not storing recently searched term")
コード例 #42
0
 def drop_down_callback(self, event):
     """Handle a drop-down selection: search the tree by the chosen county."""
     county = self.drop_down.get()
     Search(tree=self.tree).search_by_county(county_name=county)
コード例 #43
0
 def occurence_unique(self, element, mineral_dict):
     """Count how many minerals in *mineral_dict* contain *element*."""
     matches = Search.search(element, mineral_dict)
     return len(matches)
コード例 #44
0
from node import Node
from problem import Problem
from search import Search

p = Problem()
s = Search(p)
r = s.IDFS(maxDepth=14)

print(r.status)

# The route comes back goal-first; flip it for start-to-goal order.
path = list(reversed(r.path))
print(path)
print("number of expansions : ")
print(r.expandedNodes)
print("depth : ")
print(r.depth)
コード例 #45
0
    def equals(self, state):
        """Two states are equal when their underlying values match."""
        return self.value == state.value

    def dict_key(self):
        """Serialise the state as "missionaries,cannibals,boat" for dict keys."""
        parts = (self.missionaries, self.cannibals, self.boat)
        return ",".join(str(part) for part in parts)

    def applyOperators(self):
        """Generate all legal successor states from this state.

        A candidate state is kept when its value is in LEGAL_STATES; order
        matches the original operator ordering.  Each move constructor is
        now called exactly once per candidate -- the previous version
        called every send_*/return_* method twice (once to test, once to
        append).  Assumes those constructors are pure state builders
        (they appear to be) -- confirm if they ever gain side effects.
        """
        candidates = (
            self.send_1m(),
            self.send_2m(),
            self.send_1c(),
            self.send_2c(),
            self.send_1m_1c(),
            self.return_1c(),
            self.return_1m(),
            self.return_1m_1c(),
        )
        return [state for state in candidates if state.value in LEGAL_STATES]


# Run both solvers on the classic 3-missionaries / 3-cannibals start state
# (boat on the starting bank) toward the empty goal bank, with verbose=True.
Search(MissionaryState(3, 3, 1), MissionaryState(0, 0, 0), True)
print("----------------------------------------------------------------------------------------------------------------")
ImprovedSearch(MissionaryState(3, 3, 1), MissionaryState(0, 0, 0), True)
コード例 #46
0
'''
Artificial Intelligence I
Assignment 1
Author: Bo Lin
Date of created: 09/09/2019
Date of modified: 09/10/2019
'''

import sys
from utils import get_input
from search import Search

if __name__ == "__main__":
    graph, origin_city, destination_city = get_input(sys.argv)
    searcher = Search(graph)
    num_node_expanded, num_node_generate, max_node_in_memory, distance, node = searcher.search(
        origin_city, destination_city)

    output = ""
    if node is not None:
        route = [node.name]
        while node.parent is not None:
            route.append(node.parent.name)
            node = node.parent
        route = route[::-1]
        for i in range(len(route) - 1):
            start = route[i]
            end = route[i + 1]
            output += "%s to %s, %.2f\n" % (start, end,
                                            graph[start].get_neighbor_cost(end))
    else:
コード例 #47
0
ファイル: worklog.py プロジェクト: GarrettKucinski/work_log
    def access_log(self):
        '''Method containing the main loop to run the program.

        Shows the main/search menu in a loop, validates the user's choice
        and dispatches it until the user quits.  Creates the CSV log file
        with a header row on first run.
        '''

        menu = Menu()
        entry = Entry()
        utils = Utilities()
        search = Search()
        validation = Validation()
        current_menu = constants.MAIN_MENU

        # Table-driven search dispatch: choice -> (prompt, search mode).
        # Replaces the previous repetitive elif chain; prompts unchanged.
        search_options = {
            'e': ('Please enter a date to search: ', 'date'),
            'd': ('Please enter two comma separated dates to search'
                  '\n(ex. 01/15/1982, 12/11/2017): ', 'date_range'),
            't': ('Please enter a time to search: ', 'time_spent'),
            'p': ('Please enter a word or phrase to search: ', 'exact_match'),
            'r': ('Please enter a word or phrase to search: ', 'regex'),
        }

        if not os.path.exists(constants.FILENAME):
            with open(constants.FILENAME, 'a') as file:
                writer = csv.DictWriter(file, fieldnames=constants.FIELDNAMES)
                writer.writeheader()

        while True:
            utils.clear_screen()
            menu.display(current_menu)
            choice = menu.get_user_choice()

            if current_menu == constants.MAIN_MENU:

                if not validation.is_valid_input(choice, menu='csq'):
                    continue

                if choice == 'c':
                    utils.clear_screen()
                    entry.create_new_entry()

                elif choice == 's':
                    current_menu = constants.SEARCH_MENU

                elif choice == 'q':
                    break

            elif current_menu == constants.SEARCH_MENU:

                if not validation.is_valid_input(choice, menu='edtprm'):
                    continue

                if choice == 'm':
                    current_menu = constants.MAIN_MENU

                elif choice in search_options:
                    prompt, mode = search_options[choice]
                    search.search(prompt, mode)
コード例 #48
0
def makeSearch(ConditionColumns,
               database,
               dbtable,
               headcond=None,
               depcond=None,
               headdepcond=None,
               secondnextcond=None,
               finheaddepcond=None,
               depcond2=None,
               nextcond=None,
               prevcond=None,
               prevornext=False,
               appendconditioncolumns=True,
               isparallel=False,
               extralog='',
               limited=None,
               monoling=False,
               samesentencecond=None,
               secondpreviouscond=None,
               non_db_data=None,
               group=None,
               broad=False,
               trmeta=False):
    """Configure, run and simplify a database-backed Search.

    Copies the many condition arguments onto a fresh Search instance,
    builds its subquery, executes it and returns the Search object with a
    simplified result set.
    """
    logging.info('Starting the search..')
    if extralog:
        logging.info(extralog)

    search = Search(database, askname=False)
    if trmeta:
        search.sql_cols = ("tokenid, token, lemma, pos, feat, head, deprel, "
                           "align_id, id, sentence_id, text_id, translation_id")
    search.isparallel = isparallel
    search.limited = limited
    # Monolingual searches group by sentence, parallel ones by alignment.
    search.toplevel = "sentence_id" if monoling else "align_id"
    if appendconditioncolumns:
        search.ConditionColumns.append(ConditionColumns)
    else:
        search.ConditionColumns = ConditionColumns

    # Plumb the per-position condition arguments straight onto the object.
    conditions = {
        'headcond': headcond,
        'depcond': depcond,
        'depcond2': depcond2,
        'headdepcond': headdepcond,
        'finheaddepcond': finheaddepcond,
        'prevcond': prevcond,
        'nextcond': nextcond,
        'samesentencecond': samesentencecond,
        'secondnextcond': secondnextcond,
        'secondpreviouscond': secondpreviouscond,
    }
    for attr, value in conditions.items():
        setattr(search, attr, value)

    search.non_db_data = non_db_data
    search.groupname = group
    search.queried_table = dbtable
    if prevornext:
        # It is enough for either of the surrounding words to fulfill a criterion.
        search.prevornext['ison'] = True
    if broad:
        search.broadcontext = True

    search.BuildSubQuery()
    search.Find()
    logging.info('Search committed')
    search.SimplifyResultSet()
    return search
コード例 #49
0
ファイル: search_test.py プロジェクト: aaiyeolaa/car
 def test_repr(self):
     """repr of a fresh 'used' search reports zero cars."""
     search = Search("used")
     self.assertEqual(repr(search), 'You have searched for 0 used car(s)')
コード例 #50
0
def makeNondbSearch(ConditionColumns,
                    headcond=None,
                    depcond=None,
                    headdepcond=None,
                    secondnextcond=None,
                    finheaddepcond=None,
                    depcond2=None,
                    nextcond=None,
                    prevcond=None,
                    prevornext=False,
                    appendconditioncolumns=True,
                    isparallel=False,
                    extralog='',
                    limited=None,
                    monoling=False,
                    samesentencecond=None,
                    secondpreviouscond=None,
                    non_db_data=None):
    """Configure and run a pseudo (non-database) Search and return it.

    Unlike makeSearch this builds no subquery and does not simplify the
    result set; the toplevel is only overridden for monolingual searches.
    """
    logging.info('Starting the search..')
    if extralog:
        logging.info(extralog)

    pseudo_search = Search(askname=False, pseudo=True)
    pseudo_search.isparallel = isparallel
    pseudo_search.limited = limited
    if monoling:
        pseudo_search.toplevel = "sentence_id"
    if appendconditioncolumns:
        pseudo_search.ConditionColumns.append(ConditionColumns)
    else:
        pseudo_search.ConditionColumns = ConditionColumns

    # Copy the per-position condition arguments onto the object.
    for attr, value in (
            ('headcond', headcond),
            ('depcond', depcond),
            ('depcond2', depcond2),
            ('headdepcond', headdepcond),
            ('finheaddepcond', finheaddepcond),
            ('prevcond', prevcond),
            ('nextcond', nextcond),
            ('samesentencecond', samesentencecond),
            ('secondnextcond', secondnextcond),
            ('secondpreviouscond', secondpreviouscond),
            ('non_db_data', non_db_data)):
        setattr(pseudo_search, attr, value)

    if prevornext:
        # It is enough for either of the surrounding words to fulfill a criterion.
        pseudo_search.prevornext['ison'] = True

    pseudo_search.Find()
    logging.info('Search committed')
    return pseudo_search
コード例 #51
0
ファイル: solve.py プロジェクト: teo-sert/ev3dev_examples
 16 17 18  25 26 27  34 35 36  43 44 45
           46 47 48
           49 50 51
           52 53 54

The middle square for each side defines the color for that side
The facelet string must be in side order U, R, F, D, L, B

- Side U is Ye, replace all instances of Ye with U
- Side R is Rd, replace all instances of Rd with R
etc, etc

We are left with:
U = LLUFURFRF
R = RFBRRBLBU
F = UBDLFDDFD
D = RDFDDURRL
L = DURDLFBLB
B = LUBLBBFUU

Which you pass to Search.solution() as one big string
LLUFURFRFRFBRRBLBUUBDLFDDFDRDFDDURRLDURDLFBLBLUBLBBFUU
'''

# Parse the single positional facelet string from the command line.
# NOTE(review): `default=` on a plain positional argument has no effect in
# argparse (the argument is required) -- confirm whether it was meant to be
# optional (nargs='?').
parser = argparse.ArgumentParser()
parser.add_argument('facelet', help='Facelet string', default=None)
args = parser.parse_args()

# Solve the cube described by the facelet string (Python 2 print statement).
cube = Search()
print cube.solution(args.facelet, maxDepth=21, timeOut=600, useSeparator='')
コード例 #52
0
ファイル: search_test.py プロジェクト: aaiyeolaa/car
 def test_create_search(self):
     """A new Search remembers its type and starts with no cars."""
     search = Search('used')
     self.assertEqual('used', search.search_type)
     self.assertListEqual([], search.cars)
コード例 #53
0
ファイル: SearchTest.py プロジェクト: osga1291/overflow
    def testInitlizeWithoutParams(self):
        """Constructing a Search from just a query string yields results."""
        query_search = Search("ImportError: No module named")
        self.assertNotEqual(len(query_search.results), 0)
        print(query_search.results)
コード例 #54
0
import os
import sys
from search import Search as sh
from whoosh.analysis import StemmingAnalyzer
from whoosh.fields import Schema,TEXT

# Build a whoosh index from every .txt file under a user-supplied directory.
# Each line is expected as: <field0> <field1> <field2> ...; the document key
# is field0+field1 and the indexed content is field2.
address = input("enter the root file address: ")
files = os.listdir(address)
dir_name = input("enter the index dir name: ")

schema = Schema(title=TEXT(stored=True), content=TEXT(analyzer=StemmingAnalyzer()))
index_addr = os.path.join(address, dir_name)
ix = sh.create_index(index_addr, schema)

for file in files:
    if os.path.splitext(file)[1] == '.txt':
        file_address = os.path.join(address, file)
        with open(file_address) as f:
            # Idiomatic line iteration replaces the previous manual
            # readline() loop (same lines, same order).
            for line in f:
                info = line.split(" ")
                # NOTE(review): a line with fewer than three space-separated
                # fields raises IndexError here (as it did before) -- confirm
                # the input format guarantees three fields.
                key = info[0] + info[1]
                sh.addDoc(ix, key, info[2])

			
			
			
コード例 #55
0
 def __init__(self, json_handler):
     # Wrap the given JSON handler in the SEEK search backend.
     self.SEEK_search = Search(json_handler)
コード例 #56
0
from datetime import date, timedelta

#parse the configuration file
# --- configuration -------------------------------------------------------
config = ParseConfig("config.ini")
baseURL = config.retrieveBaseUrl()
apikey = config.retrieveApiKey()

# --- user input ----------------------------------------------------------
userinput = ParseUserInput("userinput.json")
location = userinput.getLocation()

# Stay window: check in today, check out tomorrow.
check_in = date.today()
check_out = check_in + timedelta(days=1)

# --- build the search request --------------------------------------------
userRequest = HotelAirportSearchRequest(apikey, location, check_in, check_out)
userRequest = userinput.setOptionalRequestParameters(userRequest)

# --- execute the search and report ---------------------------------------
src = Search(baseURL)
print(src.getRequestURL(userRequest.getRequestParameters()))

rsp = HotelAirportSearchResponse(
    src.getJsonResponse(userRequest.getRequestParameters()))
rsp.writeResponseOnFile("hotels_full_details.json")
rsp.writeMinDetailsResponseOnFile("hotels_min_details.json")
rsp.generateHtmlFromJsonResponse("hotels_min_details.html")
コード例 #57
0
ファイル: macro.py プロジェクト: ousunny/donut-python
class Macro:
    """Record screen-region waypoints and replay mouse movements over them.

    Relies on pyautogui for mouse control, Listener for region capture,
    and Search for on-screen image matching.
    """

    def __init__(self):
        # Randomize the global pyautogui delay so replays look less robotic.
        pyautogui.PAUSE = round(uniform(1.5, 2.3), 2)
        self.listener = Listener()
        self.search = Search()
        self.bglistener = None  # keyboard listener, active only during start()
        self.waypoints = []     # dicts with "position", "box", "image" keys
        self.end = False

    def __repr__(self):
        return f"{len(self.waypoints)} waypoint(s) in macro"

    def _on_release(self, key):
        # Esc stops the background listener and signals the replay loop.
        if key == keyboard.Key.esc:
            self.end = True
            self.bglistener.stop()
            print("Ending macro ...")

            return False

    def start(self):
        """Replay all waypoints in a loop until Esc is pressed."""
        self.end = False
        self.bglistener = keyboard.Listener(on_release=self._on_release)
        self.bglistener.start()

        print("Macro started ...")
        while not self.end:
            for waypoint in self.waypoints:
                # Retry each waypoint until its image is found on screen.
                while True:
                    if self.moveTo(waypoint):
                        break

        print("Macro ended ...")

    def _list_waypoints(self):
        # Shared pretty-printer for show_waypoints/delete_waypoint (the
        # original duplicated this loop in both methods).
        print("---------------------------------")
        for i, waypoint in enumerate(self.waypoints):
            print(f"{i} : {waypoint}")
        print("---------------------------------")

    def show_waypoints(self):
        """Print the current waypoint list with indices."""
        print("Current waypoints:")
        self._list_waypoints()

    def delete_waypoint(self, index):
        """Remove the waypoint at *index* and print the remaining list."""
        self.waypoints.pop(index)
        self._list_waypoints()

    def add_waypoint(self):
        """Capture a screen region from the listener and store it as a waypoint."""
        region = self.listener.listen()
        if None not in region:
            image = self.search.find_image(region=region)
            if image:
                self.waypoints.append({
                    "position": (region[0], region[1]),
                    "box": (region[2], region[3]),
                    "image": image,
                })

    def moveTo(self, waypoint, random=True):
        """Move the mouse into *waypoint*'s region; return True on success."""
        try:
            pyautogui.PAUSE = round(uniform(1.5, 3.0), 2)
            print(pyautogui.PAUSE)
            if not random:
                pyautogui.moveTo(waypoint["position"], duration=0.2)

            if self.search.find_image(image=waypoint["image"]):
                # Pick a random point inside the waypoint's bounding box.
                randX = randint(
                    waypoint["position"][0],
                    waypoint["position"][0] + waypoint["box"][0],
                )
                randY = randint(
                    waypoint["position"][1],
                    waypoint["position"][1] + waypoint["box"][1],
                )

                pyautogui.moveTo((randX, randY), duration=0.2)
                return True
            print("\033[91mWaypoint image not found ...\033[0m")
            return False
        except Exception as exc:
            # BUG FIX: was a bare `except: pass` that silently swallowed every
            # error (including KeyboardInterrupt) and returned None, making
            # start() spin with no diagnostics.  Report and signal failure.
            print(f"moveTo failed: {exc}")
            return False
コード例 #58
0
def main():
    """Load a maze from disk, run the selected search method, and plot it."""
    filename = 'open'
    searchMethod = 'gbfs'

    maze, origin, goal = readFile('data/' + filename + 'Maze.txt')
    model = Search(maze, origin, goal)

    # Lazy dispatch table: lambdas defer name lookup so only the chosen
    # method's heuristic is ever resolved, exactly as the if/elif chain did.
    runners = {
        'bfs': lambda: model.search(h_bfs, None),
        'dfs': lambda: model.search(h_dfs, None),
        'gbfs': lambda: model.search(h_gbfs, f),
        'astar': lambda: model.search(h_astar, f),
    }
    if searchMethod in runners:
        runners[searchMethod]()

    model.plot(filename, searchMethod)
コード例 #59
0
 def test_get_file_count(self):
     """get_dirfiles() should discover exactly the three fixture files."""
     searcher = Search()
     located = searcher.get_dirfiles()
     self.assertEqual(
         3, len(located), "Failed to read all files from the directory")
コード例 #60
0
ファイル: run.py プロジェクト: dword4/legacy
__status__ = "In Development"

from ingest import IngestFile
from search import Search
from configparser import ConfigParser

import logging

# Configure logging first so every later step is captured.
logging.basicConfig(filename='run.log',
                    level=logging.DEBUG)  # TODO: make config driven later
logging.info('---Start Run---')
config = ConfigParser()
config.read('melchior.conf')

igf = IngestFile()  # file-ingestion helper (project module)
isf = Search()      # search backend (project module)

filename = 'somefile.txt'  # temporary for testing purposes
try:
    logging.info('Trying to load file:' + filename)
    igf.loadFile(filename)
    logging.info('Successfully loaded file:' + filename)
except Exception:
    # BUG FIX: was `except my:` -- `my` is undefined, so any load failure
    # raised a NameError instead of being handled.  Log the traceback too.
    logging.exception('failed to load file: %s', filename)
    print("failed to load file")
"""
wordset = ['goat','success','Please','smite','Conan','how']

score = 0
wt = 1 # this is temporary

for word in wordset: