Code example #1
File: winged_edge.py  Project: terry07/MoAL
 def __str__(self):
     ppr(self.faces)
     ppr(self.edges)
     ppr(self.vertices)
     ppr(self.left_trav)
     ppr(self.right_trav)
     return ''
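
Note: in these snippets, ppr is the usual short alias for the standard
library's pretty printer, imported as in examples #14 and #20. A minimal
sketch of the pattern:

from pprint import pprint as ppr

ppr({'faces': [], 'edges': [], 'vertices': []})
# {'edges': [], 'faces': [], 'vertices': []}
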
Code example #2
 def getObjectTransactions(self, fab, objPHID):
     objInfo = fab.phid.query(phids=[objPHID]).response[objPHID]
     ppr(objInfo)
     objType = objInfo['type']
     if objType in self.getters:
         return self.getters[objType](fab, objPHID)
     raise Exception("unknown object type: %s" % objType)
Code example #3
File: winged_edge.py  Project: Androbos/MoAL
 def __str__(self):
     ppr(self.faces)
     ppr(self.edges)
     ppr(self.vertices)
     ppr(self.left_trav)
     ppr(self.right_trav)
     return ''
Code example #4
 def format(self, fab, transaction, destType='text'):
     key = transaction['transactionType'] + '=>' + destType
     if key in self.describers:
         return self.describers[key](fab, transaction)
     Banner().addLine('INDESCRIBABLE TRANSACTION').output()
     ppr(transaction)
     raise Exception("indescribable transaction, type=" +
                     transaction['transactionType'])
Code example #5
File: markov_chain.py  Project: Androbos/MoAL
 def view_probability_history_of(self, node):
     probabilities = []
     for timestep in self.probabilities:
         for node_data in timestep:
             for node_label, value in node_data.iteritems():
                 if node_label == node:
                     probabilities.append(value)
     ppr({node: probabilities})
Code example #6
File: markov_chain.py  Project: terry07/MoAL
 def view_probability_history_of(self, node):
     probabilities = []
     for timestep in self.probabilities:
         for node_data in timestep:
             for node_label, value in node_data.iteritems():
                 if node_label == node:
                     probabilities.append(value)
     ppr({node: probabilities})
Code example #7
File: reification.py  Project: Androbos/MoAL
 def parse(self, sentence):
     tokens = word_tokenize(sentence)
     stop_words = stopwords.words('english')
     tokens = list(set([t for t in tokens if t.lower() not in stop_words]))
     parts = pos_tag(tokens)
     ppr(parts)
     self.tokens = tokens
     self.parts = parts
     return self
Code example #8
File: reification.py  Project: terry07/MoAL
 def parse(self, sentence):
     tokens = word_tokenize(sentence)
     stop_words = stopwords.words('english')
     tokens = list(set([t for t in tokens if t.lower() not in stop_words]))
     parts = pos_tag(tokens)
     ppr(parts)
     self.tokens = tokens
     self.parts = parts
     return self
Code example #9
def trial_exp(func, max=10):
    print('Testing function: "{}"'.format(func.__name__))
    rounds = []
    for x in range(max):
        round = []
        for n in range(max):
            round.append(func(x, n))
        rounds.append(round)
    ppr(rounds)
    return rounds
Code example #10
File: matrix_processing.py  Project: terry07/MoAL
def trial_exp(func, max=10):
    print('Testing function: "{}"'.format(func.__name__))
    rounds = []
    for x in range(max):
        round = []
        for n in range(max):
            round.append(func(x, n))
        rounds.append(round)
    ppr(rounds)
    return rounds
Code example #11
File: stptest.py  Project: kozmikyak/actibase
def complete_link_and_update(meetings):
    format1 = "%A %B %d, %Y - %I:%M %p"
    format2 = "%A %B %d, %Y - "
    format3 = "%m/%d/%y"

    for m in meetings:
        m['link'] = "https://www.stpaul.gov" + m['link']

    for m in meetings:
        ppr(m['info'])
        r = requests.get(m['link'])
        b = html.fromstring(r.text)
        exists = b.xpath('.//div[@class="node-content clearfix"]')
        if len(exists) > 0:
            date = exists[0].xpath(
                './/*/span[@class="date-display-single"]/text()')
            loc1 = exists[0].xpath('.//*/div[@class="thoroughfare"]/text()')
            loc2 = exists[0].xpath('.//*/div[@class="premise"]/text()')
            if len(loc1) > 0:
                m['location'] = loc1[0]
            if len(loc2) > 0:
                m['location'] = m['location'] + " " + loc2[0]
            else:
                m['location'] = 'N/A'
            if ":" in date[0]:
                m['date'] = datetime.strptime(date[0], format1)
            elif "/" in date[0]:
                new_date = date[0].split('/')
                # Zero-pad single-digit month/day parts; reassigning the loop
                # variable would not change the list, so rebuild it instead.
                new_date = ['0' + n if len(n) == 1 else n for n in new_date]
                new_date = '/'.join(new_date)
                m['date'] = datetime.strptime(new_date, format3)
            else:
                date = datetime.strptime(date[0], format2)
                m['date'] = date
            if ('City Counil' not in m['info']
                    and 'Legislative Hearings' not in m['info']):
                event = Event(name=m['info'],
                              url=m['link'],
                              start_time=m['date'],
                              location_name=m['location'])
                event.add_committee(m['info'])
            else:
                event = Event(name=m['info'],
                              url=m['link'],
                              start_time=m['date'],
                              location_name=m['location'])
                event.add_committee('Saint Paul City Council')
            yield event
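
For reference, a short illustration (using hypothetical date strings) of how
the three strptime formats above behave; format2 appears to cover entries that
carry no time component:

from datetime import datetime

print(datetime.strptime('Wednesday January 08, 2020 - 05:30 PM',
                        '%A %B %d, %Y - %I:%M %p'))  # 2020-01-08 17:30:00
print(datetime.strptime('Wednesday January 08, 2020 - ',
                        '%A %B %d, %Y - '))          # 2020-01-08 00:00:00
print(datetime.strptime('01/08/20', '%m/%d/%y'))     # 2020-01-08 00:00:00
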
Code example #12
def FFR135_HW3_1(mB=2, epochs=1, eta=0.3, g=sigmoid, dg=dsigmoid):
    tasks = {
        # '3.1.1': [784, 10],
        '3.1.2': [4, 3, 2],
        # '3.1.3': [784, 100, 10],
        # '3.1.4': [784, 100, 100, 10],
        # '3.2': [784, 30, 30, 30, 30, 10],
    }

    np.random.seed(0)

    taskC = {}

    for task in tasks.keys():

        shapeNN = tasks[task]

        L = len(shapeNN)
        HL = L - 1

        W, Theta, dW, dTheta, V, dV, b, Delta = training_containers(
            shapeNN, mB)

        ppr(W)

        inits = init_parameters(shapeNN, Theta, W)

        for l in range(1, L):
            N = shapeNN[l - 1]
            M = shapeNN[l]
            W[l][:M, :N] = inits['W'][l] * 1.
            Theta[l][:M] = inits['Theta'][l] * 0.

        train_perceptron(
            mB,
            epochs,
            1,
            g,
            dg,
            eta,
            shapeNN,
            W,
            Theta,
            dW,
            dTheta,
            V,
            dV,
            b,
            Delta,
        )
Code example #13
File: cigarTest.py  Project: jhrf/Playground
def master():

	with open("./cigarlist.pkl","r") as cigarlist:
		cigarDat = pickle.load(cigarlist)

	cigarDat.extend(cigarDat)
	print "Dat Length = %d" % (len(cigarDat))
	
	res = {}
	funcs = [method_1,method_2,method_3,method_4]
	tries = 10
	
	for fu in funcs:
		tot = 0
		print "Starting: %s" % (fu.__name__,)
		for x in xrange(tries):
			tot += loader(fu, cigarDat)
		res[fu.__name__] = float(tot) / tries

	print "FINISHED. RESULTS:"
	ppr(res)
Code example #14
File: sparse.py  Project: Androbos/MoAL
if __name__ == '__main__':
    from os import getcwd
    from os import sys
    sys.path.append(getcwd())

from MOAL.helpers.display import Section
from MOAL.helpers.display import prnt
from MOAL.helpers.display import print_h2
from MOAL.helpers.datamaker import make_sparselist
from MOAL.helpers.datamaker import make_sparsematrix
from MOAL.helpers.text import gibberish2
from pprint import pprint as ppr
from random import randrange as rr

DEBUG = True if __name__ == '__main__' else False


if DEBUG:
    with Section('Sparse linear data structures'):
        max = 100
        density = 0.1
        items = {rr(0, max): gibberish2() for _ in range(int(max * density))}
        splist = make_sparselist(items, max)
        prnt('Sparse list', splist)

        sparse_data = {(x, x): gibberish2() for x in range(0, 10)}
        sparsematrix = make_sparsematrix(sparse_data, max, rows=10, cols=10)
        print_h2('Sparse matrix')
        ppr(sparsematrix)
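
make_sparselist and make_sparsematrix come from the project's datamaker
helpers and are not shown in this excerpt. A minimal sketch of the sparse-list
idea under that assumption (a {index: value} dict expanded into a fixed-length
list with a filler placeholder); the real helpers may differ:

def make_sparselist_sketch(items, size, filler=''):
    # Expand a {index: value} dict into a dense list of length `size`,
    # padding positions that carry no data with `filler`.
    return [items.get(i, filler) for i in range(size)]

ppr(make_sparselist_sketch({2: 'qux', 5: 'zef'}, 8))
# ['', '', 'qux', '', '', 'zef', '', '']
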
Code example #15
    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton,
                                        cls).__call__(*args, **kwargs)
        return cls._instances[cls]


if __name__ == '__main__':
    fab = getFab(apiEntry=settings.API_ENTRY, token=settings.API_TOKEN)
    tasks = getAllTasks(fab)
    taskIds = map(lambda x: x['id'], tasks)
    #transactions = fab.maniphest.gettasktransactions(ids = taskIds[1:2]).response
    transactions = fab.maniphest.gettasktransactions(ids=['1']).response
    transactions = reduce(lambda x, y: x + transactions[y], transactions, [])
    ppr(transactions[0].keys())
    ppr(transactions)
    transactions = map(lambda x: (x['transactionPHID'], x['transactionType']),
                       transactions)
    ppr(transactions)
    quit()
    users = getUsers(fab)
    for user in users:
        print user['realName']
        userId = user['phid']
        tasks = fab.maniphest.query(ownerPHIDs=[userId]).response
        tasks = map(lambda x: tasks[x], tasks)
        for task in filter(lambda x: x['isClosed'] == False, tasks):
            ppr(task)
            quit()
            print task['objectName'], task['title'], task['priority']
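
The __call__ above is the classic singleton metaclass: the first
instantiation of a class is cached in _instances and returned on every later
call. A minimal, self-contained usage sketch, written for Python 2 like the
surrounding script (the Phabricator setup itself is not reproduced):

class Singleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton,
                                        cls).__call__(*args, **kwargs)
        return cls._instances[cls]


class ApiClient(object):
    __metaclass__ = Singleton  # Python 2 metaclass declaration

    def __init__(self, token=None):
        self.token = token


a = ApiClient(token='abc')
b = ApiClient()
print(a is b)   # True - the second call returns the cached instance
print(b.token)  # abc
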
Code example #16
File: selection_sort.py  Project: Androbos/MoAL
# Based on the algorithm from http://en.wikipedia.org/wiki/Selection_sort


def selection_sort(items):
    num_items = len(items)
    if num_items < 2:
        return items
    curr_min = 0
    for j in range(num_items):
        # Assign minimum to j, initially
        curr_min = j
        # Loop through all elements /after/ j,
        # checking to find the new smallest item.
        for i in range(j + 1, num_items):
            # Update current min if this one is smaller.
            if items[i] < items[curr_min]:
                curr_min = i
        # After the internal loop finishes,
        # check (on each outer iteration) if j is less than the new curr_min.
        # If so, then a smaller item was found and needs to be swapped.
        if curr_min != j:
            swap_item(items, j, curr_min)
    return items


if __name__ == '__main__':
    with Section('Selection Sort'):
        ppr(run_sorting_trials(
            selection_sort, magnitudes=[10, 100, 1000, 5000]))
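
swap_item, run_sorting_trials, and Section are helpers from the MoAL project
and are not part of this excerpt. A minimal sketch of what swap_item is
assumed to do (an in-place swap that also returns the list, matching how
quick_sort in example #41 uses it):

def swap_item(items, i, j):
    # Swap two positions in place and hand the list back for chaining.
    items[i], items[j] = items[j], items[i]
    return items

print(swap_item([3, 1, 2], 0, 1))  # [1, 3, 2]
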
Code example #17
#        quit()


for bill in bills_lower:
    number = bill['bill_id']
    intro = bill['title']
    status = bill['id']
    try:
        nbill, create = PolicyRecord.objects.get_or_create(number=number,
                                                           intro_text=intro[:512],
                                                           digistate=DIGISTATE,
                                                           primary_org=HOUSE,
                                                           status=status)
        dbill = pyopenstates.get_bill(nbill.status)
        url = dbill['sources'][0]['url']
        nbill.link = url
        nbill.save()
        sponsors = dbill['sponsors']
        for sponsor in sponsors:
            try:
                rep = PublicOfficial.objects.get(city_str=sponsor['leg_id'])
                nbill.authors.add(rep.id)
                nbill.save()
            except:
                print("House oops sponsor: {0}".format(sponsor))

    except:
        ppr(bill)
        print("Error 2")
        quit()
Code example #18
def test_matrix_with_exp(matrix):
    """Test the exponentiation function with a bonafide matrix."""
    ppr(matrix)
    for row in matrix:
        for k in row:
            print(k, exp_by_squaring(k, 2))
Code example #19
# After XPath has found that item, we want it to find the 'tbody' tag
# within our 'MasterTable' tag. And once we've got that 'tbody' item
# we want to grab every single row in that table.
items = base.xpath('.//*[@class="rgMasterTable"]/tbody/tr')

# We've got our items; now let's get the data.
# We are going to use a for-loop to go through each of our items
# and parse them further using XPath.

# See if you can decipher what is happening here. If you use Chrome,
# visit the page we're scraping and 'inspect' it.
for i in items[:5]:
    d = {}
    title = i.xpath('.//td[1]/*/a/font/text()')
    d['desc'] = i.xpath('.//description/text()')
    place = i.xpath('.//td[5]/font/text()')
    d['link'] = i.xpath('.//td[1]/*/a/@href')
    date = i.xpath('.//td[2]/font/text()')
    time = i.xpath('.//td[4]/font/span/font/text()')
    d['title'] = title[0].strip()
    d['loc'] = place[0].strip()
    d['url'] = "https://stpaul.legistar.com/" + d['link'][0]
    time = " ".join(date + time)
    format_date = '%m/%d/%Y %I:%M %p'
    d['real_date'] = datetime.strptime(time, format_date)
    d['desc_xtra'] = i.xpath('.//td[5]/font/*/text()')
    d['deets'] = i.xpath('.//td[6]/*/a/@href')
    d['agenda'] = i.xpath('.//td[7]/font/span/a/@href')
    ppr(d)
Code example #20
m['H'] += [4, 5, 6, 7]

print(m)
# {'H': [1, 2, 3, 4, 5, 6, 7], 'He': [3, 4], 'Li': [6, 7], 'Be': [7, 9, 10]}

m['N'] = [13, 14, 15]
print(m)
# {'H': [1, 2, 3, 4, 5, 6, 7], 'He': [3, 4], 'Li': [6, 7], 'Be': [7, 9, 10], 'N': [13, 14, 15]}

# Use the pprint module from the standard library for readable output.
# Note that pprint sorts dictionary keys by default.

from pprint import pprint as ppr
ppr(m)
# {'Be': [7, 9, 10],
#  'H': [1, 2, 3, 4, 5, 6, 7],
#  'He': [3, 4],
#  'Li': [6, 7],
#  'N': [13, 14, 15]}
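
On Python 3.8 and later, the key sorting shown above can be switched off when
insertion order matters:

from pprint import pprint
pprint(m, sort_dicts=False)
# {'H': [1, 2, 3, 4, 5, 6, 7],
#  'He': [3, 4],
#  'Li': [6, 7],
#  'Be': [7, 9, 10],
#  'N': [13, 14, 15]}
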
Code example #21
File: run.py  Project: gischen/jobScraper
from pprint import pprint as ppr
import helpers

process_first = raw_input('Process json first? y/n ==> ')
run_all_tests = raw_input('Run all tests? y/n ==> ')

if process_first == 'y':
    helpers.process_all()

if run_all_tests == 'y':
    helpers.run_all()
else:
    print 'Enter a spider to run: '
    print helpers.get_spiders_list()
    spider = raw_input('Spider: ==> ')
    if spider == 'careerbuilder':
        print 'Enter a job title to run:'
        print ppr(helpers.load_all_categories())
        keyword = raw_input('Title: ==> ')
        if keyword:
            helpers.process_one(spider, keyword)
Code example #22
 def __str__(self):
     ppr(self.tree)
     return ''
Code example #23
File: pysamPlayground.py  Project: jhrf/Playground
fnm = sys.argv[1]
to_print = int(sys.argv[2])
sam = pysam.Samfile(fnm,"rb")

stat_pack = StatPack(2) 
comps = stat_pack.getComps(100)

pairs = {}

for alig in sam.fetch(until_eof=True):
	try:
		pairs[alig.qname].append({"is_rev":alig.mate_is_reverse,"is_unm":alig.is_unmapped,"seq":alig.seq})
	except KeyError:
		pairs[alig.qname] = [{"is_rev":alig.mate_is_reverse,"is_unm":alig.is_unmapped,"seq":alig.seq}]

full = {}

for i,(name,dat) in enumerate(pairs.items()):
	if not len(dat) == 2: continue 
	r1_seq = dat[0]["seq"]
	r2_seq = dat[1]["seq"]
	scores = map(lambda s: stat_pack.quickScore(comps,s),[r1_seq,r2_seq])
	is_hun = map(lambda sc: sc == 100,scores)

	if any(is_hun):
		full[name] = [((r1_seq,scores[0],dat[0]["is_unm"]),(r2_seq,scores[1],dat[1]["is_unm"]))]
	if i % 10000 == 0: print i

ppr(full.items()[:to_print])
Code example #24
File: people.py  Project: kozmikyak/actibase
    def scrape(self):
        url = 'http://alpha.openstates.org/graphql'
        scrapers = [
            {
                'query':
                '{ people(memberOf:"ocd-organization/e91db6f8-2232-49cd-91af-fdb5adb4ac3b", first: 100) { edges { node { name party: currentMemberships(classification:"party") { organization { name }} links { url } sources { url } chamber: currentMemberships(classification:["upper", "lower"]) { post { label } organization { name classification parent { name }}}}}}}'
            },
            #            { 'query': '{ people(memberOf:"ocd-organization/e91db6f8-2232-49cd-91af-fdb5adb4ac3b", last: 100) { edges { node { name party: currentMemberships(classification:"party") { organization { name }} links { url } sources { url } chamber: currentMemberships(classification:["upper", "lower"]) { post { label } organization { name classification parent { name }}}}}}}'},
            {
                'query':
                '{ people(memberOf:"ocd-organization/6a026144-758d-4d57-b856-9c60dce3c4b5", first: 100) { edges { node { name party: currentMemberships(classification:"party") { organization { name }} links { url } sources { url } chamber: currentMemberships(classification:["upper", "lower"]) { post { label } organization { name classification parent { name }}}}}}}'
            },
        ]

        base = requests.get(url=url, json=scrapers[0])
        base = base.json()
        ppl = base['data']['people']['edges']
        for p in ppl:
            p = p['node']
            if p['name'] in rep_names:
                rep_names.remove(p['name'])

        # Get names unretrieved from primary House API Query
        print('REP NAMES: ', rep_names)
        rep_names.remove('Gene Pelowski')

        for rep in rep_names:
            query = '{ people(memberOf:"ocd-organization/e91db6f8-2232-49cd-91af-fdb5adb4ac3b", first: 100, name: "' + rep + '") { edges { node { name party: currentMemberships(classification:"party") { organization { name }} links { url } sources { url } chamber: currentMemberships(classification:["upper", "lower"]) { post { label } organization { name classification parent { name }}}}}}}'
            query = {'query': query}
            scrapers.append(query)
        for s in scrapers:
            base = requests.get(url=url, json=s)
            base = base.json()
            print(base)
            ppl = base['data']['people']['edges']
            for p in ppl:
                p = p['node']
                orgs = p['chamber']
                rep = Person(name=p['name'], role='State Representative')
                for o in orgs:
                    ppr(o)
                    name = o['organization']['name']
                    classification = o['organization']['classification']
                    if o['organization']['parent']:
                        pname = o['organization']['parent']['name']
                        if pname == 'Minnesota Legislature':
                            label = o['post']['label']
                            if 'House' in name:
                                role = 'State Representative'
                            elif 'Senate' in name:
                                role = 'State Senator'
                            rep.add_term(role,
                                         classification,
                                         district=label,
                                         org_name=name)
                            rep.add_source(p['sources'][0]['url'])

                        else:
                            rep.add_membership(name)
                            rep.add_source(p['sources'][0]['url'])
                yield rep
Code example #25
File: bubble_sort.py  Project: terry07/MoAL
    sys.path.append(getcwd())

from MOAL.helpers.display import Section
from MOAL.helpers.trials import run_sorting_trials
from pprint import pprint as ppr


def bubble_sort(items):
    num_items = len(items)
    if num_items < 2:
        return items
    while num_items > 0:
        for k in range(num_items):
            try:
                if items[k] > items[k + 1]:
                    copy = items[k]
                    copy_next = items[k + 1]
                    items[k] = copy_next
                    items[k + 1] = copy
                elif items[k] == items[k + 1]:
                    continue
            except IndexError:
                continue
        num_items -= 1
    return items


if __name__ == '__main__':
    with Section('Bubble Sort'):
        ppr(run_sorting_trials(bubble_sort, test_output=True))
Code example #26
File: number_theory.py  Project: Androbos/MoAL
def factor_factorials(max_nums):
    for n in range(1, max_nums):
        res = factorial(n)
        for f in factor(res):
            yield ppr({'factors': f, 'num': n, 'factorial': res})
Code example #27
    def scrape(self):
        for c in senate_base:
            m = {}
            m['notice'] = c.xpath('.//p/span[@class="cal_special"]/text()')
            link = c.xpath('.//h3/a/@href')
            print('top link: ', c.xpath('.//h3/*'))
            if len(link) > 0:
                m['link'] = c.xpath('.//h3/a/@href')[0]
                m['title'] = c.xpath('.//h3/a/text()')[0]
            else:
                m['link'] = 'https://www.leg.state.mn.us/cal?type=all'
                m['title'] = c.xpath('.//h3/text()')[0]
            print('top link 2: ', c.xpath('.//h3/text()'))
            info_div = c.xpath('.//div[@class="calendar_p_indent"]')
            if len(info_div) > 0:
                info_div = info_div[0]
                info_list = info_div.xpath('.//text()')
                nchairs = []
                agenda = False
                for il in info_list:
                    il = il.replace('\xa0', '')
                    if il.startswith(' and '):
                        il = il.replace(' and ', '')
                    if il.startswith('Room'):
                        m['room'] = il
                    if il.startswith('Rep.') or il.startswith('Sen.'):
                        cname = pull_middle_name(il[4:])
                        nchairs.append(cname.strip())
                    if agenda == True:
                        m['agenda'] = il
                    if il == 'Agenda: ':
                        agenda = True
                m['chair'] = nchairs
            if len(m['notice']) > 0:
                m['notice'] = m['notice'][0]
            else:
                m['notice'] = 'N/A'
            ppr(m)
            date = c.xpath('.//p/span/text()')
            if len(date) < 1:
                print('\n\n\n\n NO DATE')
                ppr(m)
                continue
            if 'or' in date[0]:
                date[0] = date[0].split('or')[0]
            m['date'] = datetime.datetime.strptime(date[0].replace('\xa0', ''),
                                                   format1)
            ppr(m)
            if not 'room' in m.keys():
                print('oops')
                m['room'] = 'Senate in session'
            event = Event(name=m['title'],
                          start_date=tz.localize(m['date']),
                          location_name=m['room'])

            if len(m['notice']) > 0:
                pass
            event.add_committee(m['title'])
            event.add_source(m['link'])
            for chair in m['chair']:
                event.add_person(name=chair, note="Chair")
            yield event
Code example #28
 def view_messages(self):
     return ppr(super(Logger, self).view())
Code example #29
File: merge_sort.py  Project: terry07/MoAL

def merge_sort(items, iteration=0, side=None):
    # `iteration` and `side` are used for testing purposes,
    # visualizing the recursive nature of the divide and conquer algorithm.
    _len = len(items)
    if _len < 2:
        return items
    pivot = _len // 2
    # Keep subdividing based on pivot,
    # until an empty list is all that is left.
    left = items[:pivot]
    right = items[pivot:]
    # Print each side, keeping track of recursive count to visually
    # indicate how many recursive calls were made.
    # print (side if side else '[ROOT]'), (iteration * 2) * '.', left, right
    return merge(merge_sort(left, iteration=iteration + 1, side='left'),
                 merge_sort(right, iteration=iteration + 1, side='right'))


if __name__ == '__main__':
    with Section('Merge Sort'):
        results = run_sorting_trials(merge_sort)
        ppr(results)

    with Section('Merge Sort - integers'):
        ppr(merge_sort([rr(1, 9999) for _ in range(20)]))

    with Section('Merge Sort - floating point integers'):
        ppr(merge_sort([random() * float(rr(1, 9999)) for _ in range(20)]))
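
The merge helper used above comes from the project's helpers and is not
reproduced in this excerpt. A minimal sketch of a conventional two-way merge,
assuming plain comparable items (the project's version may differ):

def merge(left, right):
    # Repeatedly take the smaller head element of the two sorted lists.
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    # One side is exhausted; append whatever remains of the other.
    result.extend(left[i:])
    result.extend(right[j:])
    return result

print(merge([1, 4, 9], [2, 3, 10]))  # [1, 2, 3, 4, 9, 10]
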
Code example #30
File: endianness.py  Project: terry07/MoAL
 def __str__(self):
     ppr(self.memory.keys())
     ppr(self.memory.values())
     return ''
Code example #31
File: test_files.py  Project: Androbos/MoAL
    for filepath in _get_all_files():
        print('========== [TESTING] Filepath: {}'.format(filepath))
        filename = filepath.split('/')[-1]
        if filename not in BAD_FILES:
            try:
                if TEST_CFG:
                    output = '{}/cfgs/{}.png'.format(
                        dir, filename.replace('.py', ''))
                    os.system(
                        'pycallgraph graphviz --output-file={} -- {}'.format(
                            output, filepath))
                if ADD_STATIC_ANALYSIS:
                    os.system('pylint {}'.format(filepath))
                if TEST_FILES:
                    execfile(filepath)
                if ADD_COVERAGE:
                    print('Getting coverage for: {}'.format(filepath))
                    # Add unique info for each file to combine with later.
                    os.system(
                        'coverage run --source=MOAL -p {}'.format(filepath))
            except EXPECTED_EXCEPTIONS:
                continue
            test_results.append(_result(filepath, sys.exc_info()))
    if ADD_COVERAGE:
        # Combine unique data and then generate report with it.
        os.system('coverage combine')
        os.system('coverage html -d coverage_report')
    if TEST_FILES:
        print('\nTEST RESULTS:')
        ppr(test_results)
Code example #32
File: sql_alchemy.py  Project: Androbos/MoAL
    id = Column(Integer, primary_key=True)
    name = Column(String(255))
    email = Column(String(255))
    url = Column(String(255))

    def __repr__(self):
        return '<Person(name={}, email={}, url={})>'.format(
            self.name, self.email, self.url)


@test_speed
def insert_all(max_records):
    people = [random_person() for n in range(max_records)]
    prnt('Records to create:', people)
    for person in people:
        # Don't need this prop for our example schema.
        del person['address']
        db_session.add(Person(**person))

if DEBUG:
    with Section('MySQL - SQL Alchemy'):
        Base.metadata.create_all(engine)

        print_h2('Adding a bunch of records...')
        run_trials(insert_all, trials=10)

        print_h2('Reading all records...')
        recs = db_session.query(Person).all()
        ppr(recs)
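
Base, engine, and db_session are defined earlier in sql_alchemy.py and are not
part of this excerpt. A minimal sketch of the kind of SQLAlchemy setup those
names assume (the connection string here is purely hypothetical):

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()
engine = create_engine('mysql://user:password@localhost/moal_example')
db_session = sessionmaker(bind=engine)()
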
Code example #33
File: threaded_sort.py  Project: terry07/MoAL
        # For each sub group, sort on a separate thread.
        for group in groups:
            self.sorting_queue.put(group)
        return self

    def run(self, items):
        # Make sure thread number is never greater than the number of items.
        num_items = len(items)
        while self.threads > num_items:
            self.threads -= 1
        if num_items < 2:
            return items
        # Prevent passing in div by zero errors.
        if self.threads == 0:
            self.threads = 1
        self._disperse()._enqueue(items)
        # Block until complete.
        self.sorting_queue.join()
        # Perform the second sort on already sorted sublists.
        return self.sorting_func(self.sorted_items)


if __name__ == '__main__':
    with Section('Threaded Sorts'):
        threaded_quicksort = ThreadSort(quick_sort, threads=4)

        rand = random_number_set(max_range=20)
        res = threaded_quicksort.run(rand)
        print('Is valid? {}'.format(res == sorted(rand)))
        ppr(res)
Code example #34
try:
    test_db = couch.create('test_db')
except couchdb.http.PreconditionFailed:
    couch.delete('test_db')
    test_db = couch.create('test_db')


def make_person():
    return {
        'name': faker.name(),
        'email': faker.email(),
        'address': faker.address(),
        'url': faker.url(),
        'created': str(dt.now())
    }


@test_speed
def insert_all(max_records):
    for n in range(max_records):
        test_db.save(make_person())


if DEBUG:
    with Section('CouchDB (via python-couchdb)'):
        run_trials(insert_all, trials=10)
        print_h2('Result from CouchDB execution: ')
        for res in test_db:
            divider()
            ppr(test_db[res])
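
couch, faker, dt, and the timing helpers are configured earlier in the file.
A minimal sketch of the assumed setup for the python-couchdb and Faker pieces
(the server URL is the library default and may differ in the original):

import couchdb
from datetime import datetime as dt
from faker import Faker

couch = couchdb.Server()  # defaults to http://localhost:5984/
faker = Faker()
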
Code example #35
File: shell_sort.py  Project: Androbos/MoAL
    last_offset = 0
    # Make sure maxgaps is never greater than the number of items.
    while maxgaps > num_items:
        maxgaps -= 1
    # Prevent passing in div by zero errors.
    if maxgaps == 0:
        maxgaps = 1
    # Get the gap division number based on length and maxgaps.
    gap_sections = num_items // maxgaps
    if num_items < 2:
        return items
    for k in range(maxgaps):
        # Add the sub groups by the last_offset and the current gap
        # e.g. [0, 10], [10, 20], [20, 30]...
        sub_groups += insertion_sort(
            items[last_offset:last_offset + gap_sections])
        # Update the last offset for the next index.
        last_offset += gap_sections
    # Return the results with the typical quick sort.
    return quick_sort(sub_groups)


if __name__ == '__main__':
    with Section('Shell Sort'):
        TEST_MAGNITUDES = [4, 10, 50, 100, 500, 1000, 10000]
        # Compare helper sorting functions
        # in isolation to the hybrid shell function
        ppr(run_sorting_trials(shell_sort, magnitudes=TEST_MAGNITUDES))
        ppr(run_sorting_trials(quick_sort, magnitudes=TEST_MAGNITUDES))
        ppr(run_sorting_trials(insertion_sort, magnitudes=TEST_MAGNITUDES))
Code example #36
File: counter_machine.py  Project: Androbos/MoAL
        self.registers[reg] = val


if __name__ == '__main__':
    with Section('Counter Machines'):
        classes = [
            SheperdsonSturgis, Minsky, Program, Abacus, Lambek,
            Successor, SuccessorRAM, ElgotRobinsonRASP,
        ]

        for klass in classes:
            prnt('Testing machine...', repr(klass))
            klass().run()

            cmd_title('New program')
            singleton = CounterMachine()
            singleton._generate_program()
            ppr(singleton.program)
            try:
                singleton.run()
            except TypeError:
                print('Inoperable program was generated :(')
            except NotImplementedError:
                print_error('Not implemented: {}'.format(klass))
            finally:
                singleton.halt()

        print_h2('Random Access Machine (multi-register counter machine)')
        ram = RandomAccessMachine()
        ram.run()
Code example #37
File: run.py  Project: christabor/jobScraper
print('Enter a spider to run: ')
print(gen.get_spiders_list())
spider = raw_input('Spider: ==> ')

# Careerbuilder
if spider == '1':
    process_first = raw_input('Write JSON to app first? y/n ==> ')
    run_all_tests = raw_input('Run all tests? y/n ==> ')
    if process_first == 'y':
        Cb.write_all_to_html()
    if run_all_tests == 'y':
        Cb.run_all()
    else:
        print('Enter a job title to run:')
        ppr(Cb.load_categories())
        keyword = raw_input('Title: ==> ')
        if keyword:
            Cb.process_one(spider, keyword)

# Onet categories
elif spider == '2':
    print('Pick a category to run:')
    print(Onet.load_categories())
    id = raw_input('Choose a job category ID ==> ')
    occupations = Onet.load_occupations(id)
    process_all = raw_input('Process all? y/n ==> ')
    if process_all == 'y':
        Onet.process_all_jobs(id)
    else:
        print('Occupations for ID {}'.format(id))
Code example #38
File: container.py  Project: DivyaShanmugam/MoAL
        for el in val_cont:
            print("Iterator: {}, membership check ({}) => {}".format(val_cont[el], el, el in val_cont))

        prnt("Value Container", val_cont.elements)

        print(val_cont.clear())
        prnt("Empty Container", val_cont.elements)
        assert len(val_cont) == 0

        ref_cont = ReferenceContainer()

        prnt("Reference Container...", ref_cont)

        meta = ReferenceContainer()
        ref_cont.insert("meta", meta)
        ppr(ref_cont.elements)

        # Change original instance
        meta["foo"] = "bar"
        print(ref_cont.elements["meta"].elements)
        print(ref_cont)
        # Reference is also updated.
        ppr(ref_cont.elements["meta"].elements == meta.elements)

        # Examples of usage/extension
        desktop = WindowWidget("MyComputer")
        desktop.attach("Toolbar", WindowWidget("toolbar-01"))
        desktop.attach("Navigation", WindowWidget("navbar-01"))
        print(desktop.retrieve("Toolbar"))
        desktop.show()
Code example #39
File: routing_table.py  Project: Androbos/MoAL
 def __str__(self):
     ppr(self.routes)
     return ''
Code example #40
def ks(feature, obj):
    ppr(type(obj))
    ppr(filter(lambda x: x.find(feature) != -1, dir(obj)))
Code example #41
File: quick_sort.py  Project: terry07/MoAL
def quick_sort(items, low=None, high=None):
    def partition(items, low, high):
        pivot_index = (low + high) // 2
        pivot_value = items[pivot_index]
        items = swap_item(items, pivot_index, high)
        store_index = low
        for k in range(low, high):
            if items[k] < pivot_value:
                items = swap_item(items, k, store_index)
                store_index += 1
        items = swap_item(items, store_index, high)
        return store_index

    num_items = len(items)
    if len(items) < 2:
        return items
    if low is None:
        low = 0
    if high is None:
        high = num_items - 1
    if low < high:
        partitioned = partition(items, low, high)
        quick_sort(items, low=low, high=partitioned - 1)
        quick_sort(items, low=partitioned + 1, high=high)
    return items


if __name__ == '__main__':
    with Section('Quick Sort'):
        ppr(run_sorting_trials(quick_sort, magnitudes=[10, 100, 1000]))
Code example #42
File: tm_gensim.py  Project: terry07/MoAL
        vec_lsi = lsi[vec_bagofwords]
        # print(vec_lsi)
        index = similarities.MatrixSimilarity(lsi[corpus])
        return index[vec_lsi]


if DEBUG:
    with Section('Topic Modeling'):

        doc_tokens = [get_filetokens('topic{}.txt'.format(
                      filenum)) for filenum in range(1, 5)]

        tm = TopicModeler(doc_tokens)

        print_h2('Token frequency')
        ppr(tm.frequency())

        # Compact tokens, scoring and remove empty values
        tm.compactify()

        print_h2('Token ID and bag-of-word as vectors')
        # Convert the document to a vector of ID and bag-of-words
        vectors = tm.get_vectors()
        print(vectors)

        # print_h2('Show (lockstep), the vectors and corresponding docs')
        # for k, doc in enumerate(doc_tokens):
        #     print('{} {}'.format(vectors[k], doc))

        print_h2('Token IDs (via `token2id`)')
        # Show the tokens and ids
Code example #43
File: test_files.py  Project: terry07/MoAL
    for filepath in _get_all_files():
        print('========== [TESTING] Filepath: {}'.format(filepath))
        filename = filepath.split('/')[-1]
        if filename not in BAD_FILES:
            try:
                if TEST_CFG:
                    output = '{}/cfgs/{}.png'.format(
                        dir, filename.replace('.py', ''))
                    os.system(
                        'pycallgraph graphviz --output-file={} -- {}'.format(
                            output, filepath))
                if ADD_STATIC_ANALYSIS:
                    os.system('pylint {}'.format(filepath))
                if TEST_FILES:
                    execfile(filepath)
                if ADD_COVERAGE:
                    print('Getting coverage for: {}'.format(filepath))
                    # Add unique info for each file to combine with later.
                    os.system(
                        'coverage run --source=MOAL -p {}'.format(filepath))
            except EXPECTED_EXCEPTIONS:
                continue
            test_results.append(_result(filepath, sys.exc_info()))
    if ADD_COVERAGE:
        # Combine unique data and then generate report with it.
        os.system('coverage combine')
        os.system('coverage html -d coverage_report')
    if TEST_FILES:
        print('\nTEST RESULTS:')
        ppr(test_results)
Code example #44
 def get_by_priority(self, priority):
     matches = []
     for item in self.items:
         if item['priority'] == priority:
             matches.append(item)
     return ppr(matches)
Code example #45
File: probability.py  Project: Androbos/MoAL
def _test(*args):
    values = [rr(1, 999) for d in range(10)]
    ppr(get_results(values))
Code example #46
File: sparse.py  Project: terry07/MoAL
__author__ = """Chris Tabor ([email protected])"""

if __name__ == '__main__':
    from os import getcwd
    from os import sys
    sys.path.append(getcwd())

from MOAL.helpers.display import Section
from MOAL.helpers.display import prnt
from MOAL.helpers.display import print_h2
from MOAL.helpers.datamaker import make_sparselist
from MOAL.helpers.datamaker import make_sparsematrix
from MOAL.helpers.text import gibberish2
from pprint import pprint as ppr
from random import randrange as rr

DEBUG = True if __name__ == '__main__' else False

if DEBUG:
    with Section('Sparse linear data structures'):
        max = 100
        density = 0.1
        items = {rr(0, max): gibberish2() for _ in range(int(max * density))}
        splist = make_sparselist(items, max)
        prnt('Sparse list', splist)

        sparse_data = {(x, x): gibberish2() for x in range(0, 10)}
        sparsematrix = make_sparsematrix(sparse_data, max, rows=10, cols=10)
        print_h2('Sparse matrix')
        ppr(sparsematrix)
Code example #47
File: basic.py  Project: Androbos/MoAL
    if length < segments:
        length = segments
    return [[_ for _ in combochars(n, segments)] for n in range(2, length)]


def cartesian(max_chars, max_nums, unique=False):
    res = [''.join(prod[0]) for prod in product(
        combochars(max_chars, 2), range(0, max_nums))]
    if unique:
        return list(set(res))
    else:
        return res


if __name__ == '__main__':
    with Section('Combinatorics'):
        prnt('Unique chars', uniqchars(10))
        fact = factorial(12)
        prnt('Final factorial amount:', fact[0])
        ppr(fact[1])
        combos = combochars(4, 2)
        prnt('Permutations of random letters', ', '.join(
            [''.join(combo) for combo in combos]))
        prnt(
            'Combinations of multiple permutations of random letters',
            group_combochars(6, segments=2))
        prnt('Cartesian product of two sets', cartesian(4, 4))
        prnt(
            'Cartesian product of two sets (unique)',
            cartesian(4, 4, unique=True))
Code example #48
File: merge_sort.py  Project: Androbos/MoAL
def merge_sort(items, iteration=0, side=None):
    # `iteration` and `side` are used for testing purposes,
    # visualizing the recursive nature of the divide and conquer algorithm.
    _len = len(items)
    if _len < 2:
        return items
    pivot = _len // 2
    # Keep subdividing based on pivot,
    # until an empty list is all that is left.
    left = items[:pivot]
    right = items[pivot:]
    # Print each side, keeping track of recursive count to visually
    # indicate how many recursive calls were made.
    # print (side if side else '[ROOT]'), (iteration * 2) * '.', left, right
    return merge(
        merge_sort(left, iteration=iteration + 1, side='left'),
        merge_sort(right, iteration=iteration + 1, side='right'))


if __name__ == '__main__':
    with Section('Merge Sort'):
        results = run_sorting_trials(merge_sort)
        ppr(results)

    with Section('Merge Sort - integers'):
        ppr(merge_sort([rr(1, 9999) for _ in range(20)]))

    with Section('Merge Sort - floating point integers'):
        ppr(merge_sort([random() * float(rr(1, 9999)) for _ in range(20)]))
Code example #49
    See "Storing a sparse matrix" for an alternative approach." """
    def __str__(self):
        divider = '-' * 40
        print(divider)
        for node, adjacent in self.nodes.iteritems():
            print('{} is adjacent to {} '.format(node, ', '.join(adjacent)))
        print(divider)
        return ''

    def __setitem__(self, node, neighbors):
        self.nodes[node] = neighbors

    def __getitem__(self, node):
        return self.nodes[node]

    def report(self, vertex):
        return self.__getitem__(vertex)


if __name__ == '__main__':
    with Section('Adjacency list'):
        AList = AdjacencyList()
        AList['A'] = ['B', 'C', 'D']
        AList['B'] = ['A', 'C', 'D']
        AList['C'] = ['A', 'B', 'D']
        AList['D'] = ['A', 'B', 'C']
        print(AList)
        ppr(AList.nodes)

        print(AList.report('B'))
Code example #50
File: hash_list.py  Project: terry07/MoAL
 def __str__(self):
     ppr(self.hash_list)
     return ''
Code example #51
 def __str__(self):
     ppr(self.selectors)
     return ''
Code example #52
    id = Column(Integer, primary_key=True)
    name = Column(String(255))
    email = Column(String(255))
    url = Column(String(255))

    def __repr__(self):
        return '<Person(name={}, email={}, url={})>'.format(
            self.name, self.email, self.url)


@test_speed
def insert_all(max_records):
    people = [random_person() for n in range(max_records)]
    prnt('Records to create:', people)
    for person in people:
        # Don't need this prop for our example schema.
        del person['address']
        db_session.add(Person(**person))


if DEBUG:
    with Section('MySQL - SQL Alchemy'):
        Base.metadata.create_all(engine)

        print_h2('Adding a bunch of records...')
        run_trials(insert_all, trials=10)

        print_h2('Reading all records...')
        recs = db_session.query(Person).all()
        ppr(recs)
Code example #53
File: pymonad_examples.py  Project: Androbos/MoAL
        # Functors
        print_h2('Functors')
        print(neg * pm.List(*range(10)))
        print(neg * pm.Just(0))
        print(neg * pm.Nothing)  # Bottom type?

        nums = sub2 * pm.List(*range(10))
        print(nums)
        # Functor + partial application and composition
        nums2 = comp_partial * pm.List(*range(4))
        print(nums2)

        # List comprehension of sub-lists from Functor with partial
        # application and composition of outer Functor list
        ppr([sub2 * pm.List(
            *range(n)) for n in comp_partial * pm.List(*nums2)])

        # Bind functor and curried function and then re-compose with new values
        print_h2('Applicative functors')
        bound = add2 * pm.List(*range(3)) & pm.List(*range(3))
        ppr([bound, neg * bound])
        # Playing around
        f = map(lambda x: pm.List(range(x)), range(10))
        ppr(f)

        f = add * monoid_range(4) & monoid_range(2)
        print(f)
        print(neg * f)

        # Bind all once
        print_h2('Applicative functors, partial applications and compositions')