Example #1
class ResultSet_test(unittest.TestCase):
    def setUp(self):
        self.theSet = ResultSet({'frequency':float,'transmittedPower':Power,'generatedPower':Power})
        self.theSet.append({'frequency':30e3,'transmittedPower':Power(30.,'dBm'),'generatedPower':Power(40.,'dBm')})
        self.theSet.append({'frequency':40e3,'generatedPower':Power(40.,'dBm')})
        
    def test_floats(self):
        self.assertTrue(all(self.theSet['frequency'] == numpy.array([30.0e3,40.0e3])))
    def test_numpySubClasses(self):
        self.assertTrue( (self.theSet['generatedPower'] == Power([40.,40.],'dBm')).all())
    def test_singleValue(self):
        self.assertTrue(self.theSet['transmittedPower'][0] == Power([1.],'W'))
    def test_nanConversion(self):
        self.assertTrue(numpy.isnan(self.theSet['transmittedPower'][1]))
    
    def test_byRow(self):
        rows = self.theSet.byRow()
        row = rows.next()
        self.assertEqual(row['frequency'] , 30e3 )
        self.assertEqual( row['transmittedPower'] , Power(30.,'dBm') )
        self.assertEqual( row['generatedPower'] , Power(40.,'dBm') )
        
        row = rows.next()
        self.assertEqual( row['frequency'] , 40e3 )
        self.assertTrue( numpy.isnan(row['transmittedPower']) )
        self.assertEqual( row['generatedPower'] , Power(40.,'dBm') )
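Taken together, these tests pin down the ResultSet contract: a column is read back by field name as an array, a field missing from an appended row reads back as NaN, and byRow() yields one record per appended row. A minimal self-contained sketch of that behaviour follows; the stand-in class is illustrative only, not the project's implementation, and uses plain lists instead of numpy/Power arrays.

import math

class SketchResultSet:
    # Illustrative stand-in for the ResultSet exercised above: rows are
    # appended as dicts, a missing field reads back as NaN, byRow() yields dicts.
    def __init__(self, fields):
        self._fields = fields      # {field name: type}, as in setUp() above
        self._rows = []

    def append(self, row):
        self._rows.append(row)

    def __getitem__(self, name):
        # Column access: pad rows that lack the field with NaN.
        return [row.get(name, float('nan')) for row in self._rows]

    def byRow(self):
        for row in self._rows:
            yield {f: row.get(f, float('nan')) for f in self._fields}

rs = SketchResultSet({'frequency': float, 'transmittedPower': float})
rs.append({'frequency': 30e3, 'transmittedPower': 1.0})
rs.append({'frequency': 40e3})
assert rs['frequency'] == [30e3, 40e3]
assert math.isnan(rs['transmittedPower'][1])
assert next(rs.byRow())['frequency'] == 30e3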
Example #2
class ResultSetXml_test(XmlTest):
    def setUp(self):
        XmlTest.setUp(self)
        self.result = ResultSet({'frequency':float,'transmittedPower':Power,'generatedPower':Power})
        self.result.append({'frequency':30e3,'transmittedPower':Power(30.,'dBm'),'generatedPower':Power(40.,'dBm')})
        self.result.append({'frequency':40e3,'generatedPower':Power(40.,'dBm')})
    def test_loopthrough(self):
        self.assertLoopThrough(ResultSet)
Example #3
def gauss_jordan(A, B, variables=None, iterations=50, eps=0.0000):
    tempA, tempB = cloneMatrix(A), cloneMatrix(B)
    B = matrixToVector(B)

    n = len(B)
    o = [i for i in xrange(n)]
    if variables == None: variables = ['x' + str(i + 1) for i in xrange(n)]

    startTime = timeit.default_timer()
    for k in xrange(0, n):
        partial_pivot(n, k, A, o)
        for i in xrange(0, n):
            if i == k: continue
            factor = A[o[i]][k] / A[o[k]][k]
            for j in xrange(k + 1, n):
                A[o[i]][j] -= factor * A[o[k]][j]
            B[o[i]] -= factor * B[o[k]]

    X = [B[o[i]] / A[o[i]][i] for i in xrange(n)]

    executionTime = timeit.default_timer() - startTime

    tables = {}
    for i in xrange(n):
        tables[variables[i]] = Table(str(variables[i]),
                                     ['Step', variables[i], 'Abs. Error'],
                                     [[1, X[i], '-']])

    return ResultSet(tempA, tempB, variables, 'Gauss-Jordan', tables,
                     vectorToMatrix(X),
                     calcPrecision([0 for i in xrange(n)], variables),
                     executionTime, 1)
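cloneMatrix, partial_pivot, Table, calcPrecision and this ResultSet constructor are project helpers not shown here. For reference, a self-contained Gauss-Jordan sketch with the same eliminate-above-and-below-the-pivot loop; every name in it is illustrative, not the project's API.

def gauss_jordan_sketch(A, B):
    # Reduce the system so each pivot column is zeroed above and below the
    # pivot, then read the solution off the diagonal.
    n = len(B)
    A = [row[:] for row in A]
    B = B[:]
    for k in range(n):
        # Simple pivoting for the sketch: swap in the largest pivot in column k.
        p = max(range(k, n), key=lambda r: abs(A[r][k]))
        A[k], A[p] = A[p], A[k]
        B[k], B[p] = B[p], B[k]
        for i in range(n):
            if i == k:
                continue
            factor = A[i][k] / A[k][k]
            for j in range(k, n):
                A[i][j] -= factor * A[k][j]
            B[i] -= factor * B[k]
    return [B[i] / A[i][i] for i in range(n)]

# 2x + y = 5, x + 3y = 10  ->  x = 1, y = 3
print(gauss_jordan_sketch([[2.0, 1.0], [1.0, 3.0]], [5.0, 10.0]))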
Example #4
def lu_decomposition(A, B, variables=None, iterations=50, eps=0.0000):
    tempA, tempB = cloneMatrix(A), cloneMatrix(B)
    B = matrixToVector(B)
    n = len(B)
    o = [i for i in xrange(n)]
    if variables == None: variables = ['x' + str(i + 1) for i in xrange(n)]

    startTime = timeit.default_timer()
    forward_elimination(n, o, A)
    Y = [B[o[0]] for i in xrange(n)]
    for i in xrange(1, n):
        Y[i] = B[o[i]] - sum([A[o[i]][j] * Y[j] for j in xrange(0, i)])

    X = [Y[n - 1] / A[o[n - 1]][n - 1] for i in xrange(n)]
    for i in xrange(n - 2, -1, -1):
        X[i] = (Y[i] - sum([A[o[i]][j] * X[j]
                            for j in xrange(i + 1, n)])) / A[o[i]][i]

    executionTime = timeit.default_timer() - startTime

    tables = {}
    for i in xrange(n):
        tables[variables[i]] = Table(str(variables[i]),
                                     ['Step', variables[i], 'Abs. Error'],
                                     [[1, X[i], '-']])

    return ResultSet(tempA, tempB, variables, 'LU-Decomposition', tables,
                     vectorToMatrix(X),
                     calcPrecision([0 for i in xrange(n)], variables),
                     executionTime, 1)
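The Y and X loops above are forward and back substitution against the factors presumably left in A by forward_elimination (multipliers below the diagonal acting as a unit lower-triangular L). A self-contained sketch of just that solve step, with L and U passed in explicitly; the names are illustrative.

def lu_solve_sketch(L, U, B):
    # Forward substitution (L y = B, unit lower-triangular L), then back
    # substitution (U x = y), mirroring the Y and X loops above.
    n = len(B)
    Y = [0.0] * n
    for i in range(n):
        Y[i] = B[i] - sum(L[i][j] * Y[j] for j in range(i))
    X = [0.0] * n
    for i in range(n - 1, -1, -1):
        X[i] = (Y[i] - sum(U[i][j] * X[j] for j in range(i + 1, n))) / U[i][i]
    return X

# With L = [[1, 0], [0.5, 1]] and U = [[2, 1], [0, 2.5]], L*U = [[2, 1], [1, 3]],
# so solving against B = [5, 10] again gives x = [1, 3].
print(lu_solve_sketch([[1.0, 0.0], [0.5, 1.0]], [[2.0, 1.0], [0.0, 2.5]], [5.0, 10.0]))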
Example #5
File: row.py Project: davekr/pypg
 def _get_rel_data_restricted(self, sql):
     attr, pk = self._restricted_table_attr, self._get_pk()
     relation_fk = Structure.get_foreign_key_for_table(
         attr, self._table_name)
     sql.add_where_condition(relation_fk, self.data[pk])
     data = Query().execute_and_fetch(**sql.build_select())
     from resultset import ResultSet
     return ResultSet(data, attr)
Example #6
def lookup_by_range(SR, from_word, to_word, field):
    index = SR.getIndex()
    lexicon = index.getLexicon()
    if index.use_stemmer:
        raise ValueError('Range search is not supported with stemming enabled')
    words = lexicon.getWordsInRange(from_word, to_word, SR.language)
    wids = lexicon.getWordIds(words, SR.language)
    return ResultSet(
        index.getStorage(field).getDocumentsForWordIds(wids),
        [(w, field) for w in words])
Example #7
def lookup_by_substring(SR, pattern, field):
    index = SR.getIndex()
    lexicon = index.getLexicon()
    if index.use_stemmer:
        raise ValueError(
            'Substring search is not supported with stemming enabled')
    words = lexicon.getWordsForSubstring(pattern, SR.language)
    wids = lexicon.getWordIds(words, SR.language)
    return ResultSet(
        index.getStorage(field).getDocumentsForWordIds(wids),
        [(w, field) for w in words])
Example #8
    def match(self, expr):
        out=None
        if isinstance(expr, alloexpr.SelectExpr):
            out=ResultSet(self, col=expr.select, order=expr.order)
        else:
            out=ResultSet(self)

        for row in self.data:
            if expr.val(row):
                out.put(row)

        out.close()

        return out
Example #9
def lookup_by_similarity(SR, pattern, field):
    index = SR.getIndex()
    lexicon = index.getLexicon()
    if index.use_stemmer:
        raise ValueError(
            'Similarity search is not supported with stemming enabled')
    words = [
        word for word, ratio in lexicon.getSimiliarWords(
            pattern, SR.similarity_ratio, SR.language)
    ]
    wids = lexicon.getWordIds(words, SR.language)
    return ResultSet(
        index.getStorage(field).getDocumentsForWordIds(wids),
        [(w, field) for w in words])
Example #10
def gauss_seidel(A, B, X0, variables=None, iterations=50, eps=0.00001):
    tempA, tempB = cloneMatrix(A), cloneMatrix(B)
    B = matrixToVector(B)
    X_new, X_old = X0[:], X0[:]
    n = len(X0)

    iterationRows = [[] for i in xrange(n)]
    ea = ['-' for i in xrange(n)]
    ea_rel = ['-' for i in xrange(n)]
    if variables == None: variables = ['x' + str(i + 1) for i in xrange(n)]
    roots = {}
    for var in variables:
        roots[var] = []

    for j in xrange(n):
        roots[variables[j]].append((1, X0[j]))
        iterationRows[j].append([1, X0[j], '-'])

    startTime = timeit.default_timer()

    for t in xrange(iterations - 1):
        max_ea = -1
        for i in xrange(n):
            sum1 = sum([A[i][k] * X_new[k] for k in xrange(0, i)])
            sum2 = sum([A[i][k] * X_old[k] for k in xrange(i + 1, n)])
            X_new[i] = (B[i] - sum1 - sum2) / A[i][i]

            ea[i] = abs(X_new[i] - X_old[i])
            ea_rel[i] = abs(X_new[i] - X_old[i]) / max(abs(X_new[i]),
                                                       abs(X_old[i]))
            max_ea = max(max_ea, ea[i])
            iterationRows[i].append([t + 2, X_new[i], ea[i]])
            roots[variables[i]].append((t + 2, X_new[i]))

        X_old = X_new[:]
        if max_ea < eps:
            break

    executionTime = timeit.default_timer() - startTime

    tables = {}
    for i in xrange(n):
        tables[variables[i]] = Table(str(variables[i]),
                                     ['Step', variables[i], 'Abs. Error'],
                                     iterationRows[i])

    return ResultSet(tempA, tempB, variables, 'Gauss-Seidel', tables,
                     vectorToMatrix(X_new), calcPrecision(ea_rel, variables),
                     executionTime, t + 2, roots)
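The inner loop above is the classic Gauss-Seidel update: components already updated this sweep feed sum1, previous-iteration components feed sum2, and iteration stops once the largest absolute change drops below eps. A self-contained sketch of just that update and stopping rule, with the table, roots and relative-error bookkeeping omitted; the names are illustrative.

def gauss_seidel_sketch(A, B, X0, iterations=50, eps=1e-5):
    # In-place update: for k < i the entry is already from this sweep, for
    # k > i it is still from the previous sweep, which is exactly Gauss-Seidel.
    n = len(B)
    X = X0[:]
    for _ in range(iterations):
        max_change = 0.0
        for i in range(n):
            s = sum(A[i][k] * X[k] for k in range(n) if k != i)
            new = (B[i] - s) / A[i][i]
            max_change = max(max_change, abs(new - X[i]))
            X[i] = new
        if max_change < eps:
            break
    return X

# Diagonally dominant system 4x + y = 9, x + 3y = 7  ->  x ~ 1.818, y ~ 1.727
print(gauss_seidel_sketch([[4.0, 1.0], [1.0, 3.0]], [9.0, 7.0], [0.0, 0.0]))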
Example #11
def lookup_by_phrase(SR, docids, words, field):
    index = SR.getIndex()
    lexicon = index.getLexicon()
    storage = index.getStorage(field)

    if index.use_stemmer:
        S = getStemmer(SR.language)
        if S:
            words = S.stem(words)

    wids = lexicon.getWordIds(words, SR.language)
    docids = [
        docid for docid in docids if storage.hasContigousWordids(docid, wids)
    ]
    return ResultSet(DocidList(docids), [(w, field) for w in words])
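This snippet relies on storage.hasContigousWordids, which is not shown; the presumed meaning is that the document's word-id sequence contains the query word ids as one contiguous, ordered run. A tiny sketch of that check, under that assumption only.

def has_contiguous_wids_sketch(doc_wids, wids):
    # True if wids occurs as a contiguous, in-order slice of doc_wids.
    n, m = len(doc_wids), len(wids)
    return any(doc_wids[i:i + m] == wids for i in range(n - m + 1))

assert has_contiguous_wids_sketch([7, 3, 9, 4], [3, 9])
assert not has_contiguous_wids_sketch([7, 3, 9, 4], [9, 3])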
Example #12
    def parsedQuery(self,
                    query,
                    params=None,
                    epoch=None,
                    expected_response_code=200,
                    database=None,
                    raise_errors=True,
                    chunked=False,
                    chunk_size=0):

        data = yield self.rawQuery(query, params, epoch,
                                   expected_response_code, database,
                                   raise_errors, chunked, chunk_size)

        results = [
            ResultSet(result, raise_errors=raise_errors)
            for result in data.get('results', [])
        ]

        returnValue(results)
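parsedQuery is written in Twisted's inlineCallbacks style (it yields the Deferred from rawQuery and finishes with returnValue), so a caller receives a Deferred that fires with one ResultSet per entry in the response's 'results' list. A hedged usage sketch; client here is a hypothetical instance of whatever class defines parsedQuery.

from twisted.internet import defer

@defer.inlineCallbacks
def print_results(client, query):
    # 'client' is hypothetical; the yield resolves to a list of ResultSet objects.
    results = yield client.parsedQuery(query)
    for result_set in results:
        print(result_set)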
Example #13
def gauss_elimination(A, B, variables=None, iterations=50, eps=0.0000):
    tempA, tempB = cloneMatrix(A), cloneMatrix(B)
    B = matrixToVector(B)
    n = len(B)
    o = [i for i in xrange(n)]
    if variables == None: variables = ['x' + str(i + 1) for i in xrange(n)]

    startTime = timeit.default_timer()
    forward_elimination(n, o, A, B)
    X = back_substitution(n, o, A, B)
    executionTime = timeit.default_timer() - startTime

    tables = {}
    for i in xrange(n):
        tables[variables[i]] = Table(str(variables[i]),
                                     ['Step', variables[i], 'Abs. Error'],
                                     [[1, X[i], '-']])

    return ResultSet(tempA, tempB, variables, 'Gauss-Elimination', tables,
                     vectorToMatrix(X),
                     calcPrecision([0 for i in xrange(n)], variables),
                     executionTime, 1)
Example #14
 def row_from_id(self, id):
     rs = ResultSet(self)
     rs.put(self.ids[id] if (id in self.ids) else [])
     return rs.close()
Example #15
 def update_and_get(self, **kwargs):
     self._validate_update(kwargs)
     self._sql.add_update_kwargs(kwargs)
     self._sql.add_returning_all()
     data = Query().execute_and_fetch(**self._sql.build_update())
     return ResultSet(data, self._table_name)
Example #16
 def insert_and_get(self, *args, **kwargs):
     self._validate_insert(args, kwargs)
     self._sql.add_insert_kwargs(kwargs)
     self._sql.add_returning_all()
     data = Query().execute_and_fetch(**self._sql.build_insert())
     return ResultSet(data, self._table_name)
Example #17
 def emptyset(self):
     return ResultSet(self)
Example #18
 def moustache(self, base={}):
     return utils.dictassign(ResultSet.moustache(self, base), {
         "name": self.name,
         "list-id": self.listid,
         "is-list": True
     })
Example #19
 def row_from_nationality(self, pays):
     return  ResultSet(self, set=self.pays[pays] if (pays in self.pays) else []).close()
Example #20
 def row_from_director(self, dir):
     return  ResultSet(self, set=self.directors[dir] if (dir in self.directors) else []).close()
Example #21
 def row_from_actor(self, act):
     return  ResultSet(self, set=self.actors[act] if (act in self.actors) else []).close()
Example #22
def lookup_word(SR, word, field):
    index = SR.getIndex()
    lexicon = index.getLexicon()

    if index.use_stemmer:
        # Stemmer support only works with disabled autoexpansion
        S = getStemmer(SR.language)
        if S:
            word = S.stem([word])[0]

        wordid = lexicon.getWordId(word, SR.language)
        if SR.autoexpand != 'off':
            raise ValueError(
                'auto expansion is only available without enabled stemmer support'
            )
        _words, _wids = [word], [wordid]

    else:

        wordid = lexicon.getWordId(word, SR.language)

        # perform autoexpansion only if the length of the given term is longer or
        # equal to the autoexpand_limit configuration parameter of the index

        if (SR.autoexpand=='always' or (SR.autoexpand=='on_miss' and not wordid)) \
            and len(word) >= index.autoexpand_limit:
            # lookup all words with 'word' as prefix
            words = list(lexicon.getWordsForRightTruncation(word, SR.language))

            # obtain wordids for words
            wids = lexicon.getWordIds(words, SR.language)

            # add the original word and wordid
            wids.append(wordid)
            words.append(word)
            _words, _wids = words, wids
        else:
            _words, _wids = [word], [wordid]

    # Thesaurus handling: check if thesaurus is set to a list of configured
    # thesauruses. If yes, perform a lookup for every word and enrich the
    # resultset

    if SR.thesaurus:
        import zope.component
        from zopyx.txng3.core.interfaces import IThesaurus

        for word in _words[:]:
            for id in SR.thesaurus:
                TH = zope.component.queryUtility(IThesaurus, id)
                if TH is None:
                    raise ValueError('No thesaurus "%s" configured' % id)

                related_terms = TH.getTermsFor(word)
                if related_terms:
                    _words.extend(related_terms)
                    wids = lexicon.getWordIds(related_terms, SR.language)
                    _wids.extend(wids)

    return ResultSet(
        index.getStorage(field).getDocumentsForWordIds(_wids),
        [(w, field) for w in _words])
Example #23
 def setUp(self):
     self.theSet = ResultSet({'frequency':float,'transmittedPower':Power,'generatedPower':Power})
     self.theSet.append({'frequency':30e3,'transmittedPower':Power(30.,'dBm'),'generatedPower':Power(40.,'dBm')})
     self.theSet.append({'frequency':40e3,'generatedPower':Power(40.,'dBm')})