def dcinflunce(Input_data, nargout=1):
    """Estimate transformer heating caused by a DC bias current.

    Port of a MATLAB routine (originally driven by
    ``xlsread_('data_dc.xlsx', 'B1:B8')`` / ``xlswrite_`` calls, left here
    as history).  All loss coefficients are empirical fits from the source
    model.

    Parameters
    ----------
    Input_data : sequence of 8 numbers
        [0] Id  - DC bias current
        [1] Sn  - rated capacity
        [2] Un  - rated voltage (read but never used in the original; dropped)
        [3] In  - rated current
        [4] Pn  - rated load loss
        [5] P0n - rated no-load loss
        [6] s   - duration, in hours
        [7] Tn  - initial temperature (same units as T)
    nargout : int, unused
        Kept only for MATLAB-port call compatibility.

    Returns
    -------
    tuple
        (Id, Ie, P1, P2, T, S): per-unit bias current, equivalent excitation
        current, the two scaled loss terms, estimated temperature, and a
        0-100 severity score (Gaussian falloff, zero at or above T = 60).
    """
    from math import exp, log  # stdlib replacements for the port's log_/exp_ helpers

    Id = Input_data[0]
    In = Input_data[3]
    Id = Id / In  # per-unit DC bias current
    # Empirical cubic fit for the equivalent excitation current.
    Ie = 0.007 + 0.131 * Id + 167.578 * Id ** 2 - 604.9 * Id ** 3
    P1 = 8.034 + 1.378 * log(Ie)    # empirical loss term 1 (per-unit)
    P2 = exp(-1.294 + 0.01 / Ie)    # empirical loss term 2 (per-unit)

    Sn = Input_data[1]
    Pn = Input_data[4]
    P0n = Input_data[5]
    P1 = P1 * Sn  # scale per-unit losses by rated capacity
    P2 = P2 * Sn
    P0 = (P1 + P2) / 2 - P0n  # extra no-load loss over the rated value
    # NOTE(review): Id was already divided by In above, so `Id / In` here
    # divides by In a second time; preserved as-is to match the original
    # formula — confirm against the source model.
    PL = Pn * ((1 + Id / In) ** 2 - 1) / 2  # extra load loss
    P = P0 + PL  # total extra loss

    s = Input_data[6] * 3600  # duration: hours -> seconds
    Tn = Input_data[7]
    Q = P * s  # extra heat energy
    T1 = Q / (2.06 * 46.5 * 1000)  # heat -> temperature rise (empirical thermal capacity)
    T = T1 + Tn

    # Severity score: Gaussian falloff with temperature, zero at or above 60.
    if T < 60:
        s = exp(-T ** 2 / 1800)
    else:
        s = 0
    S = s * 100

    print('T1=', T1, "S=", S)
    return Id, Ie, P1, P2, T, S
def transform_Logarithm(data, kwargs):
    ''' returns the logarithm of data, rescaled to the base supplied in
    kwargs (falling back to kwargDefaults[BASE]) via change of base,
    keyed under DATA_KEY '''
    base = kwargs.get(BASE, kwargDefaults[BASE])
    return {DATA_KEY: func_log(data) / log_(base)}
def results(self, query=None):
    """Run a BM25-style keyword search and return scored Asset records.

    Falls back to ``self.query`` when *query* is None; returns ``[]`` when
    no query is available at all.  Returns either a generator of
    ``(score, Asset)`` pairs (attribute-only search) or a list of such
    pairs sorted by descending score.

    NOTE(review): written for Python 2 — ``dict.iteritems`` and the
    positional ``cmp`` argument to ``sorted`` do not exist in Python 3.
    """
    if query is None:
        query = self.query
    if query is None:
        return []
    # Tokenize the query.  keywords() appears to return three term lists
    # combined here into (wanted, excluded) sets — TODO confirm against
    # its definition.
    terms = keywords(' '.join(strip(query.lower())))
    terms = (set(terms[0] + terms[1]), set(terms[2]))
    query = dict()   # filter for DocumentIndex (full-text index)
    aquery = dict()  # attribute filter for Asset
    # Pull "tag:x" / "kind:x" qualifiers out of the wanted terms; they
    # filter Asset attributes instead of the text index.
    for term in list(terms[0]):
        if ':' in term:
            terms[0].remove(term)
            l, _, r = term.partition(':')
            if l == 'tag':
                aquery.setdefault('tags', list()).append(r)
            elif l == 'kind':
                # Case-insensitive regex match on the stored class name.
                aquery.setdefault('__raw__', dict())['_cls'] = {
                        '$regex' : r,
                        '$options': 'i'
                    }
    # Attribute-only search: no text terms remain, so every match scores 1.0.
    if not terms[0] and not terms[1]:
        def gen():
            for record in Asset.objects(**aquery).only('title', 'description', 'path', 'acl').order_by('created'):
                yield 1.0, record
        return gen()
    # Wanted terms must exist in the index document; excluded terms must not.
    for term in terms[0]:
        query['terms__%s__exists' % (term, )] = True
    for term in terms[1]:
        query['terms__%s__exists' % (term, )] = False
    # Calculate the inverse document frequency for each term
    idfs = {}
    num_docs = DocumentIndex.objects.count()
    for term in terms[0]:
        term_docs = DocumentIndex.objects(terms__term=term).count()
        idfs[term] = log_((num_docs - term_docs + 0.5) / (term_docs + 0.5))
    # Get the average document length.
    avg_doc_length = sum([i.length for i in DocumentIndex.objects.only('length')])/float(num_docs)
    # BM25 free parameters: k = term-frequency saturation, b = length normalization.
    k = 2.0
    b = 0.75
    f = []        # pending futures
    results = []  # accumulated (score, doc_id) pairs

    def compute(idfs, idx, k, b, f):
        # BM25 score of one indexed document against the query terms.
        score = 0.0
        for term, q in idfs.iteritems():  # q is the term's IDF weight
            dividend = idx.terms[term] * (k + 1.0)
            relDocSize = idx.length / avg_doc_length
            # NOTE(review): classic BM25 adds the term frequency, not the
            # IDF (q), to the divisor — confirm this variant is intended.
            divisor = q + ( 1.0 - b + b * relDocSize ) * k
            termScore = (dividend / divisor) * q
            score += termScore
        return (score, idx.doc_id)

    # Score matching documents concurrently on a small thread pool.
    with futures.ThreadPoolExecutor(max_workers=5) as executor:
        for idx in DocumentIndex.objects(**query):
            f.append(executor.submit(compute, idfs, idx, k, b, f))
        for result in futures.as_completed(f):
            score, doc_id = result.result()
            results.append((score, doc_id))

    def iterresults():
        # Resolve doc_ids back to Asset records, applying attribute filters.
        for score, id_ in results:
            yield score, Asset.objects(id=id_, **aquery).only('title', 'description', 'path', 'acl').first()
    return sorted(iterresults(), lambda a, b: cmp(a[0], b[0]), reverse=True)
def log(x):
    """Natural logarithm that returns -inf for x == 0 instead of raising."""
    return -np.inf if x == 0 else log_(x)
method_diff = lambda operation: operation[:2] + 'd' + operation[2:] # log function for derivatives actually log|x| from math import log as log_ log = lambda x, *args: 0. if abs(x) < 1e-322 else log_(abs(x), *args) def operation_overload(method): def new_method(self, *args): func = getattr(self.super, method.__name__) gradients = method(self, *args) return Variable(func(*args), parents=[self] + list(args), gradients=gradients) return new_method class Variable(float): def __new__(self, *args, **kwargs): return super().__new__(self, *args) def __init__(self, *args, **kwargs): d_parents, d_gradients = None, None if len(args) > 0: x = args[0] if isinstance(x, Variable): d_parents = [x] d_gradients = [lambda x: 1.]