def build_condition_list(self, materialVariable):
    self.condition_list = []  # empty the condition list
    dm = 1e-6
    for node in self.nodes():  # loop through nodes of the graph
        for otherNode in self[node].keys():  # loop through all edges from a given node
            # materialIndex == node can't be compared directly, so test a
            # narrow band around the node value instead
            checkFrom = operator.and_(materialVariable > (node - dm),
                                      materialVariable < (node + dm))
            condIt = 0
            for cond in self[node][otherNode].keys():  # loop through all conditions attached to the graph edge
                op = self[node][otherNode][cond]['operator']
                fun = self[node][otherNode][cond]['function']  # extract function, operator, value
                val = self[node][otherNode][cond]['value']
                condExp = op(fun, val)  # provide the function & value to the operator, return result as a variable
                if condIt == 0:
                    totCond = condExp  # if this is the first condition, assign to totCond
                else:
                    # otherwise combine the condition with the previous totCond (using AND or OR);
                    # list(...) is needed under Python 3, where dict.values() is not subscriptable
                    if list(self[node][otherNode].values())[0]['combineby'] == 'or':
                        totCond = operator.or_(totCond, condExp)
                    else:
                        totCond = operator.and_(totCond, condExp)
                condIt += 1
            # When we pass this on to fn.branching.conditional, we only want to apply
            # it to particles where matIndex == node, i.e. where checkFrom == True
            combCond = operator.and_(totCond, checkFrom)
            self.condition_list.append((combCond, otherNode))
    self.condition_list.append((True, materialVariable))  # if no conditions are true, return current matId
import operator

from numpy import where  # the snippet relies on numpy's vectorized where


def N(x, epsilon=1E-4):
    '''
    Evaluate the hat function, but without the discontinuity in the
    derivative at x = 1. On [1-epsilon, 1+epsilon] we correct for the
    discontinuity by replacing the hat with a cubic on each side.
    '''
    a1 = 1./3*epsilon**-2
    a2 = -a1
    d1 = 1 - epsilon + a1*epsilon**3
    d2 = 1 - epsilon - a2*epsilon**3
    b = 0
    c = 0
    r = where(x < 0, 0, x)
    condition = operator.and_(0 <= x, x < 1-epsilon)
    r = where(condition, x, r)
    condition = operator.and_(1-epsilon <= x, x < 1)
    expr = a1*(x-1.)**3 + b*(x-1) + c*(x-1) + d1  # b and c are both zero, so the linear terms vanish
    r = where(condition, expr, r)
    condition = operator.and_(1 <= x, x < 1+epsilon)
    expr = a2*(x-1.)**3 + b*(x-1) + c*(x-1) + d2
    r = where(condition, expr, r)
    condition = operator.and_(1+epsilon <= x, x < 2)
    r = where(condition, 2-x, r)
    r = where(x >= 2, 0, r)
    return r
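# A minimal usage sketch of N (assumes numpy is available as np): the function
# is zero outside [0, 2] and peaks near 1 at x = 1, like the plain hat.
import numpy as np

x = np.linspace(-0.5, 2.5, 7)
print(N(x))  # zero at -0.5, 2.0, 2.5; x itself on [0, 1); ~1 at x = 1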
def list(self):
    ''' Show table '''
    # get page index
    current_page = int(request.GET['page'] if 'page' in request.GET else 1)
    only_trashed = True if 'trash' in request.GET and request.GET['trash'] == '1' else False
    # determine limit and offset
    limit = self.__row_per_page__
    offset = (current_page-1) * limit
    # get the data
    data_list = self.__model__.get(
        and_(self.default_criterion(), self.search_criterion()),
        limit=limit, offset=offset, only_trashed=only_trashed)
    for data in data_list:
        data.set_state_list()
    # calculate page count
    page_count = int(math.ceil(
        float(self.__model__.count(and_(self.default_criterion(),
                                        self.search_criterion()))) / float(limit)))
    # serialize the GET parameters (except the page index)
    get_pair = []
    for key in request.GET:
        if key == 'page' or request.GET[key] == '':
            continue
        get_pair.append(key + '=' + str(request.GET[key]))
    get_pair = '&'.join(get_pair)
    # load the view
    self._setup_view_parameter()
    self._set_view_parameter(self.__model_name__+'_list', data_list)
    self._set_view_parameter(self.__model_name__.title(), self.__model__)
    self._set_view_parameter('__token', self._set_token())
    self._set_view_parameter('current_page', current_page)
    self._set_view_parameter('page_count', page_count)
    self._set_view_parameter('search_input', self.search_input())
    self._set_view_parameter('serialized_get', get_pair)
    self._set_view_parameter('only_trashed', only_trashed)
    return self._load_view('list')
def test_jbool_functions_fexprs(self):
    jl = JeevesLib
    jl.clear_cache()
    x = jl.mkLabel('x')
    # index into the context tuple instead of Python 2's tuple-unpacking lambda
    jl.restrict(x, lambda ctxt: ctxt[0] == 42)
    for lh in (True, False):
        for ll in (True, False):
            for rh in (True, False):
                for rl in (True, False):
                    l = jl.mkSensitive(x, lh, ll)
                    r = jl.mkSensitive(x, rh, rl)
                    # each concretization is repeated, presumably to exercise the cache
                    self.assertEquals(jl.concretize((42, 0), l and r), operator.and_(lh, rh))
                    self.assertEquals(jl.concretize((42, 0), l and r), operator.and_(lh, rh))
                    self.assertEquals(jl.concretize((10, 0), l and r), operator.and_(ll, rl))
                    self.assertEquals(jl.concretize((10, 0), l and r), operator.and_(ll, rl))
def convertSpeedDirn(theta, rho):
    """
    (modified from the MATLAB compass2cart function)

    COMPASS2CART convert speed and direction data (degN) into
    cartesian coordinates.

    COMPASS2CART(THETA,RHO) convert the vector rho (e.g. speed) with
    direction theta (degree North) into cartesian coordinates u and v.

    note: theta is in degrees and between 0 and 360.
    """
    try:
        # scalar input
        if theta >= 0 and theta < 90:
            theta = np.abs(theta - 90)
        elif theta >= 90 and theta <= 360:
            theta = np.abs(450 - theta)
    except ValueError:
        # array input: the scalar comparison above raises, so mask instead
        idx = operator.and_(theta >= 0., theta < 90.)
        theta[idx] = np.abs(theta[idx] - 90.)
        idx = operator.and_(theta >= 90., theta <= 360.)
        theta[idx] = np.abs(450. - theta[idx])  # index the right-hand side too, so only the masked entries change

    u, v = pol2cart(theta*np.pi/180, rho)
    return u, v
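# pol2cart is assumed to be the usual polar-to-cartesian helper; a minimal
# stand-in for experimenting with convertSpeedDirn:
import numpy as np

def pol2cart(theta_rad, rho):
    # standard polar-to-cartesian conversion (angle in radians)
    return rho * np.cos(theta_rad), rho * np.sin(theta_rad)

# a 1 m/s current toward compass east (90 degN) should map to (u, v) = (1, 0)
u, v = convertSpeedDirn(90.0, 1.0)
print(u, v)  # approximately (1.0, 0.0)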
import operator

from numpy import where


def f(t, T):
    condition = operator.and_(0 < t, t <= T/2.0)
    x = where(condition, 1, t)
    x = where(t == T/2.0, 0, x)
    condition = operator.and_(T/2.0 < t, t < T)
    x = where(condition, -1, x)
    return x
def Nv(x):
    r = where(x < 0, 0.0, x)
    condition = operator.and_(0 <= x, x < 1)
    r = where(condition, x, r)
    condition = operator.and_(1 <= x, x < 2)
    r = where(condition, 2-x, r)
    r = where(x >= 2, 0.0, r)
    return r
def Nv2(x):
    r = x.copy()  # avoid modifying x in-place
    r[x < 0.0] = 0.0
    condition = operator.and_(0 <= x, x < 1)
    r[condition] = x[condition]
    condition = operator.and_(1 <= x, x < 2)
    r[condition] = 2 - x[condition]
    r[x >= 2] = 0.0
    return r
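# Quick equivalence check between the where-based Nv and the boolean-mask Nv2
# (assumes numpy's where and the operator module are imported as above):
import numpy as np

x = np.linspace(-1, 3, 9)
assert np.allclose(Nv(x), Nv2(x))  # both give the hat: 0 outside [0, 2], peak 1 at x = 1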
def writeCaseCallingLines(self, pCallingTexts, pCustomOnly, pFD):
    # for all the case changes that are needed, writes a line out to call
    # the method that makes that change
    for iCallingText in pCallingTexts:
        if operator.and_(pCustomOnly == True, iCallingText.find("_clink") != -1):
            pFD.write(iCallingText + "(l_make_changes?, p_case_name)\n")
        if operator.and_(pCustomOnly == False, iCallingText.find("_clink") == -1):
            pFD.write(iCallingText + "(l_make_changes?, p_case_name)\n")
def checkVerticalBounds(self, x, y, z):
    """
    Checks that particles are not above the surface or below the seabed.

    (Artificially moves them to the surface or bed if they are.)
    """
    SMALL = 0.001
    zbed = -self.z_w[self.Nk-1]  # set the bottom one layer above the seabed

    # find the free surface and seabed at the particle locations
    if not self.interp_method == 'mesh':
        eta_P = self.Hinterp(x, y, z, self.eta)
        h_P = self.Hinterp(x, y, z, zbed)
    else:
        ind = self.UVWinterp.cellind
        eta_P = self.eta[ind]
        h_P = zbed[ind]

    # clip particles to just below the surface / just above the bed,
    # ignoring particles outside the grid (ind == -1);
    # note: ind is only set on the 'mesh' interpolation path
    indtop = operator.and_(z > eta_P, ind != -1)
    indbot = operator.and_(z < h_P, ind != -1)
    z[indtop] = eta_P[indtop] - SMALL
    z[indbot] = h_P[indbot] + SMALL

    return z
def getAllProjects(self, state=None, next=False):
    query = meta.Session.query(Project)
    if next:
        projects = query.filter(and_(
            self.id == Project.realm_id,
            state == Project.state
        )).order_by(sa.desc(Project.due)).all()
    else:
        projects = query.filter(and_(
            self.id == Project.realm_id,
            and_(state == Project.state, 'next' != Project.name)
        )).order_by(sa.desc(Project.due)).all()
    return projects
def Nv1(x):
    condition1 = x < 0
    condition2 = operator.and_(0 <= x, x < 1)
    condition3 = operator.and_(1 <= x, x < 2)
    condition4 = x >= 2
    # condition1 and condition4 are covered directly by the zero initialization
    r = np.zeros_like(x, dtype=float)
    r = np.where(condition2, x, r)
    r = np.where(condition3, 2-x, r)
    return r
def update_request(self, userId, groupId, adminId, response):
    u = self.requestTable.update(and_(
        and_(self.requestTable.c.user_id == userId,
             self.requestTable.c.group_id == groupId),
        self.requestTable.c.response_date == None))
    now = datetime.now(UTC)
    session = getSession()
    session.execute(u, params={'responding_user_id': adminId,
                               'response_date': now,
                               'accepted': response})
    mark_changed(session)
def Nv2(x):
    condition1 = x < 0
    condition2 = operator.and_(0 <= x, x < 1)
    condition3 = operator.and_(1 <= x, x < 2)
    condition4 = x >= 2
    r = np.zeros(len(x))
    r[condition1] = 0.0  # already zero, kept for symmetry with the other branches
    r[condition2] = x[condition2]
    r[condition3] = 2 - x[condition3]
    r[condition4] = 0.0
    return r
def f(t, T):
    cond1 = operator.and_(0 <= t, t < T / 2.)
    cond2 = abs(t - T / 2.) < 1E-16
    cond3 = operator.and_(T / 2. < t, t <= T)
    # out-of-range check: t below 0 OR above T (and_ could never be true here)
    cond4 = operator.or_(t < 0, t > T)
    r = np.zeros(len(t))
    r[cond1] = 1
    r[cond2] = 0
    r[cond3] = -1
    r[cond4] = 111  # error code
    if len(r[r == 111]) > 0:
        print('Error: t must be between 0 and T')
        r = None
    return r
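# A short demo of the square wave over one period (assumes the numpy and
# operator imports of the surrounding snippets):
import numpy as np

T = 2.0
t = np.linspace(0, T, 9)
print(f(t, T))  # [ 1.  1.  1.  1.  0. -1. -1. -1. -1.]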
import operator

import numpy as np
from numpy import pi


def atang(xy):
    # quadrant-aware arctangent in degrees; xy[0] must be non-zero
    den = xy[0]**(-1)
    alpha = np.arctan(xy[1]*den)*180.0/pi
    condition1 = operator.and_(xy[0] < 0.0, xy[1] < 0.0)  # third quadrant
    condition2 = operator.and_(xy[0] > 0.0, xy[1] > 0.0)  # first quadrant
    condition3 = operator.and_(xy[0] > 0.0, xy[1] < 0.0)  # fourth quadrant
    condition4 = operator.and_(xy[0] < 0.0, xy[1] > 0.0)  # second quadrant
    condition23 = operator.or_(condition2, condition3)
    r = np.where(condition4, alpha + 180.0,
                 np.where(condition23, alpha,
                          np.where(condition1, alpha - 180.0, 0.0)))
    return r
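# numpy's arctan2 already does this quadrant bookkeeping; a quick cross-check
# with one point per quadrant:
import numpy as np

xy = np.array([[-1.0, 1.0, 1.0, -1.0],
               [-1.0, 1.0, -1.0, 1.0]])
print(atang(xy))                             # [-135.   45.  -45.  135.]
print(np.degrees(np.arctan2(xy[1], xy[0])))  # same values from arctan2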
def piecewise_vec(x, data):
    # data[i] = (value, left_endpoint); the last entry only supplies the right endpoint
    r = np.zeros(len(x))
    for i in range(len(data) - 1):  # range, not Python 2's xrange
        cond = operator.and_(data[i][1] <= x, x < data[i + 1][1])
        # the or_ makes the final endpoint inclusive (the last iteration wins)
        cond = operator.or_(cond, x == data[-1][1])
        r[cond] = data[i][0]
    return r
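# A hedged usage sketch, with the (value, breakpoint) layout inferred from the
# indexing above (assumes numpy and operator are imported):
import numpy as np

data = [(1, 0.0), (2, 1.0), (None, 2.0)]  # step function: 1 on [0, 1), 2 on [1, 2]
x = np.array([0.0, 0.5, 1.0, 1.5, 2.0])
print(piecewise_vec(x, data))  # [1. 1. 2. 2. 2.]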
def test_percentile_nasty_partitions(self):
    # Test percentile with nasty partitions: divide up 5 assets into quartiles.
    # There isn't a nice mathematical definition of correct behavior here,
    # so for now we guarantee the behavior of numpy.nanpercentile. This is
    # mostly for regression testing in case we write our own specialized
    # percentile calculation at some point in the future.
    data = arange(25, dtype=float).reshape(5, 5) % 4
    quartiles = range(4)
    filter_names = ['pct_' + str(q) for q in quartiles]

    graph = TermGraph(
        {
            name: self.f.percentile_between(q * 25.0, (q + 1) * 25.0)
            for name, q in zip(filter_names, quartiles)
        }
    )
    results = self.run_graph(
        graph,
        initial_workspace={self.f: data},
        mask=self.build_mask(ones((5, 5))),
    )

    for name, quartile in zip(filter_names, quartiles):
        result = results[name]
        lower = quartile * 25.0
        upper = (quartile + 1) * 25.0
        expected = and_(
            nanpercentile(data, lower, axis=1, keepdims=True) <= data,
            data <= nanpercentile(data, upper, axis=1, keepdims=True),
        )
        check_arrays(result, expected)
def tsearch(self, xin, yin, MAXNODES=8):
    """
    Vectorized version of tsearch
    """
    xyin = np.vstack((xin, yin)).T
    node = self.findnearest(xyin)
    Np = xin.shape[0]
    cell = -1*np.ones((Np, MAXNODES), dtype=np.int32)
    for nn in range(Np):
        p2c = self.my_pnt2cells(node[nn])
        cell[nn, 0:len(p2c)] = p2c

    cellind = -1*np.ones((Np,), dtype=np.int32)
    for ii in range(MAXNODES):
        # only test points that still lack a cell and have a candidate in this slot
        ind = op.and_(cell[:, ii] != -1, cellind == -1)
        if any(ind):
            ind2 = self.inCellVec(cell[ind, ii], xin[ind], yin[ind])
            ind3 = np.where(ind)
            cellind[ind3[0][ind2]] = cell[ind3[0][ind2], ii]

    return cellind
def select_given_all_true(self, conditions, cols_to_select='all'):
    """Select all the listed columns and return all entries where
    all the conditions are true. Returns a dataframe.

    @param conditions      list of tuples describing conditions upon each
                           column: (colname/index, 'operator', value)
    @param cols_to_select  list of column names OR list of column numbers,
                           or select all columns

    For example:
    select_given_all_true([("uband", "<", 21), ("redshift", ">=", 0.1)])
    select_given_all_true([("uband", "<", 21), ("redshift", ">=", 0.1)], ["ra", "dec"])
    select_given_all_true([(10, "<", 21), (1, ">=", 0.1)], [2, 3])
    """
    # check all the column names to return
    if isinstance(cols_to_select, list):
        for column in cols_to_select:
            self._check_column_valid(column)

    # build the combined condition, starting with everything true
    final_condition = pd.Series(np.ones(self._data.shape[0], dtype=bool))
    for condition in conditions:
        condition_col = condition[0]
        op = condition[1]
        val = condition[2]
        print("Adding condition that column:", condition_col, op, val)
        final_condition = operator.and_(final_condition,
                                        ops[op](self._data[condition_col], val))

    if isinstance(cols_to_select, list):
        return self._data[final_condition][cols_to_select]
    else:
        return self._data[final_condition]
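# The method assumes an `ops` table mapping operator strings to functions;
# a minimal version built from the operator module would look like this:
import operator

ops = {
    "<": operator.lt,
    "<=": operator.le,
    ">": operator.gt,
    ">=": operator.ge,
    "==": operator.eq,
}
# e.g. ops["<"](df["uband"], 21) gives the same boolean Series as df["uband"] < 21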
def building_level_rooms_json(level, building_id=None, building_shortname=None):
    building = facilities.determine_building(id=building_id,
                                             shortname=building_shortname)

    if building is None:
        flash(u"Gebäude existiert nicht!", 'error')  # "Building does not exist!"
        abort(404)

    all_users = bool(request.args.get('all_users', 0, type=int))

    # We need to alias User, otherwise sqlalchemy selects User.id as user_id,
    # which collides with the joined-loaded user.current_properties.user_id.
    user = aliased(User)
    rooms_users_q = (session.session.query(Room, user)
                     .options(joinedload(user.current_properties))
                     .filter(and_(Room.building == building, Room.level == level))
                     .join(user))
    if not all_users:
        rooms_users_q = (
            rooms_users_q.join(user.current_properties_maybe_denied)
            .filter(CurrentProperty.property_name == 'network_access')
        )

    level_inhabitants = defaultdict(list)  # idiomatic spelling of defaultdict(lambda: [])
    for room, user in rooms_users_q.all():
        level_inhabitants[room].append(user)

    return jsonify(items=[{
        'room': {
            'href': url_for(".room_show", room_id=room.id),
            'title': "{:02d} - {}".format(level, room.number)
        },
        'inhabitants': [user_button(i) for i in inhabitants]
    } for room, inhabitants in level_inhabitants.items()])
def get_queryset(self):
    from functools import reduce  # reduce lives in functools under Python 3

    # split the querystring
    keywords = self.kwargs["query"].split(" ")
    # AND the words together
    filter_keywords = reduce(operator.and_,
                             (Q(title__icontains=keyword) for keyword in keywords))
    # filter queryset
    return Story.objects.filter(operator.and_(filter_keywords, Q(temporary=False)))
def index():
    """Fills and renders the front page index.html template

    Only display recent results when they're within the past ~three months.
    """
    recent_time = datetime.datetime.now() - datetime.timedelta(days=90)
    recent_results = (
        Race.query
        .join(Participant, Race.id == Participant.race_id)
        .filter(Race.date > recent_time)
        .group_by(Race.id)
        .having(func.count(Participant.id) > 0))
    r1 = recent_results.subquery('r1')
    r2 = recent_results.subquery('r2')
    # anti-join: keep each class's newest race (no r2 row with a later date)
    latest_races = (
        db.session.query(r1)
        .with_entities(
            r1.c.id.label('id'),
            r1.c.date.label('date'),
            RaceClass.name.label('class_name'))
        .join(r2, and_(r1.c.class_id == r2.c.class_id,
                       r1.c.date < r2.c.date), isouter=True)
        .join(RaceClass, RaceClass.id == r1.c.class_id)
        .filter(r2.c.id.is_(None))
        .order_by(r1.c.date.desc(), RaceClass.id))
    races = latest_races.all()
    return render_template('index.html', races=races)
def qqplot(self, percentiles=[1., 5., 25., 50., 75., 95., 99.],
           ylims=None, **kwargs):
    """
    Quantile-quantile plot
    """
    idx = operator.and_(~np.isnan(self.TSmod.y), ~np.isnan(self.TSobs.y))
    q_mod = np.percentile(self.TSmod.y[idx], percentiles)
    q_obs = np.percentile(self.TSobs.y[idx], percentiles)

    if ylims is None:
        ylims = self.ylims

    # scale the marker size
    sizes = (1 - np.abs(np.array(percentiles) - 50)/50)*50

    h1 = plt.scatter(q_obs, q_mod, s=sizes, **kwargs)
    plt.plot([ylims[0], ylims[1]], [ylims[0], ylims[1]], 'k--')

    ax = plt.gca()
    ax.set_aspect('equal')
    plt.xlim(ylims)
    plt.ylim(ylims)
    plt.grid(b=True)

    return h1, ax
import operator

import numpy as np
from numpy import pi, sin


def smoothed_Heaviside(x, e=1E-2):
    # Heaviside step with a sine-smoothed transition on [-e, e]
    cond = operator.and_(-e <= x, x <= e)
    r = np.zeros(len(x))
    r[x < -e] = 0.0
    r[cond] = 0.5 + x[cond] / (2 * e) + 1 / (2 * pi) * sin(pi * x[cond] / e)
    r[x > e] = 1.0
    return r
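# A quick check of the limiting values: 0 well below the step, 0.5 at the
# origin, 1 well above, with the smooth ramp meeting both ends exactly.
x = np.array([-1.0, -1E-2, 0.0, 1E-2, 1.0])
print(smoothed_Heaviside(x))  # [0.  0.  0.5 1.  1. ]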
def buildSWField(self, pSheet, pRow): # Looks through the row given in pRow and constructs a # XLSToSWField object that contains the data, returns the field record. lUsed = pSheet.cell_value(pRow, SSHEETCOLUMN_MAPFIELD_P) if operator.or_(lUsed.lower() == 'no', lUsed.lower() == 'no-temporary'): raise XLSToSWExceptions.FieldNotMapped( repr(pSheet) + ':' + repr(pRow)) lFieldDefaultValue = '' lClassName = pSheet.cell_value(pRow, SSHEETCOLUMN_PNITABLENAME).strip() lFieldName = pSheet.cell_value(pRow, SSHEETCOLUMN_PNIATTRIBUTENAME).strip() lFieldExternalName = pSheet.cell_value( pRow, SSHEETCOLUMN_PNITABLEEXTERNALNAME).strip() lFieldType = pSheet.cell_value(pRow, SSHEETCOLUMN_PNIATTRIBUTETYPE).strip() lFieldDefaultValue = pSheet.cell_value( pRow, SSHEETCOLUMN_PNIATTRIBUTEDEFAULTVALUE) lFieldLength = pSheet.cell_value(pRow, SSHEETCOLUMN_PNIATTRIBUTELENGTH) lFieldPriority = pSheet.cell_value(pRow, SSHEETCOLUMN_PNIATTRIBUTEPRIORITY) lFieldText = self.buildSWFieldComment(pSheet, pRow) lFieldFromTable = pSheet.cell_value( pRow, SSHEETCOLUMN_FOREIGNTABLENAME).strip() lFieldFromField = pSheet.cell_value( pRow, SSHEETCOLUMN_FOREIGNATTRIBUTENAME).strip() lFeaturePoint = pSheet.cell_value( pRow, SSHEETCOLUMN_FEATUREPOINTDESCRIPTION) if operator.and_(lFeaturePoint != "", self.s_show_features_p == True): print("----------Feature " + repr(lFeaturePoint)) lField = XLSToSWField.XLSToSWField(lClassName, lFieldName, lFieldType) lField.s_field_external_name = lFieldExternalName if lFieldLength != '': lField.s_field_length = lFieldLength if lFieldPriority != '': lField.s_field_priority = lFieldPriority if lFieldText != '': lField.s_field_comment = lFieldText if lFieldDefaultValue != '': lField.s_field_default_value = lFieldDefaultValue if lField.fieldType().lower() == "join": lField.s_field_join_type = pSheet.cell_value( pRow, SSHEETCOLUMN_PNIJOINTYPE) lField.s_field_join_to = pSheet.cell_value(pRow, SSHEETCOLUMN_PNIJOINTO) print("is a valid join " + repr(lField.isValidJoin())) if lField.isValidJoin() == False: print("found an invalid join ") if lFieldFromTable != '': lField.s_field_from_table = lFieldFromTable if lFieldFromField != '': lField.s_field_from_field = lFieldFromField lField.showMe() return lField
def pause_count(records, min_pause, max_pause):
    # The meaning of minVol is still the minimum volume to count, but now it
    # determines exclusion not inclusion.
    silences = [x for x in records if operator.eq(0, int(x[3]))]
    # note: i = 0 pairs the first record with the last one (records[-1])
    time_diffs = [int(records[i][0]) - int(records[i - 1][0])
                  for i in range(len(records))]
    # list comprehensions instead of filter(), so len() works under Python 3
    pauses = [x for x in time_diffs
              if operator.and_(operator.gt(x, min_pause), operator.lt(x, max_pause))]
    return len(pauses) / float(len(records) - 1)
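# Records are assumed to be (timestamp, ..., ..., volume) rows, as implied by
# the indexing above; a synthetic sanity check:
records = [(0, 0, 0, 5), (100, 0, 0, 0), (400, 0, 0, 6), (450, 0, 0, 7)]
print(pause_count(records, 50, 200))  # 0.333...: one gap (100) lies in (50, 200), over 3 transitions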
def findField(self, pClassName, pFieldName):
    # Returns the XLSToSWField for the pClassName, pFieldName pair
    for iField in self.s_fields:
        if operator.and_((iField.className() == pClassName),
                         (iField.fieldName() == pFieldName)):
            return iField
    return False
def filtro_item(item, **parametros):
    """
    Return True only if every keyword parameter matches the corresponding
    item field (case-insensitive, whitespace-stripped comparison).
    """
    resultado = True
    for valor in parametros:
        resultado = operator.and_(resultado, operator.eq(
            str(item[valor]).strip().upper(),
            str(parametros[valor]).strip().upper()))
    return resultado
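# A short usage sketch with hypothetical fields (assumes operator is imported
# in filtro_item's module):
item = {"ciudad": " Madrid ", "tipo": "LIBRO"}
print(filtro_item(item, ciudad="madrid", tipo="libro"))    # True
print(filtro_item(item, ciudad="madrid", tipo="revista"))  # False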
def get_object_list(self, request):
    current_user = Users.objects.get(email=request.user.username)
    client_invoices = [c.invoices_set.all() for c in current_user.clients_set.all()]
    if not client_invoices:
        # guard before reduce(): reducing an empty sequence raises TypeError
        return InvoiceTransaction.objects.none()
    invoices = reduce(operator.or_, client_invoices)
    if not invoices:
        return InvoiceTransaction.objects.none()
    queryset = reduce(operator.or_,
                      [t.invoicetransaction_set.all() for t in invoices])
    return operator.and_(
        super(HiPayInvoice, self).get_object_list(request).all(),
        queryset).distinct()
def and_(self, a, b): return operator.and_(a, b)
def __rand__(self, y): return NonStandardInteger(operator.and_(y, self.val))
def testOperators(self): with self.cached_session(): var_f = variables.Variable([2.0]) add = var_f + 0.0 radd = 1.0 + var_f sub = var_f - 1.0 rsub = 1.0 - var_f mul = var_f * 10.0 rmul = 10.0 * var_f div = var_f / 10.0 rdiv = 10.0 / var_f lt = var_f < 3.0 rlt = 3.0 < var_f le = var_f <= 2.0 rle = 2.0 <= var_f gt = var_f > 3.0 rgt = 3.0 > var_f ge = var_f >= 2.0 rge = 2.0 >= var_f neg = -var_f abs_v = abs(var_f) var_i = variables.Variable([20]) mod = var_i % 7 rmod = 103 % var_i var_b = variables.Variable([True, False]) and_v = operator.and_(var_b, [True, True]) or_v = operator.or_(var_b, [False, True]) xor_v = operator.xor(var_b, [False, False]) invert_v = ~var_b rnd = np.random.rand(4, 4).astype("f") var_t = variables.Variable(rnd) slice_v = var_t[2, 0:0] var_m = variables.Variable([[2.0, 3.0]]) matmul = var_m.__matmul__([[10.0], [20.0]]) rmatmul = var_m.__rmatmul__([[10.0], [20.0]]) variables.global_variables_initializer().run() self.assertAllClose([2.0], self.evaluate(add)) self.assertAllClose([3.0], self.evaluate(radd)) self.assertAllClose([1.0], self.evaluate(sub)) self.assertAllClose([-1.0], self.evaluate(rsub)) self.assertAllClose([20.0], self.evaluate(mul)) self.assertAllClose([20.0], self.evaluate(rmul)) self.assertAllClose([0.2], self.evaluate(div)) self.assertAllClose([5.0], self.evaluate(rdiv)) self.assertAllClose([-2.0], self.evaluate(neg)) self.assertAllClose([2.0], self.evaluate(abs_v)) self.assertAllClose([True], self.evaluate(lt)) self.assertAllClose([False], self.evaluate(rlt)) self.assertAllClose([True], self.evaluate(le)) self.assertAllClose([True], self.evaluate(rle)) self.assertAllClose([False], self.evaluate(gt)) self.assertAllClose([True], self.evaluate(rgt)) self.assertAllClose([True], self.evaluate(ge)) self.assertAllClose([True], self.evaluate(rge)) self.assertAllClose([6], self.evaluate(mod)) self.assertAllClose([3], self.evaluate(rmod)) self.assertAllClose([True, False], self.evaluate(and_v)) self.assertAllClose([True, True], self.evaluate(or_v)) self.assertAllClose([True, False], self.evaluate(xor_v)) self.assertAllClose([False, True], self.evaluate(invert_v)) self.assertAllClose(rnd[2, 0:0], self.evaluate(slice_v)) self.assertAllClose([[80.0]], self.evaluate(matmul)) self.assertAllClose([[20.0, 30.0], [40.0, 60.0]], self.evaluate(rmatmul))
def __and__(self, y): return NonStandardInteger(operator.and_(self.val, y))
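# A minimal sketch of the wrapper class these dunder methods belong to (the
# real NonStandardInteger has many more methods; `val` holds the wrapped int):
import operator

class NonStandardInteger:
    def __init__(self, val):
        self.val = val

    def __and__(self, y):   # handles self & y
        return NonStandardInteger(operator.and_(self.val, y))

    def __rand__(self, y):  # handles y & self when type(y) doesn't know about us
        return NonStandardInteger(operator.and_(y, self.val))

print((NonStandardInteger(6) & 3).val)  # 2
print((6 & NonStandardInteger(3)).val)  # 2, via __rand__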
#!/usr/bin/python3
""" Bitwise AND """
from operator import and_

x = 1        # 0001
a = x & 1    # bitwise AND: 0001
b = and_(x, 1)
print(a, b)  # 1 1
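# operator.and_ mirrors the & operator, not the short-circuiting `and` keyword:
print(and_(6, 3))         # 2  (0110 & 0011 == 0010)
print(and_(True, False))  # False (bools are ints, so & behaves like logical AND here)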
def _create_methods(arith_method, radd_func, comp_method, bool_method, use_numexpr, special=False, default_axis='columns'): # creates actual methods based upon arithmetic, comp and bool method # constructors. # NOTE: Only frame cares about default_axis, specifically: special methods # have default axis None, whereas flex methods have default axis 'columns' # if we're not using numexpr, then don't pass a str_rep if use_numexpr: op = lambda x: x else: op = lambda x: None if special: def names(x): if x[-1] == "_": return "__%s_" % x else: return "__%s__" % x else: names = lambda x: x radd_func = radd_func or operator.add # Inframe, all special methods have default_axis=None, flex methods have # default_axis set to the default (columns) new_methods = dict( add=arith_method(operator.add, names('add'), op('+'), default_axis=default_axis), radd=arith_method(radd_func, names('radd'), op('+'), default_axis=default_axis), sub=arith_method(operator.sub, names('sub'), op('-'), default_axis=default_axis), mul=arith_method(operator.mul, names('mul'), op('*'), default_axis=default_axis), truediv=arith_method(operator.truediv, names('truediv'), op('/'), truediv=True, fill_zeros=np.inf, default_axis=default_axis), floordiv=arith_method(operator.floordiv, names('floordiv'), op('//'), default_axis=default_axis, fill_zeros=np.inf), # Causes a floating point exception in the tests when numexpr # enabled, so for now no speedup mod=arith_method(operator.mod, names('mod'), None, default_axis=default_axis, fill_zeros=np.nan), pow=arith_method(operator.pow, names('pow'), op('**'), default_axis=default_axis), # not entirely sure why this is necessary, but previously was included # so it's here to maintain compatibility rmul=arith_method(operator.mul, names('rmul'), op('*'), default_axis=default_axis, reversed=True), rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-'), default_axis=default_axis, reversed=True), rtruediv=arith_method(lambda x, y: operator.truediv(y, x), names('rtruediv'), op('/'), truediv=True, fill_zeros=np.inf, default_axis=default_axis, reversed=True), rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x), names('rfloordiv'), op('//'), default_axis=default_axis, fill_zeros=np.inf, reversed=True), rpow=arith_method(lambda x, y: y ** x, names('rpow'), op('**'), default_axis=default_axis, reversed=True), rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'), default_axis=default_axis, fill_zeros=np.nan, reversed=True), ) new_methods['div'] = new_methods['truediv'] new_methods['rdiv'] = new_methods['rtruediv'] # Comp methods never had a default axis set if comp_method: new_methods.update(dict( eq=comp_method(operator.eq, names('eq'), op('==')), ne=comp_method(operator.ne, names('ne'), op('!='), masker=True), lt=comp_method(operator.lt, names('lt'), op('<')), gt=comp_method(operator.gt, names('gt'), op('>')), le=comp_method(operator.le, names('le'), op('<=')), ge=comp_method(operator.ge, names('ge'), op('>=')), )) if bool_method: new_methods.update(dict( and_=bool_method(operator.and_, names('and_'), op('&')), or_=bool_method(operator.or_, names('or_'), op('|')), # For some reason ``^`` wasn't used in original. xor=bool_method(operator.xor, names('xor'), op('^')), rand_=bool_method(lambda x, y: operator.and_(y, x), names('rand_'), op('&')), ror_=bool_method(lambda x, y: operator.or_(y, x), names('ror_'), op('|')), rxor=bool_method(lambda x, y: operator.xor(y, x), names('rxor'), op('^')) )) new_methods = dict((names(k), v) for k, v in new_methods.items()) return new_methods
# --------------- Python 10 ---------------
# ----------- Logical Operations -----------

import operator

x = True
y = False

# Binary bitwise-AND operator (&):
print("binary AND operator - x&y is: ", end="")
print(operator.and_(x, y))

# Binary bitwise-OR operator (|):
print("binary OR operator - x|y is: ", end="")
print(operator.or_(x, y))

# Unary logical negation (not x); note operator.invert is the bitwise ~:
print("unary negation operator - not x: ", end="")
print(operator.not_(x))

'''
Operand 1   Operand 2   and     or
True        True        True    True
True        False       False   True
False       True        False   True
False       False       False   False
'''
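# Unlike the `and` keyword, operator.and_ always evaluates both arguments,
# since they are computed before the call; a quick demonstration:
def side_effect():
    print("evaluated!")
    return True

False and side_effect()               # prints nothing (short-circuits)
operator.and_(False, side_effect())   # prints "evaluated!" (both args evaluated)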
def test_and_(a: bool, b: bool): assert op.and_(a)(b) == operator.and_(a, b)
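# The test above compares a curried `op.and_` against the stdlib form; a
# sketch of what such a curried wrapper might look like (the real `op` module
# may differ):
import operator

def curry2(f):
    return lambda a: lambda b: f(a, b)

class op:  # hypothetical stand-in
    and_ = staticmethod(curry2(operator.and_))

assert op.and_(True)(False) == operator.and_(True, False)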
class TVMScriptParser(Transformer): """Synr AST visitor pass which finally lowers to TIR. Notes for Extension ------------------- 1. To support a new type of AST node, add a function transform_xxx(). 2. To support new functions, add the function to the appropriate registry: We divide allowed function calls in TVM script into 3 categories, intrin, scope_handler and special_stmt. 1. intrin functions are low level functions like mod, load, and constants. They correspond to a tir `IRNode`. They must have a return value. The user can register intrin functions for the parser to use. 2. scope_handler functions have no return value. They take two arguments: the parser and the AST node. scope_handler functions are used in with and for statements. 3. special_stmt functions handle cases that do not have a corresponding tir `IRNode`. These functions take the parser and the AST node as arguments and may return a value. When visiting a Call node, we check the special_stmt registry first. If no registered function is found, we then check the intrin registry. When visiting With node, we check the with_scope registry. When visiting For node, we check the for_scope registry. """ _binop_maker = { ast.BuiltinOp.Add: tvm.tir.Add, ast.BuiltinOp.Sub: tvm.tir.Sub, ast.BuiltinOp.Mul: tvm.tir.Mul, ast.BuiltinOp.Div: tvm.tir.Div, ast.BuiltinOp.FloorDiv: tvm.tir.FloorDiv, ast.BuiltinOp.Mod: tvm.tir.FloorMod, ast.BuiltinOp.BitOr: lambda lhs, rhs, span: operator.or_(lhs, rhs), ast.BuiltinOp.BitAnd: lambda lhs, rhs, span: operator.and_(lhs, rhs), ast.BuiltinOp.BitXor: lambda lhs, rhs, span: operator.xor(lhs, rhs), ast.BuiltinOp.GT: tvm.tir.GT, ast.BuiltinOp.GE: tvm.tir.GE, ast.BuiltinOp.LT: tvm.tir.LT, ast.BuiltinOp.LE: tvm.tir.LE, ast.BuiltinOp.Eq: tvm.tir.EQ, ast.BuiltinOp.NotEq: tvm.tir.NE, ast.BuiltinOp.And: tvm.tir.And, ast.BuiltinOp.Or: tvm.tir.Or, } _unaryop_maker = { ast.BuiltinOp.USub: lambda rhs, span: operator.neg(rhs), ast.BuiltinOp.Invert: lambda rhs, span: operator.invert(rhs), ast.BuiltinOp.Not: tvm.tir.Not, } def __init__(self, base_lienno, tir_namespace): self.context = None self.base_lineno = base_lienno self.current_lineno = 0 self.current_col_offset = 0 self.tir_namespace = tir_namespace self.meta = None def init_function_parsing_env(self): """Initialize function parsing environment""" self.context = ContextMaintainer(self.report_error) # scope emitter def init_meta(self, meta_dict): if meta_dict is not None: self.meta = tvm.ir.load_json(json.dumps(meta_dict)) def transform(self, node): """Generic transformation for visiting the AST. Dispatches to `transform_ClassName` for the appropriate ClassName.""" old_lineno, old_col_offset = self.current_lineno, self.current_col_offset if hasattr(node, "lineno"): self.current_lineno = self.base_lineno + node.lineno - 1 if hasattr(node, "col_offset"): self.current_col_offset = node.col_offset method = "transform_" + node.__class__.__name__ visitor = getattr(self, method, self.generic_visit) transform_res = visitor(node) self.current_lineno, self.current_col_offset = old_lineno, old_col_offset return transform_res def match_tir_namespace(self, identifier: str) -> bool: """Check if the namespace is equal to tvm.script.tir""" return identifier in self.tir_namespace def report_error(self, message: str, span: Union[ast.Span, tvm.ir.Span]): """Report an error occuring at a location. This just dispatches to synr's DiagnosticContext. 
Parameters ---------- message : str Error message span : Union[synr.ast.Span, tvm.ir.Span】 Location of the error """ if isinstance(span, tvm.ir.Span): span = synr_span_from_tvm(span) self.error(message, span) def parse_body(self, parent): """Parse remaining statements in this scope. Parameters ---------- parent : synr.ast.Node Parent node of this scope. Errors will be reported here. """ body = [] spans = [] stmt = parent while len(self.context.node_stack[-1]) > 0: stmt = self.context.node_stack[-1].pop() spans.append(stmt.span) res = self.transform(stmt) if res is not None: body.append(res) if len(body) == 0: self.report_error( "Expected another statement at the end of this block. Perhaps you " "used a concise statement and forgot to include a body afterwards.", stmt.span, ) else: return ( tvm.tir.SeqStmt(body, tvm_span_from_synr(ast.Span.union(spans))) if len(body) > 1 else body[0] ) def parse_arg_list(self, func, node_call): """Match the arguments of a function call in the AST to the required arguments of the function. This handles positional arguments, positional arguments specified by name, keyword arguments, and varargs. Parameters ---------- func : Function The function that provides the signature node_call: ast.Call The AST call node that calls into the function. Returns ------- arg_list : list The parsed positional argument. """ assert isinstance(node_call, ast.Call) # collect arguments args = [self.transform(arg) for arg in node_call.params] kw_args = { self.transform(k): self.transform(v) for k, v in node_call.keyword_params.items() } # get the name and parameter list of func if isinstance(func, (Intrin, ScopeHandler, SpecialStmt)): func_name, param_list = func.signature() else: self.report_error( "Internal Error: function must be of type Intrin, ScopeHandler or SpecialStmt, " f"but it is {type(func).__name__}", node_call.span, ) # check arguments and parameter list and get a list of arguments reader = CallArgumentReader(func_name, args, kw_args, self, node_call) pos_only, kwargs, varargs = param_list internal_args = list() for i, arg_name in enumerate(pos_only): internal_args.append(reader.get_pos_only_arg(i + 1, arg_name)) for i, arg_info in enumerate(kwargs): arg_name, default = arg_info internal_args.append(reader.get_kwarg(i + 1 + len(pos_only), arg_name, default=default)) if varargs is not None: internal_args.extend(reader.get_varargs(len(pos_only) + len(kwargs) + 1)) elif len(args) + len(kw_args) > len(pos_only) + len(kwargs): self.report_error( "Arguments mismatched. " + f"Expected {len(pos_only) + len(kwargs)} args but got " + f"{len(args) + len(kw_args)}", node_call.span, ) return internal_args def parse_type(self, type_node, parent): """Parse a type annotation. We require the parent object to the type so that we have a place to report the error message if the type does not exist. """ if type_node is None: self.report_error("A type annotation is required", parent.span) res_type = self.transform(type_node) return tvm.ir.TupleType([]) if res_type is None else res_type.evaluate() def generic_visit(self, node): """Fallback visitor if node type is not handled. Reports an error.""" self.report_error(type(node).__name__ + " AST node is not supported", node.span) def transform_Module(self, node): """Module visitor Right now, we only support two formats for TVM Script. Example ------- 1. Generate a PrimFunc (If the code is printed, then it may also contain metadata) .. code-block:: python import tvm @tvm.script def A(...): ... # returns a PrimFunc func = A 2. 
Generate an IRModule .. code-block:: python import tvm @tvm.script.ir_module class MyMod(): @T.prim_func def A(...): ... @T.prim_func def B(...): ... __tvm_meta__ = ... # returns an IRModule mod = MyMod """ if len(node.funcs) == 1: return self.transform(next(iter(node.funcs.values()))) elif len(node.func) == 0: self.report_error( "You must supply at least one class or function definition", node.span ) else: self.report_error( "Only one-function, one-class or function-with-meta source code is allowed", ast.Span.union([x.span for x in list(node.funcs.values())[1:]]), ) def transform_Class(self, node): """Class definition visitor. A class can have multiple function definitions and a single :code:`__tvm_meta__` statement. Each class corresponds to a single :code:`IRModule`. Example ------- .. code-block:: python @tvm.script.ir_module class MyClass: __tvm_meta__ = {} def A(): T.evaluate(0) """ if len(node.assignments) == 1: if not ( len(node.assignments[0].lhs) == 1 and isinstance(node.assignments[0].lhs[0], ast.Var) and node.assignments[0].lhs[0].id.name == "__tvm_meta__" ): self.report_error( "The only top level assignments allowed are `__tvm_meta__ = ...`", node.assignments[0].span, ) self.init_meta( MetaUnparser().do_transform(node.assignments[0].rhs, self._diagnostic_context) ) elif len(node.assignments) > 1: self.report_error( "Only a single top level `__tvm_meta__` is allowed", ast.Span.union([x.span for x in node.assignments[1:]]), ) return IRModule( {GlobalVar(name): self.transform(func) for name, func in node.funcs.items()} ) def transform_Function(self, node): """Function definition visitor. Each function definition is translated to a single :code:`PrimFunc`. There are a couple restrictions on TVM Script functions: 1. Function arguments must have their types specified. 2. The body of the function can contain :code:`func_attr` to specify attributes of the function (like it's name). 3. The body of the function can also contain multiple :code:`buffer_bind`s, which give shape and dtype information to arguments. 4. Return statements are implicit. Example ------- .. code-block:: python @T.prim_func def my_function(x: T.handle): # 1. Argument types T.func_attr({"global_symbol": "mmult"}) # 2. Function attributes X_1 = tir.buffer_bind(x, [1024, 1024]) # 3. Buffer binding T.evaluate(0) # 4. 
This function returns 0 """ def check_decorator(decorators: List[ast.Expr]) -> bool: """Check the decorator is `T.prim_func""" if len(decorators) != 1: return False d: ast.Expr = decorators[0] return ( isinstance(d, ast.Attr) and isinstance(d.object, ast.Var) and self.match_tir_namespace(d.object.id.name) and d.field.name == "prim_func" ) self.init_function_parsing_env() self.context.enter_scope(nodes=node.body.stmts) # add parameters of function for arg in node.params: arg_var = tvm.te.var(arg.name, self.parse_type(arg.ty, arg)) self.context.update_symbol(arg.name, arg_var, node) self.context.func_params.append(arg_var) if not check_decorator(node.decorators): self.report_error( "All functions should be decorated by `T.prim_func`", node.span, ) # fetch the body of root block body = self.parse_body(node.body) # return a tir.PrimFunc dict_attr = self.context.func_dict_attr ret_type = self.parse_type(node.ret_type, node) if node.ret_type is not None else None func = tvm.tir.PrimFunc( self.context.func_params, body, ret_type, buffer_map=self.context.func_buffer_map, attrs=tvm.ir.make_node("DictAttrs", **dict_attr) if dict_attr else None, span=tvm_span_from_synr(node.span), ) # New Scope : Implicit root block # Each function contains an implicit root block in TensorIR, # so here we need a block scope for it. # If the PrimFunc is not a TensorIR func (e.g. TE scheduled func or low-level func), # the root block will not be added. The logic to add root block is in `_ffi_api.Complete` # Fix the PrimFunc # 1. generate root block if necessary # 2. generate surrounding loops for blocks if necessary func = call_with_error_reporting( self.report_error, node.span, _ffi_api.Complete, func, self.context.root_alloc_buffers, ) self.context.exit_scope() return func def transform_Lambda(self, node): """Lambda visitor Return an array of input parameters and the transformed lambda body. """ self.context.enter_scope(nodes=[node.body]) # add parameters of the lambda arg_vars = [] for arg in node.params: arg_var = tvm.te.var(arg.name) arg_vars.append(arg_var) self.context.update_symbol(arg.name, arg_var, node) # the body of a lambda must be an expr if not isinstance(node.body, ast.Expr): self.report_error("The body of a lambda must be an expression", node.span) # transform the body of the lambda body = self.transform(node.body) self.context.exit_scope() return arg_vars, body def transform_Assign(self, node): """Assign visitor AST abstract grammar: Assign(expr* targets, expr value, string? type_comment) By now 3 patterns of Assign is supported: 1. special stmts with return value 1.1 Buffer = T.match_buffer()/T.buffer_decl() 1.2 Var = T.var() 1.3 Var = T.env_thread() 2. (BufferStore) Buffer[PrimExpr, PrimExpr, ..., PrimExpr] = PrimExpr 3. (Store) Var[PrimExpr] = PrimExpr 4. 
with scope handlers with concise scoping and var def 4.1 var = T.allocate() """ if isinstance(node.rhs, ast.Call): # Pattern 1 & Pattern 4 func = self.transform(node.rhs.func_name) if isinstance(func, WithScopeHandler): if not func.concise_scope or not func.def_symbol: self.report_error( "with scope handler " + func.signature()[0] + " is not suitable here", node.rhs.span, ) # Pattern 4 arg_list = self.parse_arg_list(func, node.rhs) func.enter_scope(node, self.context, arg_list, node.rhs.func_name.span) func.body = self.parse_body(node) return func.exit_scope(node, self.context, arg_list, node.rhs.func_name.span) elif isinstance(func, SpecialStmt): # Pattern 1 arg_list = self.parse_arg_list(func, node.rhs) func.handle(node, self.context, arg_list, node.rhs.func_name.span) return self.parse_body(node) else: value = self.transform(node.rhs) if len(node.lhs) == 1 and not isinstance(node.lhs[0], ast.Var): # This is a little confusing because it only is true when # we have taken this branch. We might need to clarify what # exectly is allowed in Assignments in tvmscript. self.report_error( "Left hand side of assignment must be an unqualified variable", node.span, ) ast_var = node.lhs[0] var = tvm.te.var( ast_var.id.name, self.parse_type(node.ty, ast_var), span=tvm_span_from_synr(ast_var.span), ) self.context.update_symbol(var.name, var, node) body = self.parse_body(node) self.context.remove_symbol(var.name) return tvm.tir.LetStmt(var, value, body, span=tvm_span_from_synr(node.span)) self.report_error( """Assignments should be either 1. A "special statement" with return value 1.1 Buffer = T.match_buffer()/T.buffer_decl() 1.2 Var = T.var() 1.3 Var = T.env_thread() 2. A store into a buffer: Buffer[PrimExpr, PrimExpr, ..., PrimExpr] = PrimExpr 3. A store into a variable: Var[PrimExpr] = PrimExpr 4. A with scope handler with concise scoping and var def 4.1 var = T.allocate()""", node.span, ) def transform_SubscriptAssign(self, node): """Visitor for statements of the form :code:`x[1] = 2`.""" symbol = self.transform(node.params[0]) indexes = self.transform(node.params[1]) rhs = self.transform(node.params[2]) rhs_span = tvm_span_from_synr(node.params[2].span) if isinstance(symbol, tvm.tir.Buffer): # BufferStore return tvm.tir.BufferStore( symbol, tvm.runtime.convert(rhs, span=rhs_span), indexes, span=tvm_span_from_synr(node.span), ) else: if symbol.dtype == "handle" and len(indexes) != 1: self.report_error( "Handles only support one-dimensional indexing. Use `T.match_buffer` to " "construct a multidimensional buffer from a handle.", node.params[0].span, ) if len(indexes) != 1: self.report_error( f"Store is only allowed with one index, but {len(indexes)} were provided.", node.params[1].span, ) # Store return tvm.tir.Store( symbol, tvm.runtime.convert(rhs, span=rhs_span), indexes[0], tvm.runtime.convert(True, span=tvm_span_from_synr(node.span)), span=tvm_span_from_synr(node.span), ) def transform_Assert(self, node): """Assert visitor Pattern corresponds to concise mode of :code:`with T.Assert()`. """ condition = self.transform(node.condition) if node.msg is None: self.report_error("Assert statements must have an error message.", node.span) message = self.transform(node.msg) body = self.parse_body(node) return tvm.tir.AssertStmt( condition, tvm.runtime.convert(message), body, span=tvm_span_from_synr(node.span) ) def transform_For(self, node): """For visitor AST abstract grammar: For(expr target, expr iter, stmt* body, stmt* orelse, string? type_comment) By now 1 pattern of For is supported: 1. 
for scope handler for name in T.serial()/T.parallel()/T.vectorized()/T.unroll()/range()/ T.grid()/T.thread_binding() """ if not isinstance(node.rhs, ast.Call): self.report_error("The loop iterator should be a function call.", node.rhs.span) func = self.transform(node.rhs.func_name) if not isinstance(func, ForScopeHandler): self.report_error( "Only For scope handlers can be used in a for statement.", node.rhs.func_name.span ) # prepare for new for scope old_lineno, old_col_offset = self.current_lineno, self.current_col_offset self.current_lineno = node.span.start_line self.current_col_offset = node.span.start_column self.context.enter_scope(nodes=node.body.stmts) # for scope handler process the scope arg_list = self.parse_arg_list(func, node.rhs) func.enter_scope(node, self.context, arg_list, node.rhs.func_name.span) func.body = self.parse_body(node) res = func.exit_scope(node, self.context, arg_list, node.rhs.func_name.span) # exit the scope self.context.exit_scope() self.current_lineno, self.current_col_offset = old_lineno, old_col_offset return res def transform_While(self, node): """While visitor AST abstract grammar: While(expr condition, stmt* body) """ condition = self.transform(node.condition) # body self.context.enter_scope(nodes=node.body.stmts) body = self.parse_body(node) self.context.exit_scope() return tvm.tir.While(condition, body, span=tvm_span_from_synr(node.span)) def transform_With(self, node): """With visitor AST abstract grammar: With(withitem* items, stmt* body, string? type_comment) withitem = (expr context_expr, expr? optional_vars) By now 2 patterns of With is supported: 1. with scope handler with symbol def with T.block(*axes)/T.allocate() as targets: 2. with scope handler without symbol def with T.let()/T.Assert()/T.attr()/T.realize() """ if not isinstance(node.rhs, ast.Call): self.report_error( "The context expression of a `with` statement should be a function call.", node.rhs.span, ) func = self.transform(node.rhs.func_name) if not isinstance(func, WithScopeHandler): self.report_error( f"Function {func} cannot be used in a `with` statement.", node.rhs.func_name.span ) # prepare for new block scope old_lineno, old_col_offset = self.current_lineno, self.current_col_offset self.current_lineno = node.body.span.start_line self.current_col_offset = node.body.span.start_column self.context.enter_block_scope(nodes=node.body.stmts) # with scope handler process the scope arg_list = self.parse_arg_list(func, node.rhs) func.enter_scope(node, self.context, arg_list, node.rhs.func_name.span) func.body = self.parse_body(node) res = func.exit_scope(node, self.context, arg_list, node.rhs.func_name.span) # exit the scope self.context.exit_block_scope() self.current_lineno, self.current_col_offset = old_lineno, old_col_offset return res def transform_If(self, node): """If visitor AST abstract grammar: If(expr test, stmt* body, stmt* orelse) """ condition = self.transform(node.condition) # then body self.context.enter_scope(nodes=node.true.stmts) then_body = self.parse_body(node) self.context.exit_scope() # else body if len(node.false.stmts) > 0: self.context.enter_scope(nodes=node.false.stmts) else_body = self.parse_body(node) self.context.exit_scope() else: else_body = None return tvm.tir.IfThenElse( condition, then_body, else_body, span=tvm_span_from_synr(node.span) ) def transform_Call(self, node): """Call visitor 3 different Call patterns are allowed: 1. 
Intrin representing a PrimExpr/IterVar 1.1 tir.int/uint/float8/16/32/64/floormod/floordiv/load/cast/ramp/broadcast/max 1.2 tir.range/reduce_axis/scan_axis/opaque_axis 2. tir.Op(dtype, ...) 3. other callable functions """ if isinstance(node.func_name, ast.Op): if node.func_name.name == ast.BuiltinOp.Subscript: return self.transform_Subscript(node) if node.func_name.name in self._binop_maker: lhs = self.transform(node.params[0]) # There is no supertype for everything that can appear in # an expression, so we manually add what we might get here. if not isinstance(lhs, (tvm.tir.PrimExpr, BufferSlice)): # We would really like to report a more specific # error here, but this parser contains no distinction # between parsing statements and parsing expressions. All # rules just call `transform`. self.report_error( f"Left hand side of binary op must be a PrimExpr, " "but it is a {type(lhs).__name__}", node.params[0].span, ) rhs = self.transform(node.params[1]) if not isinstance(rhs, (tvm.tir.PrimExpr, BufferSlice)): self.report_error( f"Right hand side of binary op must be a PrimExpr, " "but it is a {type(rhs).__name__}", node.params[1].span, ) return call_with_error_reporting( self.report_error, node.span, lambda node, lhs, rhs, span: self._binop_maker[node.func_name.name]( lhs, rhs, span=span ), node, lhs, rhs, tvm_span_from_synr(node.span), ) if node.func_name.name in self._unaryop_maker: rhs = self.transform(node.params[0]) return self._unaryop_maker[node.func_name.name]( rhs, span=tvm_span_from_synr(node.span) ) self.report_error(f"Unsupported operator {node.func_name.name}.", node.func_name.span) else: func = self.transform(node.func_name) if isinstance(func, Intrin) and not func.stmt: # pattern 1 arg_list = self.parse_arg_list(func, node) return call_with_error_reporting( self.report_error, node.func_name.span, func.handle, arg_list, node.func_name.span, ) else: args = [self.transform(arg) for arg in node.params] kw_args = { self.transform(k): self.transform(v) for k, v in node.keyword_params.items() } if isinstance(func, tvm.tir.op.Op): if not "dtype" in kw_args.keys(): self.report_error(f"{func} requires a dtype keyword argument.", node.span) # pattern 2 return tvm.tir.Call( kw_args["dtype"], func, args, span=tvm_span_from_synr(node.span) ) elif callable(func): # pattern 3 return func(*args, **kw_args) else: self.report_error( f"Function is neither callable nor a tvm.tir.op.Op (it is a {type(func)}).", node.func_name.span, ) def transform_UnassignedCall(self, node): """Visitor for statements that are function calls. This handles function calls that appear on thier own line like `tir.realize`. Examples -------- .. code-block:: python @T.prim_func def f(): A = T.buffer_decl([10, 10]) T.realize(A[1:2, 1:2], "") # This is an UnassignedCall A[1, 1] = 2 # This is also an UnassignedCall """ # Only allowed builtin operator that can be a statement is x[1] = 3 i.e. subscript assign. if isinstance(node.call.func_name, ast.Op): if node.call.func_name.name != ast.BuiltinOp.SubscriptAssign: self.report_error( "Binary and unary operators are not allowed as a statement", node.span ) else: return self.transform_SubscriptAssign(node.call) # handle a regular function call func = self.transform(node.call.func_name) arg_list = self.parse_arg_list(func, node.call) if isinstance(func, tir.scope_handler.AssertHandler): self.report_error( "A standalone `T.Assert` is not allowed. 
Use `assert condition, message` " "instead.", node.call.func_name.span, ) if isinstance(func, Intrin): if func.stmt: return call_with_error_reporting( self.report_error, node.call.func_name.span, func.handle, arg_list, node.call.func_name.span, ) else: self.report_error(f"This intrinsic cannot be used as a statement.", node.call.span) elif isinstance(func, WithScopeHandler) and func.concise_scope and not func.def_symbol: func.enter_scope(node, self.context, arg_list, node.call.func_name.span) func.body = self.parse_body(node) return func.exit_scope(node, self.context, arg_list, node.call.func_name.span) elif isinstance(func, SpecialStmt) and not func.def_symbol: func.handle(node, self.context, arg_list, node.call.func_name.span) return self.report_error( "Unexpected statement. Expected an assert, an intrinsic, a with statement, or a " f"special statement, but got {type(func).__name__}.", node.call.func_name.span, ) def transform_Slice(self, node): start = self.transform(node.start) end = self.transform(node.end) if not (isinstance(node.step, ast.Constant) and node.step.value == 1): self.report_error("Only step size 1 is supported for slices.", node.step.span) return Slice(start, end) def transform_Subscript(self, node): """Array access visitor. By now only 3 types of Subscript are supported: 1. Buffer[index, index, ...], Buffer element access(BufferLoad & BufferStore) Var[index] Buffer element access() 2. Buffer[start: stop, start: stop, ...], BufferRealize(realize(buffer[...])) 3. Array[index], Buffer element access """ symbol = self.transform(node.params[0]) if symbol is None: self.report_error( f"Variable {node.params[0].id.name} is not defined.", node.params[0].span ) indexes = [self.transform(x) for x in node.params[1].values] if isinstance(symbol, tvm.tir.expr.Var): if symbol.dtype == "handle": self.report_error( "Cannot read directly from a handle, use `T.match_buffer` " "to create a buffer to read from.", node.params[0].span, ) if len(indexes) > 1: self.report_error( "Only a single index can be provided when indexing into a `var`.", node.params[1].span, ) index = indexes[0] if not isinstance(index, (tvm.tir.PrimExpr, int)): self.report_error( "Var load index should be an int or PrimExpr, but it is a" + type(index), node.span, ) return call_with_error_reporting( self.report_error, node.span, tvm.tir.Load, "float32", symbol, index, True, span=tvm_span_from_synr(node.span), ) elif isinstance(symbol, tvm.tir.Buffer): return BufferSlice( symbol, indexes, self.report_error, span=tvm_span_from_synr(node.span) ) elif isinstance(symbol, tvm.container.Array): if len(indexes) > 1: self.report_error( "Array access should be one-dimension access, but the indices are " + str(indexes), node.span, ) index = indexes[0] if not isinstance(index, (int, tvm.tir.expr.IntImm)): self.report_error( "Array access index expected int or IntImm, but got " + type(index), node.span, ) if int(index) >= len(symbol): self.report_error( f"Array access out of bound, size: {len(symbol)}, got index {index}.", node.span, ) return symbol[int(index)] else: self.report_error( f"Cannot subscript from a {type(symbol).__name__}. Only variables and " "buffers are supported.", node.params[0].span, ) def transform_Attr(self, node): """Visitor for field access of the form `x.y`. This visitor is used to lookup function and symbol names. We have two cases to handle here: 1. If we have a statement of the form `tir.something`, then we lookup `tir.something` in the `Registry`. 
If the function is not in the registry, then we try to find a `tvm.ir.op.Op` with the same name. 2. All other names `tvm.something` are lookup up in this current python namespace. """ def get_full_attr_name(node: ast.Attr) -> str: reverse_field_names = [node.field.name] while isinstance(node.object, ast.Attr): node = node.object reverse_field_names.append(node.field.name) if isinstance(node.object, ast.Var): reverse_field_names.append(node.object.id.name) return ".".join(reversed(reverse_field_names)) if isinstance(node.object, (ast.Var, ast.Attr)): full_attr_name = get_full_attr_name(node) attr_object, fields = full_attr_name.split(".", maxsplit=1) if self.match_tir_namespace(attr_object): func_name = "tir." + fields res = Registry.lookup(func_name) if res is not None: return res try: return tvm.ir.op.Op.get(func_name) except TVMError as e: # Check if we got an attribute error if e.args[0].find("AttributeError"): self.report_error(f"Unregistered function `tir.{fields}`.", node.span) else: raise e symbol = self.transform(node.object) if symbol is None: self.report_error("Unsupported Attribute expression.", node.object.span) if not hasattr(symbol, node.field.name): self.report_error( f"Type {type(symbol)} does not have a field called `{node.field.name}`.", node.span ) res = getattr(symbol, node.field.name) return res def transform_TypeAttr(self, node): """Visitor for field access of the form `x.y` for types. We have two cases here: 1. If the type is of the form `T.something`, we look up the type in the `tir` namespace in this module. 2. If the type is of the form `tvm.x.something` then we look up `tvm.x.something` in this modules namespace. """ if isinstance(node.object, ast.TypeVar): if self.match_tir_namespace(node.object.id.name): if not hasattr(tir, node.field.name): self.report_error( f"Invalid type annotation `tir.{node.field.name}`.", node.span ) return getattr(tir, node.field.name) symbol = self.transform(node.object) if symbol is None: self.report_error("Unsupported Attribute expression", node.object.span) if not hasattr(symbol, node.field): self.report_error( f"Type {type(symbol)} does not have a field called `{node.field}`.", node.span ) res = getattr(symbol, node.field) return res def transform_DictLiteral(self, node): """Dictionary literal visitor. Handles dictionary literals of the form `{x:y, z:2}`. """ keys = [self.transform(key) for key in node.keys] values = [self.transform(value) for value in node.values] return dict(zip(keys, values)) def transform_Tuple(self, node): """Tuple visitor. Handles tuples of the form `(x, y, 2)`. """ return tuple(self.transform(element) for element in node.values) def transform_ArrayLiteral(self, node): """List literal visitor. Handles lists of the form `[x, 2, 3]`. """ return [self.transform(element) for element in node.values] def transform_Var(self, node): """Variable visitor Handles variables like `x` in `x = 2`. """ name = node.id.name if name == "meta": return self.meta symbol = Registry.lookup(name) if symbol is not None: return symbol symbol = self.context.lookup_symbol(name) if symbol is not None: return symbol self.report_error(f"Unknown identifier {name}.", node.span) def transform_TypeVar(self, node): """Type variable visitor. Equivalent to `transform_Var` but for types. """ name = node.id.name symbol = Registry.lookup(name) or self.context.lookup_symbol(name) if symbol is not None: return symbol self.report_error(f"Unknown identifier {name}.", node.span) def transform_Constant(self, node): """Constant value visitor. 
Constant values include `None`, `"strings"`, `2` (integers), `4.2` (floats), and `true` (booleans). """ return tvm.runtime.convert(node.value, span=tvm_span_from_synr(node.span)) def transform_TypeConstant(self, node): """Constant value visitor for types. See `transform_Constant`. """ return node.value def transform_Return(self, node): self.report_error( "TVM script does not support return statements. Instead the last statement in any " "block is implicitly returned.", node.span, )
def set_proximity_director(self, swarm, proximityVar,
                           minDistanceFn=fn.misc.constant(1.),
                           maxDistanceFn=fn.misc.constant(1.),
                           locFac=1., searchFac=2, directorVar=False):
    #################
    # Part 1
    #################
    """
    | \  /   locFacNeg*-1
    |  \/
    |  /\
    | /  \   locFacPos
    |________
      0-locFac-1
    """
    locFacPos = self.thickness - (self.thickness * locFac)
    locFacNeg = -1. * (self.thickness * locFac)

    # this is a relative thickness, default is 1.
    # try to save an evaluation
    if type(minDistanceFn) == uw.function.misc.constant:
        thickness = minDistanceFn.value
    else:
        thickness = minDistanceFn.evaluate(swarm)

    # First, we want to rebuild the minimum distance...
    sd, pts0 = self.compute_signed_distance(swarm.particleCoordinates.data,
                                            distance=searchFac * self.thickness)
    # if any NaNs appear, set them to infs
    sd[np.where(np.isnan(sd))[0]] = np.inf

    # everything in the min-dist halo becomes fault
    if not self.empty:
        mask = np.logical_and(
            sd < locFacPos * thickness,        # positive side of fault
            sd > locFacNeg * thickness)[:, 0]  # negative side of fault
        proximityVar.data[mask] = self.ID      # set to ID

    #################
    # Part 2
    #################
    # particles with proximity == self.ID beyond the retention distance are
    # reset to zero; the two sides of the fault are handled separately, and
    # thickness now comes from the maxDistanceFn (again saving an evaluation)
    if type(maxDistanceFn) == uw.function.misc.constant:
        thickness = maxDistanceFn.value
    else:
        thickness = maxDistanceFn.evaluate(swarm)

    # treat each side of the fault separately (with parallel protection)
    if sd.shape[0] == proximityVar.data.shape[0]:
        mask1 = operator.and_(sd > locFacPos * thickness, proximityVar.data == self.ID)
        proximityVar.data[mask1] = 0
        mask2 = operator.and_(sd < locFacNeg * thickness, proximityVar.data == self.ID)
        proximityVar.data[mask2] = 0

    #################
    # Part 3
    #################
    if directorVar:
        # the director domain will be larger than proximity, but proximity controls
        # rheology. searchFac*self.thickness should capture the max proximity distance
        # from the fault, hence it must be set in relation to the maxDistanceFn
        dv, nzv = self.compute_normals(swarm.particleCoordinates.data,
                                       searchFac * self.thickness)
        mask = np.where(proximityVar.data == self.ID)[0]
        directorVar.data[mask, :] = dv[mask, :]
def bitwise_and_usecase(x, y):
    return operator.and_(x, y)
def rand_(left, right):
    return operator.and_(right, left)
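# Both wrappers delegate to the same binary operator; rand_ just swaps the
# operands, mirroring Python's reflected __rand__ protocol. A quick check:
import operator

print(operator.and_(0b1100, 0b1010))  # 8 (0b1000)
print(rand_(0b1100, 0b1010))          # 8 -- bitwise AND is commutative for ints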
def get_results_by_query(startPage, perPageRecords, node_id, name, args=None):
    searchTerm = None
    columns = []
    columnsDefined = False
    try:
        startPage = int(args['start'])
        perPageRecords = int(args['length'])
        if 'search[value]' in args and (args['search[value]'] != ""):
            searchTerm = (args['search[value]'])
        if (args['columns[0][data]']):
            columnsDefined = True
    except:
        print('error in request')

    results = []
    count = db.session.query(ResultLog).filter(
        and_(ResultLog.name == name,
             and_(ResultLog.node_id == node_id,
                  ResultLog.action != 'removed'))).count()
    countFiltered = count

    if searchTerm:
        # NOTE: building SQL by string concatenation is vulnerable to SQL
        # injection; bound parameters would be safer (see the sketch below).
        queryCountStr = "select count(distinct id) from result_log join jsonb_each_text(result_log.columns) e on true where node_id='" + str(
            node_id
        ) + "' and e.value ilike " + "'%" + searchTerm + "%'" + " and name=" + "'" + name + "'" + " and action!='removed'"
        filtered_quer = db.engine.execute(sqlalchemy.text(queryCountStr))
        for r in filtered_quer:
            countFiltered = r[0]

        queryStr = "select distinct id,columns from result_log join jsonb_each_text(result_log.columns) e on true where node_id='" + str(
            node_id
        ) + "' and e.value ilike " + "'%" + searchTerm + "%'" + " and name=" + "'" + name + "'" + " and action!='removed' order by id desc OFFSET " + str(
            startPage) + " LIMIT " + str(perPageRecords)
        record_query = db.engine.execute(sqlalchemy.text(queryStr))
        for r in record_query:
            results.append(r[1])
    else:
        record_query = db.session.query(ResultLog.columns).filter(
            and_(ResultLog.node_id == (node_id),
                 and_(ResultLog.name == name,
                      ResultLog.action != 'removed'))).order_by(
                          sqlalchemy.desc(ResultLog.id)).offset(
                              startPage).limit(perPageRecords).all()
        results = [r for r, in record_query]

    if results:
        firstRecord = results[0]
        TO_CAPTURE_COLUMNS = []
        if 'action' in firstRecord:
            if 'PROC_' in firstRecord['action']:
                TO_CAPTURE_COLUMNS = [
                    'utc_time', 'action', 'path', 'parent_path'
                ]
            elif ('Close' in firstRecord['action']
                  or 'Accept' in firstRecord['action']
                  or 'Connect' in firstRecord['action']):
                # note the comma between the last two entries; the original
                # was missing it, silently concatenating the two strings
                TO_CAPTURE_COLUMNS = [
                    'utc_time', 'action', 'process_name', 'protocol',
                    'local_address', 'local_port', 'remote_address',
                    'remote_port'
                ]
            elif ('DELETE' in firstRecord['action']
                  or 'READ' in firstRecord['action']
                  or 'WRITE' in firstRecord['action']):
                TO_CAPTURE_COLUMNS = [
                    'utc_time', 'action', 'process_name', 'md5', 'target_path'
                ]
        elif 'event_type' in firstRecord:
            if ('dns_req' == firstRecord['event_type']
                    or 'dns_res' == firstRecord['event_type']):
                TO_CAPTURE_COLUMNS = [
                    'domain_name', 'resolved_ip', 'utc_time', 'request_type',
                    'request_class'
                ]
            elif 'http_req' == firstRecord['event_type']:
                TO_CAPTURE_COLUMNS = [
                    'utc_time', 'url', 'remote_port', 'process_name'
                ]

        if len(TO_CAPTURE_COLUMNS) == 0:
            for key in firstRecord.keys():
                columns.append({'data': key, 'title': key})
        else:
            columns.append({
                "className": 'details-control',
                "orderable": False,
                "data": None,
                "defaultContent": ''
            })
            for key in firstRecord.keys():
                if key in TO_CAPTURE_COLUMNS:
                    columns.append({'data': key, 'title': key})

    output = {}
    try:
        output['sEcho'] = str(int(request.values['sEcho']))
    except:
        print('error in echo')
    output['iRecordsFiltered'] = str(countFiltered)
    output['iTotalRecords'] = str(count)
    output['pageLength'] = str(perPageRecords)
    output['iTotalDisplayRecords'] = str(countFiltered)
    aaData_rows = results
    # add additional rows here that are not represented in the database
    # aaData_row.append(('''''' % (str(row[ self.index ]))).replace('\\', ''))
    if not columnsDefined:
        output['columns'] = columns
    output['aaData'] = aaData_rows
    return output
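# The concatenated ilike queries above interpolate searchTerm directly into
# SQL. A hedged sketch of the same count query with bound parameters instead,
# assuming the same result_log schema, the db.engine handle from the
# surrounding app, and SQLAlchemy 1.x-style execute():
import sqlalchemy

query = sqlalchemy.text(
    "select count(distinct id) from result_log "
    "join jsonb_each_text(result_log.columns) e on true "
    "where node_id = :node_id and e.value ilike :term "
    "and name = :name and action != 'removed'"
)
# The driver escapes the bound values, so searchTerm can no longer break
# out of the string literal.
row = db.engine.execute(query, node_id=str(node_id),
                        term='%' + searchTerm + '%', name=name).first()
countFiltered = row[0]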
def test_bitwise_and(self):
    self.failUnless(operator.and_(0xf, 0xa) == 0xa)
def intersectvec(A, B, C, D):
    return op.and_(op.ne(ccwvec(A, C, D), ccwvec(B, C, D)),
                   op.ne(ccwvec(A, B, C), ccwvec(A, B, D)))
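# This is the classic orientation-based segment-intersection test, vectorised:
# segments AB and CD cross iff C and D lie on opposite sides of AB and A and B
# lie on opposite sides of CD. A standalone sketch with an assumed elementwise
# ccwvec (the original's implementation is not shown here):
import operator as op
import numpy as np

def ccwvec(A, B, C):
    # True where the point triple (A, B, C) turns counter-clockwise,
    # evaluated row-wise over arrays of 2-D points.
    return ((C[:, 1] - A[:, 1]) * (B[:, 0] - A[:, 0])
            > (B[:, 1] - A[:, 1]) * (C[:, 0] - A[:, 0]))

A = np.array([[0., 0.]]); B = np.array([[2., 2.]])
C = np.array([[0., 2.]]); D = np.array([[2., 0.]])
print(intersectvec(A, B, C, D))  # [ True] -- the diagonals cross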
def test_bitwise_and(self):
    self.assertRaises(TypeError, operator.and_)
    self.assertRaises(TypeError, operator.and_, None, None)
    self.assertTrue(operator.and_(0xf, 0xa) == 0xa)
def test_bitwise_and(self):
    self.failUnlessRaises(TypeError, operator.and_)
    self.failUnlessRaises(TypeError, operator.and_, None, None)
    self.failUnless(operator.and_(0xf, 0xa) == 0xa)
import operator

# sample list (its original definition was elided; any list of five
# elements works with the slicing below)
li = [1, 5, 6, 7, 8]

operator.delitem(li, slice(2, 4))
print("\nthe modified list after delitem() is : ", end="")
for i in range(0, len(li)):
    print(li[i], end=" ")

print("\nthe 1st and 2nd element of list is : ", end=" ")
print(operator.getitem(li, slice(0, 2)))

s1 = "geeksfor"
s2 = "geeks"
print("\nthe concatenated string is : ", end="")
print(operator.concat(s1, s2))

if operator.contains(s1, s2):
    print("geeksfor contains geeks")
else:
    print("geeksfor does not contain geeks")

# bitwise
a = 3
b = 4
print("\nthe bitwise and of a and b is : ", end="")
print(operator.and_(a, b))
print("the bitwise or of a and b is : ", end="")
print(operator.or_(a, b))
print("the bitwise xor of a and b is : ", end=" ")
print(operator.xor(a, b))
print("the inverted value of a is : ", end="")
print(operator.invert(a))
def get_category_with_id_and_current_user_id(categoryId):
    return Category.query.filter(
        and_(Category.id == int(categoryId),
             Category.userId == current_user.id)).first()
def notified_thesedays(cls, now, user_id):
    session = Session()
    return session.query(
        session.query(cls).filter(
            and_(cls.target_user == user_id,
                 cls.created_at > (now - timedelta(days=3)))).exists()).scalar()
def owns_current_user_categoryType(type):
    return Category.query.filter(
        and_(Category.type == type,
             Category.userId == current_user.id)).first() is not None
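# These helpers all follow the same pattern: and_ (from sqlalchemy) combines
# two column comparisons into a single criterion. SQLAlchemy's filter() ANDs
# multiple arguments implicitly, so an equivalent sketch of the helper above
# (Category and current_user assumed from the surrounding app) would be:
def owns_current_user_categoryType(type):
    # filter() ANDs its arguments, so the explicit and_() can be dropped
    return Category.query.filter(
        Category.type == type,
        Category.userId == current_user.id).first() is not None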
def job():
    now = datetime.now()
    current_time = now.strftime("%H:%M:%S")
    global position

    # Fetch a DataFrame of bars from Alpaca
    panda = api.get_barset(alpacaTicker, '5Min', limit=600).df
    # Write the DataFrame to csv
    panda.to_csv(os.path.abspath(os.getcwd()) + '/' + ticker + '.csv')
    # Calculate the indicator values on the csv and update it
    calvIndicators()
    # Read the updated csv back into a DataFrame
    nowData = pd.read_csv(file1)
    # Get the date of the last row
    Date = nowData["Unnamed: 0"][nowData.index[-1]]

    toolbox = base.Toolbox()
    toolbox.register("compile", gp.compile, pset=pset)
    i = operator.and_(
        if_then_else(position,
                     operator.gt(rsi(Date, 7), 42),
                     operator.lt(rsi(Date, 7), 53)),
        operator.lt(ma(Date, 50), ma(Date, 10)))
    rule = toolbox.compile(expr=i)
    action = rule(Date, position)

    if action and position == False:
        buy = True
        sell = False
    elif not action and position == False:
        sell = False
        buy = False
    elif action and position == True:
        sell = False
        buy = False
    elif not action and position == True:
        sell = True
        buy = False

    if buy:
        print(current_time, ": Buy")
        position = True
        if trade:
            api.submit_order(symbol=alpacaTicker, qty=numShares, side='buy',
                             time_in_force='gtc', type='market')
    elif sell:
        print(current_time, ": Sell")
        position = False
        if trade:
            api.submit_order(symbol=alpacaTicker, qty=numShares, side='sell',
                             time_in_force='gtc', type='market')
    elif position == False:
        print(current_time, ": Wait")
    else:
        print(current_time, ": Hold")
    return
def owns_current_user_noteId(noteId):
    return (Note.query.filter(
        and_(Note.id == int(noteId),
             Note.userId == current_user.id)).first()) is not None
def owns_current_user_categoryId(categoryId):
    return Category.query.filter(
        and_(Category.id == int(categoryId),
             Category.userId == current_user.id)).first() is not None
# In[556]:

dimTemp = (dp.deltaTemp * temperatureField + dp.surfaceTemp +
           dp.potentialTemp * fn.math.exp(ndp.dissipation * depthFn) -
           dp.potentialTemp)

# ## Critical temperature analysis

# In[558]:

TC_K0 = 1250 + 273.
TC_K1 = 1300 + 273.

# In[562]:

conditions = [(operator.and_(dimTemp > TC_K0, dimTemp < TC_K1), 1.),
              (True, 0.)]
critTempFn = fn.branching.conditional(conditions)

# In[564]:

get_ipython().magic(u'pinfo glucifer.objects.Contours')

# In[567]:

fig = glucifer.Figure(quality=3)
fig.append(glucifer.objects.Points(swarm, critTempFn, pointSize=2))
# fig.append(glucifer.objects.Surface(mesh, pressureField, valueRange=[-1e3, 1e3], pointSize=1))
def get_category_with_type_and_current_user_id(type):
    return Category.query.filter(
        and_(Category.type == type,
             Category.userId == current_user.id)).first()
class ExpressionError(SyntaxError):
    """A Snuggs-specific syntax error."""
    filename = "<string>"
    lineno = 1


op_map = {
    '*': lambda *args: functools.reduce(lambda x, y: operator.mul(x, y), args),
    '+': lambda *args: functools.reduce(lambda x, y: operator.add(x, y), args),
    '/': lambda *args: functools.reduce(lambda x, y: operator.truediv(x, y), args),
    '-': lambda *args: functools.reduce(lambda x, y: operator.sub(x, y), args),
    '&': lambda *args: functools.reduce(lambda x, y: operator.and_(x, y), args),
    '|': lambda *args: functools.reduce(lambda x, y: operator.or_(x, y), args),
    '<': operator.lt,
    '<=': operator.le,
    '==': operator.eq,
    '!=': operator.ne,
    '>=': operator.ge,
    '>': operator.gt,
}


def asarray(*args):
    if len(args) == 1 and hasattr(args[0], '__iter__'):
        return numpy.asanyarray(list(args[0]))
    else:
        return numpy.asanyarray(list(args))
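# The reducing lambdas make the arithmetic and bitwise entries of op_map
# variadic, folding any number of arguments left to right, while the
# comparison entries stay binary. For example (functools and operator are
# assumed imported, as in the snippet above):
print(op_map['+'](1, 2, 3))           # 6
print(op_map['&'](0xff, 0x3c, 0x0f))  # 12 (0b1100)
print(op_map['<'](1, 2))              # True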
def testOperators(self):
    with self.test_session():
        var_f = tf.Variable([2.0])
        add = var_f + 0.0
        radd = 1.0 + var_f
        sub = var_f - 1.0
        rsub = 1.0 - var_f
        mul = var_f * 10.0
        rmul = 10.0 * var_f
        div = var_f / 10.0
        rdiv = 10.0 / var_f
        lt = var_f < 3.0
        rlt = 3.0 < var_f
        le = var_f <= 2.0
        rle = 2.0 <= var_f
        gt = var_f > 3.0
        rgt = 3.0 > var_f
        ge = var_f >= 2.0
        rge = 2.0 >= var_f
        neg = -var_f
        abs_v = abs(var_f)

        var_i = tf.Variable([20])
        mod = var_i % 7
        rmod = 103 % var_i

        var_b = tf.Variable([True, False])
        and_v = operator.and_(var_b, [True, True])
        or_v = operator.or_(var_b, [False, True])
        xor_v = operator.xor(var_b, [False, False])
        invert_v = ~var_b

        rnd = np.random.rand(4, 4).astype("f")
        var_t = tf.Variable(rnd)
        slice_v = var_t[2, 0:0]

        tf.initialize_all_variables().run()
        self.assertAllClose([2.0], add.eval())
        self.assertAllClose([3.0], radd.eval())
        self.assertAllClose([1.0], sub.eval())
        self.assertAllClose([-1.0], rsub.eval())
        self.assertAllClose([20.0], mul.eval())
        self.assertAllClose([20.0], rmul.eval())
        self.assertAllClose([0.2], div.eval())
        self.assertAllClose([5.0], rdiv.eval())
        self.assertAllClose([-2.0], neg.eval())
        self.assertAllClose([2.0], abs_v.eval())
        self.assertAllClose([True], lt.eval())
        self.assertAllClose([False], rlt.eval())
        self.assertAllClose([True], le.eval())
        self.assertAllClose([True], rle.eval())
        self.assertAllClose([False], gt.eval())
        self.assertAllClose([True], rgt.eval())
        self.assertAllClose([True], ge.eval())
        self.assertAllClose([True], rge.eval())
        self.assertAllClose([6], mod.eval())
        self.assertAllClose([3], rmod.eval())
        self.assertAllClose([True, False], and_v.eval())
        self.assertAllClose([True, True], or_v.eval())
        self.assertAllClose([True, False], xor_v.eval())
        self.assertAllClose([False, True], invert_v.eval())
        self.assertAllClose(rnd[2, 0:0], slice_v.eval())
def smart_mul(a, b):
    if all_((a, b), lambda x: isinstance(x, set)):
        return and_(a, b)
    return mul(a, b)
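# When both operands are sets, operator.and_ dispatches to set.__and__
# (intersection); otherwise smart_mul multiplies. A sketch with assumed
# imports and a plausible all_ helper (the original's is not shown):
from operator import and_, mul

def all_(items, pred):
    # assumed helper: True when pred holds for every item
    return all(pred(x) for x in items)

print(smart_mul({1, 2, 3}, {2, 3, 4}))  # {2, 3} -- set intersection
print(smart_mul(3, 4))                  # 12    -- plain multiplication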
def get_note_with_id_and_current_user_id(noteId):
    return Note.query.filter(
        and_(Note.id == int(noteId),
             Note.userId == current_user.id)).first()