Example #1
 def opr(operation, op1, op2=()):
     result = ()
     if isinstance(op1, types_Number): 
         # prefix +/-
         if op2==():
             if operation=='+':
                 result = operator.pos(op1)
             elif operation=='-':
                 result = operator.neg(op1)
         elif isinstance(op2, types_Number):
             if operation=='+':
                 result = operator.add(op1,op2)
             elif operation=='-':
                 result = operator.sub(op1,op2)
     # string '+'
     elif operation=='+' and all([isinstance(x,types.StringTypes) for x in (op1,op2)]):
         result = operator.add(op1,op2)
     return result
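A minimal Python 3 sketch of the same dispatcher, since the snippet above is Python 2 and omits the definition of types_Number; the types_Number tuple and the use of plain str in place of types.StringTypes below are assumptions, not part of the original project:

import operator

types_Number = (int, float, complex)  # assumed definition

def opr3(operation, op1, op2=()):
    result = ()
    if isinstance(op1, types_Number):
        if op2 == ():                        # prefix +/-
            if operation == '+':
                result = operator.pos(op1)
            elif operation == '-':
                result = operator.neg(op1)
        elif isinstance(op2, types_Number):  # binary +/-
            if operation == '+':
                result = operator.add(op1, op2)
            elif operation == '-':
                result = operator.sub(op1, op2)
    elif operation == '+' and all(isinstance(x, str) for x in (op1, op2)):
        result = operator.add(op1, op2)      # string concatenation
    return result

print(opr3('-', 5))            # -5, via operator.neg
print(opr3('-', 7, 2))         # 5, via operator.sub
print(opr3('+', 'py', 'neg'))  # 'pyneg'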
Example #2
 def visit_UnaryOpExpr(self, node):
     # TODO: handle TypeError 'bad operand type for ...' exceptions
     operand = self.visit(node.operand)
     if isinstance(node.op, nodes.UMinus):
         return operator.neg(self.node_as_value(operand))
     elif isinstance(node.op, nodes.UPlus):
         return operator.pos(self.node_as_value(operand))
     else:
         raise RuntimeError()  # pragma: no cover
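The same visitor idea can be shown self-contained with the standard ast module; the UnaryEval class below is a sketch and does not reproduce the original project's nodes/self.node_as_value machinery:

import ast
import operator

class UnaryEval(ast.NodeVisitor):
    """Evaluate numeric literals and unary +/- using operator.pos/operator.neg."""
    def visit_Constant(self, node):
        return node.value
    def visit_UnaryOp(self, node):
        operand = self.visit(node.operand)
        if isinstance(node.op, ast.USub):
            return operator.neg(operand)
        if isinstance(node.op, ast.UAdd):
            return operator.pos(operand)
        raise RuntimeError("unsupported unary operator")

print(UnaryEval().visit(ast.parse("-5", mode="eval").body))   # -5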
Example #3
 def test_neg(self):
     self.failUnlessRaises(TypeError, operator.neg)
     self.failUnlessRaises(TypeError, operator.neg, None)
     self.failUnless(operator.neg(5) == -5)
     self.failUnless(operator.neg(-5) == 5)
     self.failUnless(operator.neg(0) == 0)
     self.failUnless(operator.neg(-0) == 0)
Example #4
 def test_neg(self):
     self.assertRaises(TypeError, operator.neg)
     self.assertRaises(TypeError, operator.neg, None)
     self.assertTrue(operator.neg(5) == -5)
     self.assertTrue(operator.neg(-5) == 5)
     self.assertTrue(operator.neg(0) == 0)
     self.assertTrue(operator.neg(-0) == 0)
Example #5
 def test_neg(self):
     self.failUnlessRaises(TypeError, operator.neg)
     self.failUnlessRaises(TypeError, operator.neg, None)
     self.assertEqual(operator.neg(5), -5)
     self.assertEqual(operator.neg(-5), 5)
     self.assertEqual(operator.neg(0), 0)
     self.assertEqual(operator.neg(-0), 0)
Example #6
 def test_neg(self):
     #operator = self.module
     self.assertRaises(TypeError, operator.neg)
     self.assertRaises(TypeError, operator.neg, None)
     self.assertEqual(operator.neg(5), -5)
     self.assertEqual(operator.neg(-5), 5)
     self.assertEqual(operator.neg(0), 0)
     self.assertEqual(operator.neg(-0), 0)
Example #7
 def test_result(self):
     types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
     with suppress_warnings() as sup:
         sup.filter(RuntimeWarning)
         for dt in types:
             a = np.ones((), dtype=dt)[()]
             assert_equal(operator.neg(a) + a, 0)
Example #8
 def clonesBall( self ):
     # clone ball 1 = rect( x, y, w, h ), ( fpdx, fpdy )
     clone = None
     try:
         xbmcgui.lock()
         for ball in self.balls:
             if not ball.dead:
                 rect = ball.ballControl.getRect()
                 fpd = neg( ball.fpdx ), neg( ball.fpdy )
                 clone = rect, fpd
                 break
     except Exception:
         print_exc()
     finally:
         xbmcgui.unlock()
     if clone:
         self.new_ball( clone )
Example #9
def getlocaldata(sms,dr,flst):
    for f in flst:
        fullf = os.path.join(dr,f)
        if os.path.isfile(fullf): 
            tup = (fullf, os.path.getsize(fullf))
            _listOfFiles.append(tup)
    tmp = sorted(_listOfFiles, key=operator.itemgetter(1))
    sms[1] = tmp[operator.neg(sms[0]):]  # keep the sms[0] largest files (tmp is sorted by size, ascending)
Example #10
def perturb_data(loss_func, optimizer, model, optimal_subset, numEpochs, batchSize,
                 constrainWeight, epsilon, invertTargets, negateLoss,
                 ):
    """
    the model parameter is pre-trained model that minimizes loss. By freezing the weights of all layers except the
    embedding layer and training with either inverted target vector or negated loss, we can perturb the embedding
    layer weights to the input data as a perturbed embedding matrix--the one-hot vector input type cannot be
    perturbed because its values are integers whereas the embedding matrix is real. The embedding layer weight
    matrix functions as a lookup-table so that each row corresponds to a one-hot index--by perturbing the
    weight matrix/lookup-table we can construct perturbed embedding matrix inputs using the original one-hot vector
    input types and mapping them to perturbed embedding matrix input types via the perturbed
    weight matrix/lookup-table

    :param model:
    :param optimal_subset:
    :param loss_func:
    :param optimizer:
    :param numEpochs:
    :param constrainWeight:
    :param epsilon:
    :param invertTargets:
    :param negateLoss:
    :return:
    """

    from operator import neg

    def constrain_fn(embedW, embedW_orig, eps):
        return K.T.nlinalg.trace(K.dot(K.transpose(embedW_orig - embedW), embedW_orig - embedW)) - eps

    # freeze the weights of every layer except embedding layer
    for idx, L in enumerate(model.layers):
        if idx == 0:
            continue
        L.trainable = False

    assert model.layers[0].trainable is True, "embedding layer must be trainable"

    constrain_fn_args = [model.layers[0].W,
                         model.layers[0].W.get_value(), epsilon]

    loss = constrained_loss((lambda ytru, ypred: neg(loss_func(ytru, ypred)) if negateLoss else loss_func(ytru, ypred)),
                            constrain_fn, constrain_fn_args,
                            constraint_weight=constrainWeight)

    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])

    targets = optimal_subset[1]
    onehotVectors = optimal_subset[0]

    if invertTargets:
        targets = np.logical_not(targets)

    callbacks = [Batch_EarlyStopping(verbose=1)]

    model.fit(onehotVectors, targets, nb_epoch=numEpochs, batch_size=batchSize, callbacks=callbacks)

    return model
Example #11
    def score_getter_penalize_overlap_estimated(a, b):
        '''Penalizes overlap, to try to find non-overlapping segments'''
        if a is None or overlap(a, b) == 0:
            return len_aln(b) * (b.pctid / 100.0)
        olap = overlap(a, b)
        olap_penalty = olap * (b.pctid / 100.0) * neg(1)
        b_hang_matches = (len_aln(b) - olap) * (b.pctid / 100.0)

        return olap_penalty + b_hang_matches
Example #12
def error(geoprint, radians=False):
    """Returns the error of a given geoprint.

    If radians is true, return the error in radians, otherwise
    degrees.
    """
    e = 90.0 * pow(2, neg(len(geoprint) - 1))
    if radians:
        return rads(e)
    return e
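A quick check of the formula, assuming rads above refers to math.radians; only the geoprint's length matters here:

from math import radians

geoprint = "abc"
e = 90.0 * pow(2, -(len(geoprint) - 1))   # 90 * 2**-2
assert e == 22.5
print(radians(e))                          # the same error expressed in radians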
Example #13
def candidate_killer(candidates):
  scoreboard = [] # here we will sort the candidates
  for i, c in enumerate(candidates): # for each candidate in the list of candidates, fill its
    scoreboard.append((len(c),neg(c[-1]),i)) # length, negative of the last item, index in the list
  scoreboard.sort(reverse=True) # reverse sort, longest with smallest last item first (because we use the negative)
  good_candidates = [candidates[scoreboard[0][2]]] # put the first one into the list of good candidates
  best_last = scoreboard[0][1] # keep the information of the smallest last item so far (we use the negative)
  for s in scoreboard: # from the scoreboard
    if s[1] > best_last: # if the last item is smaller than the best known (we compare its negative!)
      good_candidates.append(candidates[s[2]]) # append the candidate into the list of good ones
      best_last = s[1] # and update the smallest last item (negative) 
  return good_candidates # return the list of good candidates
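A small usage sketch, meant to run in the same module as candidate_killer above; the candidate lists are made up and only their length and last element matter:

candidates = [[1, 2, 3, 9], [1, 2, 3], [1, 2], [5]]
print(candidate_killer(candidates))
# [[1, 2, 3, 9], [1, 2, 3], [1, 2]]  ([5] is dropped: it is shorter and ends on a larger value)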
Example #14
    def solve_pnp_distance(self, box,poses):
        rows = box.shape[0]
        distances = np.zeros((rows,1))
        for a in range(rows):
            pose = poses[:3,a]
            distance = pose[2]
            if distance < 0:
                distance = neg(distance)
            distances = np.resize(distances,(a,1))
            distances = np.append(distances,distance)

        return distances
Example #15
File: Time.py Project: rbe/dmerce
 def DiffInDays(self, d1, d2):
     """ calculate difference between two dates rounded to days """
     d1, d2 = self.__DiffCheck(d1, d2)
     if d1 == 'N/A' or d2 == 'N/A':
         return 'N/A'
     # Calculate difference in seconds
     delta = d1 - d2
     if delta < 0:
         delta = operator.neg(delta)  # same as abs(delta): make the difference positive
     # Calculate days
     days = int(round(delta / 60 / 60 / 24))
     if days == 0:
         days = 1
     return days
Example #16
    def score_getter_matching_consensus_estimated(a, b):
        '''Scoring Function for filtering alignments for downstream
        consensus. Overlaps are sort of encouraged '''
        if a is None or overlap(a, b) == 0:
            return len_aln(b) * (b.pctid / 100.0)

        olap = overlap(a,b)
        a_olap_matches = olap * (a.pctid / 100.0) * neg(0.5)
        b_olap_matches = olap * (b.pctid / 100.0) * 0.5
        b_hang_matches = (len_aln(b) - olap) * (b.pctid / 100.0)
        
        score = a_olap_matches + b_olap_matches + b_hang_matches
    
        return score
Example #17
def unary_exp(tokens):
    assert len(tokens)

    if tokens[0] == "-":
        tokens.pop(0)
        value = value_exp(tokens)
        return lambda row, ctx: operator.neg(value(row, ctx))
    elif tokens[0] == "not":
        tokens.pop(0)
        value = value_exp(tokens)
        return lambda row, ctx: operator.not_(value(row, ctx))
    elif tokens[0] == "+":
        tokens.pop(0)

    return value_exp(tokens)
Example #18
File: Time.py Project: rbe/dmerce
 def ActualYear(self, timestamp = None, name = 'ActualYear', selected = None, rangeFrom = None, rangeTo = None, plus = 0, minus = 0, withNull = 0, nullSelected = 0):
     if not timestamp:
         timestamp = time.time()
     else:
         timestamp = self.CareForTimestamp(timestamp)
     if rangeFrom is None:
         rangeFrom = int(time.strftime('%Y', time.localtime(timestamp)))
     elif rangeFrom < 0:
         # actual year - rangeFrom
         rf = int(time.strftime('%Y', time.localtime(timestamp))) - operator.neg(rangeFrom)
         rangeFrom = rf
     if rangeTo is None:
         rangeTo = rangeFrom + 1
     elif rangeTo < 1000:
         rangeTo = int(time.strftime('%Y', time.localtime(timestamp))) + rangeTo
     return self.__GenerateActual(timestamp, name, selected, '%Y', (rangeFrom, rangeTo), plus, minus, withNull, nullSelected)
Example #19
def test_operator_high_cohesive_invoke():
    # f objects can be called as functions
    assert add(1, 2) == 3
    assert sub(7, 3) == 4
    assert mul(3, 7) == 21
    assert truediv(32, 4) == 8
    assert neg(1) == -1

    # works with arbitrary numbers of arguments
    assert f(bool)() is False
    assert f(max)([1]) == 1
    assert f(max)(6, 1) == 6
    assert f(max)(6, 1, 9) == 9
    assert f(max)(*range(10)) == 9

    # works with keyword arguments
    assert f(int)('10', base=16) == 16
Example #20
    def _get_array_size(self, node):
        """Calculate the size of the array."""
        if isinstance(node, c_ast.Constant):
            size = int(node.value)
        elif isinstance(node, c_ast.UnaryOp):
            if node.op == '-':
                size = operator.neg(self._get_array_size(node.expr))
            else:
                size = self._get_array_size(node.expr)
        elif isinstance(node, c_ast.BinaryOp):
            mapping = {'+': operator.add, '-': operator.sub,
                       '*': operator.mul, '/': operator.floordiv}
            left = self._get_array_size(node.left)
            right = self._get_array_size(node.right)
            size = mapping[node.op](left, right)
        else:
            raise ParseError('This type of array not supported: %s' % node)

        return size
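The string-to-function dispatch used above can be shown in isolation; the toy tuples below stand in for the c_ast nodes and are not pycparser's API:

import operator

OPS = {'+': operator.add, '-': operator.sub,
       '*': operator.mul, '/': operator.floordiv}

def eval_expr(node):
    if isinstance(node, int):              # constant
        return node
    if len(node) == 2 and node[0] == '-':  # unary minus
        return operator.neg(eval_expr(node[1]))
    op, left, right = node                 # binary operation
    return OPS[op](eval_expr(left), eval_expr(right))

print(eval_expr(('-', ('*', 4, ('+', 1, 2)))))   # -12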
Example #21
 def get_project_name_and_entry_id(self, series, feed, entry):
     base_regex = "Episode [\d\.]+ - (.+)"
     rep = re.compile("%s with .+" % base_regex)
     if rep.match(entry.title):
         title = rep.match(entry.title).groups()[0]
     else:
         # The episode title does not contain " with YYY"
         rep = re.compile(base_regex)
         if rep.match(entry.title):
             title = rep.match(entry.title).groups()[0]
         else:
             # No episode number (probably not a project's resource...)
             title = entry.title
     # Strip the following bits if the name ends in it
     for s in (", and more", " and more", ","):
         if title.endswith(s):
             title = title[: neg(len(s))]
     title = title.strip()
     return (title, entry.id)
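The title[:neg(len(s))] slice simply drops len(s) characters from the end of the string; a standalone illustration with a made-up title:

from operator import neg

title = "Episode 12 - CPython internals, and more"
suffix = ", and more"
assert title[:neg(len(suffix))] == title[:-len(suffix)] == "Episode 12 - CPython internals"
# On Python 3.9+, title.removesuffix(suffix) is the clearer spelling.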
Example #22
	def __init__(self):

		self.version = None
		self.options = None
		self.function_stack = [] # stack of functions called, from top-level to currently executing one
		
		# namespace includes variables and rules
		self.namespace = {} # Will be hash list of input files of whatever textual content
		self.namespace['report'] = OrderedDict()
		self.namespace['report']['title'] = "RCQC Quality Control Report"			
		self.namespace['report']['tool_version'] = CODE_VERSION	
		self.namespace['report']['job'] = {'status': 'ok'}
		self.namespace['report']['quality_control'] =  {'status': 'ok'}

		self.namespace['sections'] = []
		self.namespace['rule_index'] = {} # rule index based on location of store(_, location) field. 1 per.
		self.namespace['name_index'] = {} # index based on last (z) key of x.y.z namespace reference.
		self.namespace['files'] = [] 
		self.namespace['file_names'] = {} 
		self.namespace['iterator'] = {} # Provides the dictionary for each current iterator function evaluation (at call depth). 
		self.namespace['report_html'] = ''	

		self.input_file_paths = None	
		self.ruleset_file_path = None	
		self.output_json_file = None	

		# Really core functions below require access to RCQC class variables.  
		# Other functions can be added in rcqc_functions RCQCClassFnExtension and RCQCStaticFnExtension classes.
		self.functions = {
			'=': lambda location, value: self.storeNamespaceValue(value, location),
			'store': self.storeNamespaceValue, 
			'store_array': self.storeNamespaceValueAsArray,
			'if': self.fnIf,
			'fail': self.fail,
			'exit': self.exit,
			'exists': lambda location: self.namespaceReadValue(location, True),
			'-': lambda x: operator.neg(x),
			'not': lambda x: operator.not_(x),
			'function': lambda x: self.applyRules(x)
		}
Example #23
File: misc.py Project: rbe/dmerce
 def diff(self, d1, d2):
   # Check d1 and d2 for type; if string (no timestamp) convert
   if not d1 or not d2:
     return 'N/A'
   if type(d1) is types.StringType:
     if d1[0] == '0':
       return 'N/A'
     if self.isISODate(d1):
       d1 = self.isoToTimestamp(d1)
   if type(d2) is types.StringType:
     if d2[0] == '0':
       return 'N/A'
     if self.isISODate(d2):
       d2 = self.isoToTimestamp(d2)
   # Calculate difference in seconds
   delta = d1 - d2
   if delta < 0:
     delta = operator.neg(delta)
   # Calculate days
   days = int(round(delta / 60 / 60 / 24))
   if days == 0:
     days = 1
   return days
Example #24
	def __neg__(self): return Quaternion([operator.neg(c) for c in self])
	def __pos__(self): return Quaternion([operator.pos(c) for c in self])
Example #25
 def __neg__(self):
     return self.__class__(operator.neg(self._magnitude), self._units)
Example #26
print()

# pop
print(d.pop("dob"))
print(d)

#---------------------------------- Operators ---------------------------------
print("\n" * 3, "\t" * 2, "Operators\n\n" * 1)

import operator
print("pow(5,5)\t\t", operator.pow(5, 5))
print("abs(33)\t\t", operator.abs(33))
print("add(2,3)\t\t", operator.add(2, 3))
print("eq(2,2)\t\t", operator.eq(2, 2))
print("mod(23,8)\t\t", operator.mod(23, 8))
print("neg(66)\t\t", operator.neg(66))
print("xor(22,55)\t\t", operator.xor(22, 55))
print("sub(55,77)\t\t", operator.sub(55, 77))
print()
# != < > == >= <= and or not

# identity operators --> is / is not
# (comparing literals with "is" checks object identity, not value, and newer
#  Pythons emit a SyntaxWarning for it; use == / != for value comparisons)
print(2 is 2)
print(2 is not 2)

#---------------------------------- Operator Overloading ---------------------------------
print("\n" * 3, "\t" * 2, "Operator Overloading\n\n" * 1)

# + (Strings)
x = "3"  #input()
y = "23"  #input()
Example #27
    def run_model(self):

        ## initialize data structure

        self.res = np.zeros([
            self.duration,
            len(MODEL_RUN_COLUMNS) + len(EXPORT_COLUMNS_FOR_CSV)
        ],
                            dtype=np.float32)

        self.res[0, 0] = self.nf1
        self.res[0, 1] = self.nf2
        self.res[0, 2] = self.nf3
        self.res[0, 3] = self.nm1
        self.res[0, 4] = self.nm2
        self.res[0, 5] = self.nm3
        self.res[0, 6] = 0
        self.res[0, 7] = 0
        self.res[0, 8] = 0
        self.res[0, 9] = self.female_promotion_probability_1
        self.res[0, 10] = self.female_promotion_probability_2
        self.res[0, 11] = np.float32(
            sum(list([self.nf1, self.nf2, self.nf3])) / sum(
                list([
                    self.nf1, self.nf2, self.nf3, self.nm1, self.nm2, self.nm3
                ])))
        self.res[0, 12] = 0
        self.res[0, 13] = self.res[0, 0:6].sum()
        self.res[0, 14:] = 0

        # I assign the state variables to temporary variables. That way I
        # don't have to worry about overwriting the original state variables.

        hiring_rate_female_level_1 = self.bf1
        hiring_rate_female_level_2 = self.bf2
        hiring_rate_female_level_3 = self.bf3
        attrition_rate_female_level_1 = self.df1
        attrition_rate_female_level_2 = self.df2
        attrition_rate_female_level_3 = self.df3
        attrition_rate_male_level_1 = self.dm1
        attrition_rate_male_level_2 = self.dm2
        attrition_rate_male_level_3 = self.dm3
        probability_of_outside_hire_level_3 = self.phire3
        probability_of_outside_hire_level_2 = self.phire2
        female_promotion_probability_1_2 = self.female_promotion_probability_1
        female_promotion_probability_2_3 = self.female_promotion_probability_2
        department_size_upper_bound = self.upperbound
        department_size_lower_bound = self.lowerbound
        variation_range = self.variation_range
        unfilled_vacancies = 0
        change_to_level_1 = 0
        change_to_level_2 = 0
        change_to_level_3 = 0

        for i in range(1, self.duration):
            # initialize variables for this iteration

            prev_number_of_females_level_1 = self.res[i - 1, 0]
            prev_number_of_females_level_2 = self.res[i - 1, 1]
            prev_number_of_females_level_3 = self.res[i - 1, 2]
            prev_number_of_males_level_1 = self.res[i - 1, 3]
            prev_number_of_males_level_2 = self.res[i - 1, 4]
            prev_number_of_males_level_3 = self.res[i - 1, 5]
            prev_number_of_vacancies_level_3 = self.res[i - 1, 6]
            prev_number_of_vacancies_level_2 = self.res[i - 1, 7]
            prev_number_of_vacancies_level_1 = self.res[i - 1, 8]
            prev_promotion_rate_female_level_1 = self.female_promotion_probability_1
            prev_promotion_rate_female_level_2 = self.female_promotion_probability_2
            department_size = self.res[i - 1, 0:6].sum()

            # Process Model

            # Determine department size variation for this timestep

            # first both female and males leave the department according to binomial probability.

            female_attrition_level_3 = binomial(prev_number_of_females_level_3,
                                                attrition_rate_female_level_3)

            male_attrition_level_3 = binomial(prev_number_of_males_level_3,
                                              attrition_rate_male_level_3)

            # the departures create a set of vacancies. These vacancies are the basis for new hiring
            total_vacancies_3 = female_attrition_level_3 + \
                                male_attrition_level_3 + change_to_level_3

            # women are hired first and then men
            hiring_female_3 = binomial(
                max(0,
                    total_vacancies_3), probability_of_outside_hire_level_3 *
                hiring_rate_female_level_3)

            hiring_male_3 = binomial(
                max(0, total_vacancies_3 - hiring_female_3),
                probability_of_outside_hire_level_3 *
                (1 - hiring_rate_female_level_3))

            total_hiring_3 = hiring_female_3 + hiring_male_3

            # level 3 vacancies that are not filled by new hires create opportunities
            # for promotion from level 2. Again women are promoted first and men second.
            # Also note the error trap that if we try to promote more professors from
            # level 2 than there exist at level 2, then we will prevent this from happening.

            vacancies_remaining_after_hiring_3 = total_vacancies_3 - total_hiring_3

            potential_promotions_after_hiring_3 = max(
                0, vacancies_remaining_after_hiring_3)

            promotions_of_females_level_2_3 = binomial(
                min(potential_promotions_after_hiring_3,
                    prev_number_of_females_level_2),
                female_promotion_probability_2_3)

            promotions_of_males_level_2_3 = binomial(
                max(
                    0,
                    min(
                        vacancies_remaining_after_hiring_3 -
                        promotions_of_females_level_2_3,
                        prev_number_of_males_level_2)),
                1 - female_promotion_probability_2_3)

            # attrition at level 2 - either people leave from attrition or promotion

            female_attrition_level_2 = binomial(
                max(
                    0, prev_number_of_females_level_2 -
                    promotions_of_females_level_2_3),
                attrition_rate_female_level_2)

            male_attrition_level_2 = binomial(
                max(
                    0, prev_number_of_males_level_2 -
                    promotions_of_males_level_2_3),
                attrition_rate_male_level_2)

            # the departures create a set of vacancies. These vacancies are the basis for new hiring
            total_vacancies_2 = sum(
                list([
                    female_attrition_level_2, male_attrition_level_2,
                    promotions_of_females_level_2_3,
                    promotions_of_males_level_2_3, change_to_level_2
                ]))

            hiring_female_2 = binomial(
                max(0,
                    total_vacancies_2), probability_of_outside_hire_level_2 *
                hiring_rate_female_level_2)
            hiring_male_2 = binomial(
                max(0, total_vacancies_2 - hiring_female_2),
                probability_of_outside_hire_level_2 *
                (1 - hiring_rate_female_level_2))

            total_hiring_2 = hiring_female_2 + hiring_male_2

            vacancies_remaining_after_hiring_2 = total_vacancies_2 - total_hiring_2

            potential_promotions_after_hiring_2 = max(
                0, vacancies_remaining_after_hiring_2)

            promotions_of_females_level_1_2 = binomial(
                max(
                    0,
                    min(potential_promotions_after_hiring_2,
                        prev_number_of_females_level_1)),
                female_promotion_probability_1_2)

            promotions_of_males_level_1_2 = binomial(
                max(
                    0,
                    min(
                        vacancies_remaining_after_hiring_2 -
                        promotions_of_females_level_1_2,
                        prev_number_of_females_level_1)),
                probability_of_outside_hire_level_2 *
                (1 - female_promotion_probability_1_2))

            ## Level 1

            female_attrition_level_1 = binomial(
                max(
                    0, prev_number_of_females_level_1 -
                    promotions_of_females_level_1_2),
                attrition_rate_female_level_1)

            male_attrition_level_1 = binomial(
                max(
                    0, prev_number_of_males_level_1 -
                    promotions_of_males_level_1_2),
                attrition_rate_male_level_1)

            total_vacancies_1 = sum(
                list([
                    female_attrition_level_1, male_attrition_level_1,
                    promotions_of_females_level_1_2,
                    promotions_of_males_level_1_2, change_to_level_1
                ]))

            hiring_female_1 = binomial(max(0, total_vacancies_1),
                                       hiring_rate_female_level_1)

            hiring_male_1 = binomial(
                max(0, total_vacancies_1 - hiring_female_1),
                1 - hiring_rate_female_level_1)

            # Write state variables to array and move to next iteration

            self.res[i, 0] = number_of_females_level_1 = sum(
                list([
                    prev_number_of_females_level_1,
                    neg(female_attrition_level_1),
                    neg(promotions_of_females_level_1_2), hiring_female_1
                ]))

            assert (number_of_females_level_1 >=
                    0), "negative number of females 1"

            self.res[i, 1] = number_of_females_level_2 = max(
                0,
                sum(
                    list([
                        prev_number_of_females_level_2,
                        neg(female_attrition_level_2),
                        neg(promotions_of_females_level_2_3),
                        promotions_of_females_level_1_2, hiring_female_2
                    ])))

            self.res[i, 2] = number_of_females_level_3 = sum(
                list([
                    prev_number_of_females_level_3,
                    neg(female_attrition_level_3),
                    promotions_of_females_level_2_3, hiring_female_3
                ]))

            self.res[i, 3] = number_of_males_level_1 = sum(
                list([
                    prev_number_of_males_level_1,
                    neg(male_attrition_level_1),
                    neg(promotions_of_males_level_1_2), hiring_male_1
                ]))

            self.res[i, 4] = number_of_males_level_2 = sum(
                list([
                    prev_number_of_males_level_2,
                    neg(male_attrition_level_2),
                    neg(promotions_of_males_level_2_3),
                    promotions_of_males_level_1_2, hiring_male_2
                ]))

            self.res[i, 5] = number_of_males_level_3 = sum(
                list([
                    prev_number_of_males_level_3,
                    neg(male_attrition_level_3), promotions_of_males_level_2_3,
                    hiring_male_3
                ]))

            self.res[i, 6] = sum(
                list([male_attrition_level_3, female_attrition_level_3]))

            self.res[i, 7] = sum(
                list([
                    male_attrition_level_2, female_attrition_level_2,
                    promotions_of_females_level_2_3,
                    promotions_of_males_level_2_3
                ]))

            self.res[i, 8] = sum(
                list([
                    male_attrition_level_1, female_attrition_level_1,
                    promotions_of_males_level_1_2,
                    promotions_of_females_level_1_2
                ]))

            self.res[i, 9] = self.female_promotion_probability_1
            self.res[i, 10] = self.female_promotion_probability_2
            self.res[i, 11] = np.float32(
                truediv(
                    sum(
                        list([
                            number_of_females_level_1,
                            number_of_females_level_2,
                            number_of_females_level_3
                        ])),
                    sum(
                        list([
                            number_of_females_level_1,
                            number_of_females_level_2,
                            number_of_females_level_3, number_of_males_level_1,
                            number_of_males_level_2, number_of_males_level_3
                        ]))))
            unfilled_vacancies = abs(department_size - self.res[i, 0:6].sum())

            self.res[i, 12] = unfilled_vacancies
            department_size = self.res[i, 0:6].sum()
            self.res[i, 13] = department_size
            self.res[i, 14] = hiring_female_3
            self.res[i, 15] = hiring_male_3
            self.res[i, 16] = hiring_female_2
            self.res[i, 17] = hiring_male_2
            self.res[i, 18] = hiring_female_1
            self.res[i, 19] = hiring_male_1
            self.res[i, 20] = 0
            self.res[i, 21] = 0
            self.res[i, 22] = promotions_of_females_level_2_3
            self.res[i, 23] = promotions_of_males_level_2_3
            self.res[i, 24] = promotions_of_females_level_1_2
            self.res[i, 25] = promotions_of_males_level_1_2
            self.res[i, 26] = hiring_rate_female_level_1
            self.res[i, 27] = hiring_rate_female_level_2
            self.res[i, 28] = hiring_rate_female_level_3
            self.res[i, 29] = 1 - hiring_rate_female_level_1
            self.res[i, 30] = 1 - hiring_rate_female_level_2
            self.res[i, 31] = 1 - hiring_rate_female_level_3
            self.res[i, 32] = attrition_rate_female_level_1
            self.res[i, 33] = attrition_rate_female_level_2
            self.res[i, 34] = attrition_rate_female_level_3
            self.res[i, 35] = attrition_rate_male_level_1
            self.res[i, 36] = attrition_rate_male_level_2
            self.res[i, 37] = attrition_rate_male_level_3
            self.res[i, 38] = 1
            self.res[i, 39] = probability_of_outside_hire_level_2
            self.res[i, 40] = probability_of_outside_hire_level_3
            self.res[i, 41] = female_promotion_probability_1_2
            self.res[i, 42] = female_promotion_probability_2_3
            self.res[i, 43] = 1 - female_promotion_probability_1_2
            self.res[i, 44] = 1 - female_promotion_probability_2_3
            self.res[i, 45] = department_size_upper_bound
            self.res[i, 46] = department_size_lower_bound
            self.res[i, 47] = variation_range
            self.res[i, 48] = self.duration

            # this produces an array of values. Then I need to assign the
            # values to levels. So if I have say a range of variation of 5. I
            #  will get something like [-1,0,1,-1,0] or something. I need to
            # turn this into something like [2,-1,0]. That means randomly
            # assigning the values in the array to levels.

            flag = False
            while flag == False:

                changes = np.random.choice([-1, 0, 1], variation_range)

                levels = np.random.choice([1, 2, 3], variation_range)  #
                # random level
                # choice

                # need to test whether the candidate changes keep the
                # department size within bounds.
                # print(["old dept size:", department_size,
                #        "new dept size:", self.res[i, 0:6].sum(),
                #        "candidate:", department_size +
                #        changes.sum(),
                #        " added postions: ", changes.sum(),
                #        "unfilled ", unfilled_vacanies])
                if (department_size + changes.sum() <=
                        department_size_upper_bound
                        and department_size + changes.sum() >=
                        department_size_lower_bound):
                    # np.int was removed in NumPy 1.24; the builtin int is equivalent here
                    change_to_level_3 = int(
                        changes[np.where(levels == 3)[0]].sum())
                    change_to_level_2 = int(
                        changes[np.where(levels == 2)[0]].sum())
                    change_to_level_1 = int(
                        changes[np.where(levels == 1)[0]].sum())
                    flag = True

                if (department_size > department_size_upper_bound):
                    change_to_level_3 = 0
                    change_to_level_2 = 0
                    change_to_level_1 = 0

                    flag = True

                if department_size < department_size_lower_bound:
                    changes = np.ones(variation_range)
                    change_to_level_3 = int(
                        changes[np.where(levels == 3)[0]].sum())
                    change_to_level_2 = int(
                        changes[np.where(levels == 2)[0]].sum())
                    change_to_level_1 = int(
                        changes[np.where(levels == 1)[0]].sum())
                    flag = True

        df_ = pd.DataFrame(self.res)
        df_.columns = MODEL_RUN_COLUMNS + EXPORT_COLUMNS_FOR_CSV

        recarray_results = df_.to_records(index=True)
        self.run = recarray_results
        return recarray_results
Example #28
 def __neg__(self):
   return NonStandardInteger(operator.neg(self.val))
Example #29
 def __neg__(self):
     return Coordinates(operator.neg(self.x), operator.neg(self.y),
                        operator.neg(self.z))
Example #30
 def negate_usecase(x):
     return operator.neg(x)
Example #31
 def get_score(self):
     from math import sin, cos
     from operator import add, sub, mul, neg
     div = lambda a, b: a / b if b != 0 else 1 # protected division operator
     return div(self.mbs, neg(mul(self.ed, div(self.ls, self.mbs)))) + self.ks * Feature.KS_MAGNIFIER
Example #32
 def __neg__(self):
     return Vec2D(operator.neg(self.x), operator.neg(self.y))
Example #33
 def negate_usecase(x):
     return operator.neg(x)
Example #34
    def run_parameter_sweep(self, number_of_runs, param, llim, ulim,
                            num_of_steps):
        '''
        This function sweeps a single parameter and captures the effect of
        that variation on the overall model. Any valid parameter can be chosen.

        :param number_of_runs: The number of model iterations per parameter
        value
        :type number_of_runs: int
        :param param: The name of the parameter to sweep
        :type param: basestring
        :param llim: lower limit of the parameter value
        :type llim: float
        :param ulim: upper limit of the parameter value
        :type ulim: float
        :param num_of_steps: total number of increments in the range between
        the upper and lower limits.
        :type num_of_steps: int
        :return: a Dataframe containing individual model runs using the
        parameter increments
        :rtype: pandas Dataframe
        '''

        # First create a structured array to hold the results of the
        # simulation, with one row for each step in the parameter sweep.

        parameter_sweep_increments = np.linspace(llim, ulim, num_of_steps)

        parameter_sweep_results = pd.DataFrame(
            np.zeros([
                len(parameter_sweep_increments),
                len(RESULTS_COLUMNS + FEMALE_MATRIX_COLUMNS[1:]) + 1
            ]))

        parameter_sweep_results.columns = ['increment'] + RESULTS_COLUMNS + \
                                          FEMALE_MATRIX_COLUMNS[1:]
        parameter_sweep_results.loc[:,
                                    'increment'] = parameter_sweep_increments

        # Run simulations with parameter increments and collect into a container.

        for i, val in enumerate(parameter_sweep_increments):
            setattr(self, param, val)
            self.run_multiple(number_of_runs)

            # Sets the values the sweep data matrix to the last values in the
            #  multiple runs results_matrix.
            parameter_sweep_results.iloc[i, 1:neg(len(FEMALE_MATRIX_COLUMNS)) -
                                         1] = self.results_matrix.tail(1).iloc[
                                             0, 1:-1]

            # Sets the year in the sweep data matrix to the last year in the
            # results_matrix.
            parameter_sweep_results.iloc[i, 1] = self.results_matrix.tail(
                1).iloc[0, len(RESULTS_COLUMNS)]

            # Fills the sweep matrix with data from the female percentage
            # matrices
            parameter_sweep_results.iloc[i, len(RESULTS_COLUMNS) + 1:] = \
                self.pct_female_matrix.tail(1).iloc[0, 1:]

        self.parameter_sweep_results = parameter_sweep_results

        # BEGIN BLOCK
        # Reset the models to original settings. This is very important,
        # otherwise settings from the parameter sweep will contaminate
        # subsequent runs of the model.

        self.load_baseline_data_mgmt()
        # END BLOCK

        return (0)
Example #35
 def __neg__(self) -> bool:
     return operator.neg(self.size)
Example #36
    def run_model(self):

        ## initialize data structure

        self.res = np.zeros([self.duration, 12], dtype=np.float32)

        self.res[0, 0] = self.nf1
        self.res[0, 1] = self.nf2
        self.res[0, 2] = self.nf3
        self.res[0, 3] = self.nm1
        self.res[0, 4] = self.nm2
        self.res[0, 5] = self.nm3
        self.res[0, 6] = self.vac3
        self.res[0, 7] = self.vac2
        self.res[0, 8] = self.vac1
        self.res[0, 9] = self.female_promotion_probability_1
        self.res[0, 10] = self.female_promotion_probability_2
        self.res[0, 11] = np.float32(
            sum(list([self.nf1, self.nf2, self.nf3])) / sum(
                list([
                    self.nf1, self.nf2, self.nf3, self.nm1, self.nm2, self.nm3
                ])))

        hiring_rate_female_level_1 = self.bf1
        hiring_rate_female_level_2 = self.bf2
        hiring_rate_female_level_3 = self.bf3
        attrition_rate_female_level_1 = self.df1
        attrition_rate_female_level_2 = self.df2
        attrition_rate_female_level_3 = self.df3
        attrition_rate_male_level_1 = self.dm1
        attrition_rate_male_level_2 = self.dm2
        attrition_rate_male_level_3 = self.dm3
        probability_of_outside_hire_level_3 = self.phire3
        probability_of_outside_hire_level_2 = self.phire2
        male_promotion_probability_1_2 = self.male_promotion_probability_1
        male_promotion_probability_2_3 = self.male_promotion_probability_2
        for i in range(1, self.duration):
            # initialize variables for this iteration

            prev_number_of_females_level_1 = self.res[i - 1, 0]
            prev_number_of_females_level_2 = self.res[i - 1, 1]
            prev_number_of_females_level_3 = self.res[i - 1, 2]
            prev_number_of_males_level_1 = self.res[i - 1, 3]
            prev_number_of_males_level_2 = self.res[i - 1, 4]
            prev_number_of_males_level_3 = self.res[i - 1, 5]
            prev_number_of_vacancies_level_3 = self.res[i - 1, 6]
            prev_number_of_vacancies_level_2 = self.res[i - 1, 7]
            prev_number_of_vacancies_level_1 = self.res[i - 1, 8]
            prev_promotion_rate_female_level_1 = self.female_promotion_probability_1
            prev_promotion_rate_female_level_2 = self.female_promotion_probability_2
            if np.isnan(prev_promotion_rate_female_level_1):
                prev_promotion_rate_female_level_1 = 0
            if np.isnan(prev_promotion_rate_female_level_2):
                prev_promotion_rate_female_level_2 = 0
            prev_gender_proportion_of_department = np.float32(
                sum(
                    list([
                        prev_number_of_females_level_1,
                        prev_number_of_females_level_2,
                        prev_number_of_females_level_3
                    ])) / (sum(
                        list([
                            prev_number_of_females_level_1,
                            prev_number_of_females_level_2,
                            prev_number_of_females_level_3,
                            prev_number_of_males_level_1,
                            prev_number_of_males_level_2,
                            prev_number_of_males_level_3
                        ]))))

            # Process Model

            # first both female and males leave the department according to binomial probability.

            female_attrition_level_3 = binomial(prev_number_of_females_level_3,
                                                attrition_rate_female_level_3)

            male_attrition_level_3 = binomial(prev_number_of_males_level_3,
                                              attrition_rate_male_level_3)

            # the departures create a set of vacancies. These vacancies are the basis for new hiring
            total_vacancies_3 = female_attrition_level_3 + male_attrition_level_3

            # women are hired first and then men

            hiring_female_3 = binomial(
                total_vacancies_3, probability_of_outside_hire_level_3 *
                hiring_rate_female_level_3)
            hiring_male_3 = binomial(
                max(0, total_vacancies_3 - hiring_female_3),
                probability_of_outside_hire_level_3 *
                (1 - hiring_rate_female_level_3))

            # promotion after hiring level 3

            promotions_female_after_hiring_2_3 = binomial(
                max(prev_number_of_females_level_2,
                    total_vacancies_3 - hiring_female_3 - hiring_male_3),
                prev_promotion_rate_female_level_2)
            # formula should read that either the remaining vacancies or the previous number of males--whichever is smallest. But need to make sure no negative values.
            promotions_of_males_level_2_3 = binomial(
                min(
                    prev_number_of_males_level_2,
                    max(
                        0, total_vacancies_3 - hiring_female_3 -
                        hiring_male_3 - promotions_female_after_hiring_2_3)),
                male_promotion_probability_2_3)
            assert (promotions_of_males_level_2_3 >=
                    0), "promotions_of_males_level_2_3 is negative"

            # attrition at level 2 - either people leave from attrition or promotion

            female_attrition_level_2 = binomial(
                max(
                    0, prev_number_of_females_level_2 -
                    promotions_female_after_hiring_2_3),
                attrition_rate_female_level_2)

            male_attrition_level_2 = binomial(
                max(
                    0, prev_number_of_males_level_2 -
                    promotions_of_males_level_2_3),
                attrition_rate_male_level_2)

            # the departures create a set of vacancies. These vacancies are the basis for new hiring
            total_vacancies_2 = sum(
                list([
                    female_attrition_level_2, male_attrition_level_2,
                    promotions_female_after_hiring_2_3,
                    promotions_of_males_level_2_3
                ]))
            assert (total_vacancies_2 >=
                    0), "total vacancies level 2 is less than zero"

            # TODO set to hiring first

            hiring_female_2 = binomial(
                max(0,
                    total_vacancies_2), probability_of_outside_hire_level_2 *
                hiring_rate_female_level_2)

            hiring_male_2 = binomial(
                max(0, total_vacancies_2 - hiring_female_2),
                1 - probability_of_outside_hire_level_2 *
                hiring_rate_female_level_2)

            # clamp the trial count at zero (the original min() made it 0 or negative,
            # which contradicts the assertions below and can crash binomial())
            promotions_of_females_level_1_2 = binomial(
                max(
                    0,
                    min(prev_number_of_females_level_1,
                        total_vacancies_2 - hiring_female_2 - hiring_male_2)),
                prev_promotion_rate_female_level_1)

            promotions_of_males_level_1_2 = binomial(
                max(
                    0,
                    min(
                        prev_number_of_males_level_1,
                        total_vacancies_2 - hiring_female_2 - hiring_male_2 -
                        promotions_of_females_level_1_2)),
                male_promotion_probability_1_2)

            assert (promotions_of_females_level_1_2 >=
                    0), "promotions of females level 1-2 is negative"
            assert (promotions_of_males_level_1_2 >=
                    0), "promotions of males level 1-2 is negative"

            total_hiring_2 = hiring_female_2 + hiring_male_2

            ## Level 1

            female_attrition_level_1 = binomial(
                max(
                    0, prev_number_of_females_level_1 -
                    promotions_of_females_level_1_2),
                attrition_rate_female_level_1)

            male_attrition_level_1 = binomial(
                max(0, prev_number_of_males_level_1),
                attrition_rate_male_level_1)

            total_vacancies_1 = sum(
                list([
                    female_attrition_level_1, male_attrition_level_1,
                    promotions_of_females_level_1_2,
                    promotions_of_males_level_1_2
                ]))

            hiring_female_1 = binomial(total_vacancies_1,
                                       hiring_rate_female_level_1)
            hiring_male_1 = binomial(
                max(0, total_vacancies_1 - hiring_female_1),
                1 - hiring_rate_female_level_1)

            # Write state variables to array and move to next iteration

            self.res[i, 0] = number_of_females_level_1 = sum(
                list([
                    prev_number_of_females_level_1,
                    neg(female_attrition_level_1),
                    neg(promotions_of_females_level_1_2), hiring_female_1
                ]))

            self.res[i, 1] = number_of_females_level_2 = max(
                0,
                sum(
                    list([
                        prev_number_of_females_level_2,
                        neg(female_attrition_level_2),
                        neg(promotions_female_after_hiring_2_3),
                        promotions_of_females_level_1_2, hiring_female_2
                    ])))
            self.res[i, 2] = number_of_females_level_3 = sum(
                list([
                    prev_number_of_females_level_3,
                    neg(female_attrition_level_3),
                    promotions_female_after_hiring_2_3, hiring_female_3
                ]))

            self.res[i, 3] = number_of_males_level_1 = sum(
                list([
                    prev_number_of_males_level_1,
                    neg(male_attrition_level_1),
                    neg(promotions_of_males_level_1_2), hiring_male_1
                ]))

            self.res[i, 4] = number_of_males_level_2 = sum(
                list([
                    prev_number_of_males_level_2,
                    neg(male_attrition_level_2),
                    neg(promotions_of_males_level_2_3),
                    promotions_of_males_level_1_2, hiring_male_2
                ]))

            self.res[i, 5] = number_of_males_level_3 = sum(
                list([
                    prev_number_of_males_level_3,
                    neg(male_attrition_level_3), promotions_of_males_level_2_3,
                    hiring_male_3
                ]))

            self.res[i, 6] = number_of_vacancies_level_3 = sum(
                list([male_attrition_level_3, female_attrition_level_3]))

            self.res[i, 7] = number_of_vacancies_level_2 = sum(
                list([
                    male_attrition_level_2, female_attrition_level_2,
                    promotions_female_after_hiring_2_3,
                    promotions_of_males_level_2_3
                ]))

            self.res[i, 8] = number_of_vacancies_level_1 = sum(
                list([
                    male_attrition_level_1, female_attrition_level_1,
                    promotions_of_males_level_1_2,
                    promotions_of_females_level_1_2
                ]))

            self.res[
                i,
                9] = promotion_rate_female_level_1 = self.female_promotion_probability_1
            self.res[
                i,
                10] = promotion_rate_women_level_2 = self.female_promotion_probability_2
            self.res[i, 11] = gender_proportion_of_department = np.float32(
                truediv(
                    sum(
                        list([
                            number_of_females_level_1,
                            number_of_females_level_2,
                            number_of_females_level_3
                        ])),
                    sum(
                        list([
                            number_of_females_level_1,
                            number_of_females_level_2,
                            number_of_females_level_3, number_of_males_level_1,
                            number_of_males_level_2, number_of_males_level_3
                        ]))))

        # print(self.res[i,:])
        ## Print Data matrix

        df_ = pd.DataFrame(self.res)
        df_.columns = [
            'f1', 'f2', 'f3', 'm1', 'm2', 'm3', 't3', 't2', 't1', 'prom1',
            'prom2', 'gendprop'
        ]

        recarray_results = df_.to_records(index=True)
        self.run = recarray_results
        return recarray_results
Example #37
import operator

# // floor division: only meaningful for integers;
# with floats the result is still floored but comes back as a float ending in .0
print(operator.floordiv(9, 4))
print(operator.floordiv(9.04, 4))

# modulo, same as a % b
print(operator.mod(9, 4))

# true (exact) division
print(operator.truediv(9.04, 4))

# absolute value
print(operator.abs(-10))

# arithmetic negation, equivalent to -a
print(operator.neg(-10))

# bitwise inversion, equivalent to ~a
# ~a == (-a) - 1
# ~10 == -11
# ~(-10) == 9
print(operator.inv(10))
print(operator.inv(-10))
print(operator.invert(10))
print(operator.invert(-10))

# exponentiation, same as a ** b
print(operator.pow(2, 3))

# left shift, same as <<; multiplies by the corresponding power of 2
print(operator.lshift(3, 2))
Example #38
 def __neg__(self):
     return self.__class__(operator.neg(self._magnitude), self._units)
Example #39
def test_metrics_neg():
    first_metric = DummyMetric(1)

    final_neg = neg(first_metric)
    assert isinstance(final_neg, CompositionalMetric)
    assert torch.allclose(tensor(-1), final_neg.compute())
Example #40
 def test_neg_array(self, xp, dtype):
     a = testing.shaped_arange((2, 3), xp, dtype)
     return operator.neg(a)
Example #41
def op_neg(x):
    return _op.neg(x)
Example #42
 def test_neg_zerodim(self, xp, dtype):
     a = xp.array(-2, dtype)
     return operator.neg(a)
Example #43
 def __neg__(self):
     return vec(operator.neg(self.x), operator.neg(self.y))
Example #44
print(" bit or ", oct(10), oct(4))
print(" |           : ", 10 | 4)
print(" or_(a, b)   : ", op.or_(10,4))
print("x.__or__(y)  : ", (10).__or__(4))
print(" bit xor ", oct(10), oct(6))
print(" ^            : ", 10 ^ 6)
print(" xor(a, b)    : ", op.xor(10,6))
print("x.__xor__(y)  : ", (10).__xor__(6))

print(" bit inversion ", oct(10))
print(" ~           : ", ~(10) )
print(" invert(a)   : ", op.invert(10))
print("x.__invert__(y)  : ", (10).__invert__())

print(" negative : ", -(10))
print(" neg(a)   : ", op.neg((10)))
print("a.__neg__   : ", (10).__neg__())
print(" positive : ", -(-10), +(10))
print(" pos(a)   : ", op.pos((10)))
print("a.__pos__   : ", (10).__pos__())

print(" right hand operator ")
print(" x + y ", (8).__radd__(2))
print(" x + y ", (2).__add__(8))

print(" x ** y ", (3).__rpow__(2))
print(" x ** y ", (2).__pow__(3))

x = True
y = False
print('x and y is',x and y)
Example #45
 def test_neg(self):
     self.failUnless(operator.neg(5) == -5)
     self.failUnless(operator.neg(-5) == 5)
     self.failUnless(operator.neg(0) == 0)
     self.failUnless(operator.neg(-0) == 0)
Example #46
 def __neg__(self):
     return neg(self)
Example #47
def neg_abs(num):
    return neg(abs(num))
Example #48
def operator_neg(size):
    a = Array(size, 'int32')
    for i in range(size):
        a[i] = nb_types.int32(i)
    return operator.neg(a)
Example #49
 def __neg__(self) -> T:
     return operator.neg(object.__getattribute__(self, "_obj"))
Example #50
def dom_j_up(csp, queue):
    return SortedSet(queue, key=lambda t: neg(len(csp.curr_domains[t[1]])))
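The neg(len(...)) key just reverses the ordering: arcs whose second variable has the largest current domain sort first in the SortedSet. The same trick with plain sorted() and a made-up domains dict:

from operator import neg

domains = {'A': [1, 2, 3], 'B': [1], 'C': [1, 2]}
queue = ['A', 'B', 'C']
print(sorted(queue, key=lambda v: neg(len(domains[v]))))   # ['A', 'C', 'B']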
Example #51
	def __neg__   (self): return Euler([operator.neg(c) for c in self])
	def __pos__   (self): return Euler([operator.pos(c) for c in self])
Example #52
    def run_model(self):

        ## initialize data structure

        self.res = np.zeros([self.duration, 12], dtype=np.float32)

        self.res[0, 0] = self.nf1
        self.res[0, 1] = self.nf2
        self.res[0, 2] = self.nf3
        self.res[0, 3] = self.nm1
        self.res[0, 4] = self.nm2
        self.res[0, 5] = self.nm3
        self.res[0, 6] = self.vac3
        self.res[0, 7] = self.vac2
        self.res[0, 8] = self.vac1
        self.res[0, 9] = self.female_promotion_probability_1
        self.res[0, 10] = self.female_promotion_probability_2
        self.res[0, 11] = np.float32(
            sum(list([self.nf1, self.nf2, self.nf3])) / sum(
                list([
                    self.nf1, self.nf2, self.nf3, self.nm1, self.nm2, self.nm3
                ])))

        hiring_rate_female_level_1 = self.bf1
        hiring_rate_female_level_2 = self.bf2
        hiring_rate_female_level_3 = self.bf3
        attrition_rate_female_level_1 = self.df1
        attrition_rate_female_level_2 = self.df2
        attrition_rate_female_level_3 = self.df3
        attrition_rate_male_level_1 = self.dm1
        attrition_rate_male_level_2 = self.dm2
        attrition_rate_male_level_3 = self.dm3
        probability_of_outside_hire_level_3 = self.phire3
        probability_of_outside_hire_level_2 = self.phire2
        male_promotion_probability_1_2 = self.male_promotion_probability_1
        male_promotion_probability_2_3 = self.male_promotion_probability_2

        for i in range(1, self.duration):
            # initialize variables for this iteration

            prev_number_of_females_level_1 = self.res[i - 1, 0]
            prev_number_of_females_level_2 = self.res[i - 1, 1]
            prev_number_of_females_level_3 = self.res[i - 1, 2]
            prev_number_of_males_level_1 = self.res[i - 1, 3]
            prev_number_of_males_level_2 = self.res[i - 1, 4]
            prev_number_of_males_level_3 = self.res[i - 1, 5]
            prev_number_of_vacancies_level_3 = self.res[i - 1, 6]
            prev_number_of_vacancies_level_2 = self.res[i - 1, 7]
            prev_number_of_vacancies_level_1 = self.res[i - 1, 8]
            prev_promotion_rate_female_level_1 = self.female_promotion_probability_1
            prev_promotion_rate_female_level_2 = self.female_promotion_probability_2
            if np.isnan(prev_promotion_rate_female_level_1):
                prev_promotion_rate_female_level_1 = 0
            if np.isnan(prev_promotion_rate_female_level_2):
                prev_promotion_rate_female_level_2 = 0
            prev_gender_proportion_of_department = np.float32(
                sum(
                    list([
                        prev_number_of_females_level_1,
                        prev_number_of_females_level_2,
                        prev_number_of_females_level_3
                    ])) / (sum(
                        list([
                            prev_number_of_females_level_1,
                            prev_number_of_females_level_2,
                            prev_number_of_females_level_3,
                            prev_number_of_males_level_1,
                            prev_number_of_males_level_2,
                            prev_number_of_males_level_3
                        ]))))

            # Process Model

            # first both female and males leave the department according to binomial probability.

            female_attrition_level_3 = binomial(prev_number_of_females_level_3,
                                                attrition_rate_female_level_3)

            male_attrition_level_3 = binomial(prev_number_of_males_level_3,
                                              attrition_rate_male_level_3)

            # the departures create a set of vacancies. These vacancies are the basis for new hiring
            total_vacancies_3 = female_attrition_level_3 + male_attrition_level_3

            # women are hired first and then men
            hiring_female_3 = binomial(
                total_vacancies_3, probability_of_outside_hire_level_3 *
                hiring_rate_female_level_3)
            hiring_male_3 = binomial(
                max(0, total_vacancies_3 - hiring_female_3),
                probability_of_outside_hire_level_3 *
                (1 - hiring_rate_female_level_3))

            total_hiring_3 = hiring_female_3 + hiring_male_3

            # Level 3 vacancies that are not filled by new hires create opportunities
            # for promotion from level 2. Again, women are promoted first and men second.
            # Note the guard below: if we try to promote more professors from level 2
            # than exist at level 2, the min() caps the number of promotions.

            vacancies_remaining_after_hiring_3 = total_vacancies_3 - total_hiring_3

            potential_promotions_after_hiring_3 = max(
                0, vacancies_remaining_after_hiring_3)

            promotions_of_females_level_2_3 = binomial(
                min(potential_promotions_after_hiring_3,
                    prev_number_of_females_level_2),
                prev_promotion_rate_female_level_2)

            promotions_of_males_level_2_3 = binomial(
                max(
                    0,
                    min(
                        potential_promotions_after_hiring_3 -
                        promotions_of_females_level_2_3,
                        prev_number_of_males_level_2)),
                male_promotion_probability_2_3)

            # Attrition at level 2: people leave either through attrition or through promotion.

            female_attrition_level_2 = binomial(
                max(
                    0, prev_number_of_females_level_2 -
                    promotions_of_females_level_2_3),
                attrition_rate_female_level_2)

            male_attrition_level_2 = binomial(
                max(
                    0, prev_number_of_males_level_2 -
                    promotions_of_males_level_2_3),
                attrition_rate_male_level_2)

            # the departures create a set of vacancies. These vacancies are the basis for new hiring
            total_vacancies_2 = sum(
                list([
                    female_attrition_level_2, male_attrition_level_2,
                    promotions_of_females_level_2_3,
                    promotions_of_males_level_2_3
                ]))

            hiring_female_2 = binomial(
                max(0,
                    total_vacancies_2), probability_of_outside_hire_level_2 *
                hiring_rate_female_level_2)
            hiring_male_2 = binomial(
                max(0, total_vacancies_2 - hiring_female_2),
                probability_of_outside_hire_level_2 *
                (1 - hiring_rate_female_level_2))

            total_hiring_2 = hiring_female_2 + hiring_male_2

            vacancies_remaining_after_hiring_2 = total_vacancies_2 - total_hiring_2

            potential_promotions_after_hiring_2 = max(
                0, vacancies_remaining_after_hiring_2)

            promotions_of_females_level_1_2 = binomial(
                max(
                    0,
                    min(potential_promotions_after_hiring_2,
                        prev_number_of_females_level_1)),
                prev_promotion_rate_female_level_1)
            promotions_of_males_level_1_2 = binomial(
                max(
                    0,
                    min(
                        potential_promotions_after_hiring_2 -
                        promotions_of_females_level_1_2,
                        prev_number_of_males_level_1)),
                male_promotion_probability_1_2)

            ## Level 1

            female_attrition_level_1 = binomial(
                max(
                    0, prev_number_of_females_level_1 -
                    promotions_of_females_level_1_2),
                attrition_rate_female_level_1)

            male_attrition_level_1 = binomial(
                max(
                    0, prev_number_of_males_level_1 -
                    promotions_of_males_level_1_2),
                attrition_rate_male_level_1)
            total_vacancies_1 = sum(
                list([
                    female_attrition_level_1, male_attrition_level_1,
                    promotions_of_females_level_1_2,
                    promotions_of_males_level_1_2
                ]))

            hiring_female_1 = binomial(max(0, total_vacancies_1),
                                       hiring_rate_female_level_1)
            hiring_male_1 = binomial(
                max(0, total_vacancies_1 - hiring_female_1),
                1 - hiring_rate_female_level_1)

            # Write state variables to array and move to next iteration

            self.res[i, 0] = number_of_females_level_1 = sum(
                list([
                    prev_number_of_females_level_1,
                    neg(female_attrition_level_1),
                    neg(promotions_of_females_level_1_2), hiring_female_1
                ]))

            assert (number_of_females_level_1 >=
                    0), "negative number of females 1"

            self.res[i, 1] = number_of_females_level_2 = max(
                0,
                sum(
                    list([
                        prev_number_of_females_level_2,
                        neg(female_attrition_level_2),
                        neg(promotions_of_females_level_2_3),
                        promotions_of_females_level_1_2, hiring_female_2
                    ])))
            self.res[i, 2] = number_of_females_level_3 = sum(
                list([
                    prev_number_of_females_level_3,
                    neg(female_attrition_level_3),
                    promotions_of_females_level_2_3, hiring_female_3
                ]))

            self.res[i, 3] = number_of_males_level_1 = sum(
                list([
                    prev_number_of_males_level_1,
                    neg(male_attrition_level_1),
                    neg(promotions_of_males_level_1_2), hiring_male_1
                ]))

            self.res[i, 4] = number_of_males_level_2 = sum(
                list([
                    prev_number_of_males_level_2,
                    neg(male_attrition_level_2),
                    neg(promotions_of_males_level_2_3),
                    promotions_of_males_level_1_2, hiring_male_2
                ]))

            self.res[i, 5] = number_of_males_level_3 = sum(
                list([
                    prev_number_of_males_level_3,
                    neg(male_attrition_level_3), promotions_of_males_level_2_3,
                    hiring_male_3
                ]))

            self.res[i, 6] = number_of_vacancies_level_3 = sum(
                list([male_attrition_level_3, female_attrition_level_3]))

            self.res[i, 7] = number_of_vacancies_level_2 = sum(
                list([
                    male_attrition_level_2, female_attrition_level_2,
                    promotions_of_females_level_2_3,
                    promotions_of_males_level_2_3
                ]))

            self.res[i, 8] = number_of_vacancies_level_1 = sum(
                list([
                    male_attrition_level_1, female_attrition_level_1,
                    promotions_of_males_level_1_2,
                    promotions_of_females_level_1_2
                ]))

            self.res[i, 9] = promotion_rate_female_level_1 = np.float32(
                number_of_females_level_1 /
                sum(list([number_of_females_level_1, number_of_males_level_1
                          ])))
            self.res[i, 10] = promotion_rate_women_level_2 = np.float32(
                number_of_females_level_2 /
                sum(list([number_of_females_level_2, number_of_males_level_2
                          ])))
            self.res[i, 11] = gender_proportion_of_department = np.float32(
                truediv(
                    sum(
                        list([
                            number_of_females_level_1,
                            number_of_females_level_2,
                            number_of_females_level_3
                        ])),
                    sum(
                        list([
                            number_of_females_level_1,
                            number_of_females_level_2,
                            number_of_females_level_3, number_of_males_level_1,
                            number_of_males_level_2, number_of_males_level_3
                        ]))))

            # print(self.res[i,:])
            ## Print Data matrix

        df_ = pd.DataFrame(self.res)
        df_.columns = [
            'f1', 'f2', 'f3', 'm1', 'm2', 'm3', 't3', 't2', 't1', 'prom1',
            'prom2', 'gendprop'
        ]
        # print(df_)
        recarray_results = df_.to_records(index=True)
        self.run = recarray_results
        return recarray_results
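
Throughout the state updates above, departures enter each sum() as neg(...) terms while hires and incoming promotions enter as positive terms. A minimal sketch of that bookkeeping pattern, using hypothetical names and numpy's binomial sampler rather than the model's own variables:

from operator import neg

from numpy.random import binomial

def next_headcount(prev_count, attrition_rate, promotions_out, hires_in):
    # new headcount = previous - attrition - promotions out + new hires
    attrition = binomial(prev_count, attrition_rate)
    return sum([prev_count, neg(attrition), neg(promotions_out), hires_in])

print(next_headcount(20, 0.10, 2, 3))  # close to 21, minus whatever attrition was sampled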
Example #53
0
 def __rsub__(self, other_obj):
     self_neg = operator.neg(self)
     return operator.add(self_neg, other_obj)
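
For context on the reflected operator above: Python falls back to __rsub__ when the left operand's __sub__ returns NotImplemented, so `10 - obj` becomes `(-obj) + 10`. A self-contained illustration with a hypothetical Weight class (not from the original codebase):

import operator

class Weight:
    def __init__(self, value):
        self.value = value

    def __neg__(self):
        return Weight(-self.value)

    def __add__(self, other_obj):
        return Weight(self.value + other_obj)

    def __rsub__(self, other_obj):
        # same pattern as the example above: negate self, then add the other operand
        self_neg = operator.neg(self)
        return operator.add(self_neg, other_obj)

print((10 - Weight(3)).value)  # 7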
Example #54
0
 def __neg__(self):
     return Vec2d(operator.neg(self.x), operator.neg(self.y))
Example #55
0
class TVMScriptParser(Transformer):
    """Synr AST visitor pass which finally lowers to TIR.

    Notes for Extension
    -------------------
    1. To support a new type of AST node, add a function transform_xxx().
    2. To support new functions, add the function to the appropriate registry:
        We divide allowed function calls in TVM script into 3 categories,
        intrin, scope_handler and special_stmt.
        1. intrin functions are low level functions like mod, load, and
           constants. They correspond to a tir `IRNode`. They must have a
           return value. The user can register intrin functions for the parser to
           use.
        2. scope_handler functions have no return value. They take two
           arguments: the parser and the AST node. scope_handler functions are
           used in with and for statements.
        3. special_stmt functions handle cases that do not have a corresponding
           tir `IRNode`. These functions take the parser and the AST node as
           arguments and may return a value.
        When visiting a Call node, we check the special_stmt registry first. If
        no registered function is found, we then check the intrin registry.
        When visiting a With node, we check the with_scope registry.
        When visiting a For node, we check the for_scope registry.
    """

    _binop_maker = {
        ast.BuiltinOp.Add: tvm.tir.Add,
        ast.BuiltinOp.Sub: tvm.tir.Sub,
        ast.BuiltinOp.Mul: tvm.tir.Mul,
        ast.BuiltinOp.Div: tvm.tir.Div,
        ast.BuiltinOp.FloorDiv: tvm.tir.FloorDiv,
        ast.BuiltinOp.Mod: tvm.tir.FloorMod,
        ast.BuiltinOp.BitOr: lambda lhs, rhs, span: operator.or_(lhs, rhs),
        ast.BuiltinOp.BitAnd: lambda lhs, rhs, span: operator.and_(lhs, rhs),
        ast.BuiltinOp.BitXor: lambda lhs, rhs, span: operator.xor(lhs, rhs),
        ast.BuiltinOp.GT: tvm.tir.GT,
        ast.BuiltinOp.GE: tvm.tir.GE,
        ast.BuiltinOp.LT: tvm.tir.LT,
        ast.BuiltinOp.LE: tvm.tir.LE,
        ast.BuiltinOp.Eq: tvm.tir.EQ,
        ast.BuiltinOp.NotEq: tvm.tir.NE,
        ast.BuiltinOp.And: tvm.tir.And,
        ast.BuiltinOp.Or: tvm.tir.Or,
    }

    _unaryop_maker = {
        ast.BuiltinOp.USub: lambda rhs, span: operator.neg(rhs),
        ast.BuiltinOp.Invert: lambda rhs, span: operator.invert(rhs),
        ast.BuiltinOp.Not: tvm.tir.Not,
    }

    def __init__(self, base_lineno, tir_namespace):
        self.context = None

        self.base_lineno = base_lineno
        self.current_lineno = 0
        self.current_col_offset = 0
        self.tir_namespace = tir_namespace
        self.meta = None

    def init_function_parsing_env(self):
        """Initialize function parsing environment"""
        self.context = ContextMaintainer(self.report_error)  # scope emitter

    def init_meta(self, meta_dict):
        if meta_dict is not None:
            self.meta = tvm.ir.load_json(json.dumps(meta_dict))

    def transform(self, node):
        """Generic transformation for visiting the AST. Dispatches to
        `transform_ClassName` for the appropriate ClassName."""
        old_lineno, old_col_offset = self.current_lineno, self.current_col_offset

        if hasattr(node, "lineno"):
            self.current_lineno = self.base_lineno + node.lineno - 1
        if hasattr(node, "col_offset"):
            self.current_col_offset = node.col_offset

        method = "transform_" + node.__class__.__name__
        visitor = getattr(self, method, self.generic_visit)
        transform_res = visitor(node)

        self.current_lineno, self.current_col_offset = old_lineno, old_col_offset

        return transform_res

    def match_tir_namespace(self, identifier: str) -> bool:
        """Check if the namespace is equal to tvm.script.tir"""
        return identifier in self.tir_namespace

    def report_error(self, message: str, span: Union[ast.Span, tvm.ir.Span]):
        """Report an error occuring at a location.

        This just dispatches to synr's DiagnosticContext.

        Parameters
        ----------
        message : str
            Error message
        span : Union[synr.ast.Span, tvm.ir.Span]
            Location of the error
        """
        if isinstance(span, tvm.ir.Span):
            span = synr_span_from_tvm(span)
        self.error(message, span)

    def parse_body(self, parent):
        """Parse remaining statements in this scope.

        Parameters
        ----------
        parent : synr.ast.Node
            Parent node of this scope. Errors will be reported here.
        """
        body = []
        spans = []
        stmt = parent
        while len(self.context.node_stack[-1]) > 0:
            stmt = self.context.node_stack[-1].pop()
            spans.append(stmt.span)
            res = self.transform(stmt)
            if res is not None:
                body.append(res)
        if len(body) == 0:
            self.report_error(
                "Expected another statement at the end of this block. Perhaps you "
                "used a concise statement and forgot to include a body afterwards.",
                stmt.span,
            )
        else:
            return (tvm.tir.SeqStmt(body,
                                    tvm_span_from_synr(ast.Span.union(spans)))
                    if len(body) > 1 else body[0])

    def parse_arg_list(self, func, node_call):
        """Match the arguments of a function call in the AST to the required
        arguments of the function. This handles positional arguments,
        positional arguments specified by name, keyword arguments, and varargs.

        Parameters
        ----------
        func : Function
            The function that provides the signature

        node_call: ast.Call
            The AST call node that calls into the function.

        Returns
        -------
        arg_list : list
            The parsed positional argument.
        """
        assert isinstance(node_call, ast.Call)
        # collect arguments
        args = [self.transform(arg) for arg in node_call.params]
        kw_args = {
            self.transform(k): self.transform(v)
            for k, v in node_call.keyword_params.items()
        }
        # get the name and parameter list of func
        if isinstance(func, (Intrin, ScopeHandler, SpecialStmt)):
            func_name, param_list = func.signature()
        else:
            self.report_error(
                "Internal Error: function must be of type Intrin, ScopeHandler or SpecialStmt, "
                f"but it is {type(func).__name__}",
                node_call.span,
            )
        # check arguments and parameter list and get a list of arguments
        reader = CallArgumentReader(func_name, args, kw_args, self, node_call)
        pos_only, kwargs, varargs = param_list
        internal_args = list()
        for i, arg_name in enumerate(pos_only):
            internal_args.append(reader.get_pos_only_arg(i + 1, arg_name))
        for i, arg_info in enumerate(kwargs):
            arg_name, default = arg_info
            internal_args.append(
                reader.get_kwarg(i + 1 + len(pos_only),
                                 arg_name,
                                 default=default))
        if varargs is not None:
            internal_args.extend(
                reader.get_varargs(len(pos_only) + len(kwargs) + 1))
        elif len(args) + len(kw_args) > len(pos_only) + len(kwargs):
            self.report_error(
                "Arguments mismatched. " +
                f"Expected {len(pos_only) + len(kwargs)} args but got " +
                f"{len(args) + len(kw_args)}",
                node_call.span,
            )
        return internal_args

    def parse_type(self, type_node, parent):
        """Parse a type annotation.

        We require the parent object of the type annotation so that we have a
        place to report the error message if the type does not exist.
        """
        if type_node is None:
            self.report_error("A type annotation is required", parent.span)
        res_type = self.transform(type_node)
        return tvm.ir.TupleType(
            []) if res_type is None else res_type.evaluate()

    def generic_visit(self, node):
        """Fallback visitor if node type is not handled. Reports an error."""

        self.report_error(
            type(node).__name__ + " AST node is not supported", node.span)

    def transform_Module(self, node):
        """Module visitor

        Right now, we only support two formats for TVM Script.

        Example
        -------
        1. Generate a PrimFunc (If the code is printed, then it may also contain metadata)
        .. code-block:: python

            import tvm

            @tvm.script
            def A(...):
                ...

            # returns a PrimFunc
            func = A

        2. Generate an IRModule
        .. code-block:: python

            import tvm

            @tvm.script.ir_module
            class MyMod():
                @T.prim_func
                def A(...):
                    ...
                @T.prim_func
                def B(...):
                    ...

                __tvm_meta__ = ...

            # returns an IRModule
            mod = MyMod
        """
        if len(node.funcs) == 1:
            return self.transform(next(iter(node.funcs.values())))
        elif len(node.funcs) == 0:
            self.report_error(
                "You must supply at least one class or function definition",
                node.span)
        else:
            self.report_error(
                "Only one-function, one-class or function-with-meta source code is allowed",
                ast.Span.union([x.span
                                for x in list(node.funcs.values())[1:]]),
            )

    def transform_Class(self, node):
        """Class definition visitor.

        A class can have multiple function definitions and a single
        :code:`__tvm_meta__` statement. Each class corresponds to a single
        :code:`IRModule`.

        Example
        -------
        .. code-block:: python

            @tvm.script.ir_module
            class MyClass:
                __tvm_meta__ = {}
                def A():
                    T.evaluate(0)
        """
        if len(node.assignments) == 1:
            if not (len(node.assignments[0].lhs) == 1
                    and isinstance(node.assignments[0].lhs[0], ast.Var)
                    and node.assignments[0].lhs[0].id.name == "__tvm_meta__"):
                self.report_error(
                    "The only top level assignments allowed are `__tvm_meta__ = ...`",
                    node.assignments[0].span,
                )
            self.init_meta(MetaUnparser().do_transform(
                node.assignments[0].rhs, self._diagnostic_context))
        elif len(node.assignments) > 1:
            self.report_error(
                "Only a single top level `__tvm_meta__` is allowed",
                ast.Span.union([x.span for x in node.assignments[1:]]),
            )

        return IRModule({
            GlobalVar(name): self.transform(func)
            for name, func in node.funcs.items()
        })

    def transform_Function(self, node):
        """Function definition visitor.

        Each function definition is translated to a single :code:`PrimFunc`.

        There are a couple of restrictions on TVM Script functions:
        1. Function arguments must have their types specified.
        2. The body of the function can contain :code:`func_attr` to specify
           attributes of the function (like its name).
        3. The body of the function can also contain multiple :code:`buffer_bind`s,
           which give shape and dtype information to arguments.
        4. Return statements are implicit.

        Example
        -------
        .. code-block:: python

            @T.prim_func
            def my_function(x: T.handle):  # 1. Argument types
                T.func_attr({"global_symbol": "mmult"})  # 2. Function attributes
                X_1 = tir.buffer_bind(x, [1024, 1024])  # 3. Buffer binding
                T.evaluate(0)  # 4. This function returns 0
        """
        def check_decorator(decorators: List[ast.Expr]) -> bool:
            """Check the decorator is `T.prim_func"""
            if len(decorators) != 1:
                return False
            d: ast.Expr = decorators[0]
            return (isinstance(d, ast.Attr) and isinstance(d.object, ast.Var)
                    and self.match_tir_namespace(d.object.id.name)
                    and d.field.name == "prim_func")

        self.init_function_parsing_env()
        self.context.enter_scope(nodes=node.body.stmts)

        # add parameters of function
        for arg in node.params:
            arg_var = tvm.te.var(arg.name, self.parse_type(arg.ty, arg))
            self.context.update_symbol(arg.name, arg_var, node)
            self.context.func_params.append(arg_var)

        if not check_decorator(node.decorators):
            self.report_error(
                "All functions should be decorated by `T.prim_func`",
                node.span,
            )

        # fetch the body of root block
        body = self.parse_body(node.body)

        # return a tir.PrimFunc
        dict_attr = self.context.func_dict_attr
        ret_type = self.parse_type(node.ret_type,
                                   node) if node.ret_type is not None else None
        func = tvm.tir.PrimFunc(
            self.context.func_params,
            body,
            ret_type,
            buffer_map=self.context.func_buffer_map,
            attrs=tvm.ir.make_node("DictAttrs", **dict_attr)
            if dict_attr else None,
            span=tvm_span_from_synr(node.span),
        )

        # New Scope : Implicit root block
        # Each function contains an implicit root block in TensorIR,
        # so here we need a block scope for it.
        # If the PrimFunc is not a TensorIR func (e.g. TE scheduled func or low-level func),
        # the root block will not be added. The logic to add root block is in `_ffi_api.Complete`

        # Fix the PrimFunc
        # 1. generate root block if necessary
        # 2. generate surrounding loops for blocks if necessary

        func = call_with_error_reporting(
            self.report_error,
            node.span,
            _ffi_api.Complete,
            func,
            self.context.root_alloc_buffers,
        )

        self.context.exit_scope()
        return func

    def transform_Lambda(self, node):
        """Lambda visitor

        Return an array of input parameters and the transformed lambda body.
        """

        self.context.enter_scope(nodes=[node.body])

        # add parameters of the lambda
        arg_vars = []
        for arg in node.params:
            arg_var = tvm.te.var(arg.name)
            arg_vars.append(arg_var)
            self.context.update_symbol(arg.name, arg_var, node)

        # the body of a lambda must be an expr
        if not isinstance(node.body, ast.Expr):
            self.report_error("The body of a lambda must be an expression",
                              node.span)

        # transform the body of the lambda
        body = self.transform(node.body)

        self.context.exit_scope()
        return arg_vars, body

    def transform_Assign(self, node):
        """Assign visitor
        AST abstract grammar:
            Assign(expr* targets, expr value, string? type_comment)

        Currently, 4 patterns of Assign are supported:
            1. special stmts with return value
                1.1 Buffer = T.match_buffer()/T.buffer_decl()
                1.2 Var = T.var()
                1.3 Var = T.env_thread()
            2. (BufferStore) Buffer[PrimExpr, PrimExpr, ..., PrimExpr] = PrimExpr
            3. (Store)       Var[PrimExpr] = PrimExpr
            4. with scope handlers with concise scoping and var def
                4.1 var = T.allocate()
        """

        if isinstance(node.rhs, ast.Call):
            # Pattern 1 & Pattern 4
            func = self.transform(node.rhs.func_name)
            if isinstance(func, WithScopeHandler):
                if not func.concise_scope or not func.def_symbol:
                    self.report_error(
                        "with scope handler " + func.signature()[0] +
                        " is not suitable here",
                        node.rhs.span,
                    )
                # Pattern 4
                arg_list = self.parse_arg_list(func, node.rhs)
                func.enter_scope(node, self.context, arg_list,
                                 node.rhs.func_name.span)
                func.body = self.parse_body(node)
                return func.exit_scope(node, self.context, arg_list,
                                       node.rhs.func_name.span)
            elif isinstance(func, SpecialStmt):
                # Pattern 1
                arg_list = self.parse_arg_list(func, node.rhs)
                func.handle(node, self.context, arg_list,
                            node.rhs.func_name.span)
                return self.parse_body(node)
            else:
                value = self.transform(node.rhs)
                if len(node.lhs) == 1 and not isinstance(node.lhs[0], ast.Var):
                    # This is a little confusing because it is only true when
                    # we have taken this branch. We might need to clarify what
                    # exactly is allowed in assignments in TVM script.
                    self.report_error(
                        "Left hand side of assignment must be an unqualified variable",
                        node.span,
                    )
                ast_var = node.lhs[0]
                var = tvm.te.var(
                    ast_var.id.name,
                    self.parse_type(node.ty, ast_var),
                    span=tvm_span_from_synr(ast_var.span),
                )
                self.context.update_symbol(var.name, var, node)
                body = self.parse_body(node)
                self.context.remove_symbol(var.name)
                return tvm.tir.LetStmt(var,
                                       value,
                                       body,
                                       span=tvm_span_from_synr(node.span))

        self.report_error("Unsupported Assign stmt", node.span)

    def transform_SubscriptAssign(self, node):
        """Visitor for statements of the form :code:`x[1] = 2`."""
        symbol = self.transform(node.params[0])
        indexes = self.transform(node.params[1])
        rhs = self.transform(node.params[2])
        rhs_span = tvm_span_from_synr(node.params[2].span)
        if isinstance(symbol, tvm.tir.Buffer):
            # BufferStore
            return tvm.tir.BufferStore(
                symbol,
                tvm.runtime.convert(rhs, span=rhs_span),
                indexes,
                span=tvm_span_from_synr(node.span),
            )
        else:
            if len(indexes) != 1:
                self.report_error(
                    f"Store is only allowed with one index, but {len(indexes)} were provided.",
                    node.params[1].span,
                )
            # Store
            return tvm.tir.Store(
                symbol,
                tvm.runtime.convert(rhs, span=rhs_span),
                indexes[0],
                tvm.runtime.convert(True, span=tvm_span_from_synr(node.span)),
                span=tvm_span_from_synr(node.span),
            )

    def transform_Assert(self, node):
        """Assert visitor

        Pattern corresponds to concise mode of :code:`with T.Assert()`.
        """

        condition = self.transform(node.condition)
        if node.msg is None:
            self.report_error("Assert statements must have an error message.",
                              node.span)
        message = self.transform(node.msg)
        body = self.parse_body(node)
        return tvm.tir.AssertStmt(condition,
                                  tvm.runtime.convert(message),
                                  body,
                                  span=tvm_span_from_synr(node.span))

    def transform_For(self, node):
        """For visitor
        AST abstract grammar:
            For(expr target, expr iter, stmt* body, stmt* orelse, string? type_comment)
        Currently, 1 pattern of For is supported:
            1. for scope handler
                for name in T.serial()/T.parallel()/T.vectorized()/T.unroll()/range()/
                            T.grid()/T.thread_binding()
        """

        if not isinstance(node.rhs, ast.Call):
            self.report_error("The loop iterator should be a function call.",
                              node.rhs.span)
        func = self.transform(node.rhs.func_name)
        if not isinstance(func, ForScopeHandler):
            self.report_error(
                "Only For scope handlers can be used in a for statement.",
                node.rhs.func_name.span)
        # prepare for new for scope
        old_lineno, old_col_offset = self.current_lineno, self.current_col_offset
        self.current_lineno = node.span.start_line
        self.current_col_offset = node.span.start_column
        self.context.enter_scope(nodes=node.body.stmts)
        # for scope handler process the scope
        arg_list = self.parse_arg_list(func, node.rhs)
        func.enter_scope(node, self.context, arg_list, node.rhs.func_name.span)
        func.body = self.parse_body(node)
        res = func.exit_scope(node, self.context, arg_list,
                              node.rhs.func_name.span)
        # exit the scope
        self.context.exit_scope()
        self.current_lineno, self.current_col_offset = old_lineno, old_col_offset
        return res

    def transform_While(self, node):
        """While visitor
        AST abstract grammar:
            While(expr condition, stmt* body)
        """
        condition = self.transform(node.condition)
        # body
        self.context.enter_scope(nodes=node.body.stmts)
        body = self.parse_body(node)
        self.context.exit_scope()

        return tvm.tir.While(condition,
                             body,
                             span=tvm_span_from_synr(node.span))

    def transform_With(self, node):
        """With visitor
        AST abstract grammar:
            With(withitem* items, stmt* body, string? type_comment)
            withitem = (expr context_expr, expr? optional_vars)
        Currently, 2 patterns of With are supported:
            1. with scope handler with symbol def
                with T.block(*axes)/T.allocate() as targets:
            2. with scope handler without symbol def
                with T.let()/T.Assert()/T.attr()/T.realize()
        """

        if not isinstance(node.rhs, ast.Call):
            self.report_error(
                "The context expression of a `with` statement should be a function call.",
                node.rhs.span,
            )

        func = self.transform(node.rhs.func_name)

        if not isinstance(func, WithScopeHandler):
            self.report_error(
                f"Function {func} cannot be used in a `with` statement.",
                node.rhs.func_name.span)
        # prepare for new block scope
        old_lineno, old_col_offset = self.current_lineno, self.current_col_offset
        self.current_lineno = node.body.span.start_line
        self.current_col_offset = node.body.span.start_column
        self.context.enter_block_scope(nodes=node.body.stmts)
        # with scope handler process the scope
        arg_list = self.parse_arg_list(func, node.rhs)
        func.enter_scope(node, self.context, arg_list, node.rhs.func_name.span)
        func.body = self.parse_body(node)
        res = func.exit_scope(node, self.context, arg_list,
                              node.rhs.func_name.span)
        # exit the scope
        self.context.exit_block_scope()
        self.current_lineno, self.current_col_offset = old_lineno, old_col_offset
        return res

    def transform_If(self, node):
        """If visitor
        AST abstract grammar:
            If(expr test, stmt* body, stmt* orelse)
        """

        condition = self.transform(node.condition)
        # then body
        self.context.enter_scope(nodes=node.true.stmts)
        then_body = self.parse_body(node)
        self.context.exit_scope()

        # else body
        if len(node.false.stmts) > 0:
            self.context.enter_scope(nodes=node.false.stmts)
            else_body = self.parse_body(node)
            self.context.exit_scope()
        else:
            else_body = None

        return tvm.tir.IfThenElse(condition,
                                  then_body,
                                  else_body,
                                  span=tvm_span_from_synr(node.span))

    def transform_Call(self, node):
        """Call visitor

        3 different Call patterns are allowed:
            1. Intrin representing a PrimExpr/IterVar
                1.1 tir.int/uint/float8/16/32/64/floormod/floordiv/load/cast/ramp/broadcast/max
                1.2 tir.range/reduce_axis/scan_axis/opaque_axis
            2. tir.Op(dtype, ...)
            3. other callable functions
        """

        if isinstance(node.func_name, ast.Op):
            if node.func_name.name == ast.BuiltinOp.Subscript:
                return self.transform_Subscript(node)
            if node.func_name.name in self._binop_maker:
                lhs = self.transform(node.params[0])
                rhs = self.transform(node.params[1])
                return self._binop_maker[node.func_name.name](
                    lhs, rhs, span=tvm_span_from_synr(node.span))
            if node.func_name.name in self._unaryop_maker:
                rhs = self.transform(node.params[0])
                return self._unaryop_maker[node.func_name.name](
                    rhs, span=tvm_span_from_synr(node.span))
            self.report_error(f"Unsupported operator {node.func_name.name}.",
                              node.func_name.span)
        else:
            func = self.transform(node.func_name)
            if isinstance(func, Intrin) and not func.stmt:
                # pattern 1
                arg_list = self.parse_arg_list(func, node)
                return call_with_error_reporting(
                    self.report_error,
                    node.func_name.span,
                    func.handle,
                    arg_list,
                    node.func_name.span,
                )
            else:
                args = [self.transform(arg) for arg in node.params]
                kw_args = {
                    self.transform(k): self.transform(v)
                    for k, v in node.keyword_params.items()
                }
                if isinstance(func, tvm.tir.op.Op):
                    # pattern 2
                    return tvm.tir.Call(kw_args["dtype"],
                                        func,
                                        args,
                                        span=tvm_span_from_synr(node.span))
                elif callable(func):
                    # pattern 3
                    return func(*args, **kw_args)
                else:
                    self.report_error(
                        f"Function is neither callable nor a tvm.tir.op.Op (it is a {type(func)}).",
                        node.func_name.span,
                    )

    def transform_UnassignedCall(self, node):
        """Visitor for statements that are function calls.

        This handles function calls that appear on their own line, like `tir.realize`.

        Examples
        --------
        .. code-block:: python

            @T.prim_func
            def f():
                A = T.buffer_decl([10, 10])
                T.realize(A[1:2, 1:2], "")  # This is an UnassignedCall
                A[1, 1] = 2  # This is also an UnassignedCall
        """
        # The only builtin operator allowed as a statement is subscript assignment, i.e. x[1] = 3.
        if isinstance(node.call.func_name, ast.Op):
            if node.call.func_name.name != ast.BuiltinOp.SubscriptAssign:
                self.report_error(
                    "Binary and unary operators are not allowed as a statement",
                    node.span)
            else:
                return self.transform_SubscriptAssign(node.call)

        # handle a regular function call
        func = self.transform(node.call.func_name)
        arg_list = self.parse_arg_list(func, node.call)

        if isinstance(func, tir.scope_handler.AssertHandler):
            self.report_error(
                "A standalone `T.Assert` is not allowed. Use `assert condition, message` "
                "instead.",
                node.call.func_name.span,
            )

        if isinstance(func, Intrin):
            if func.stmt:
                return call_with_error_reporting(
                    self.report_error,
                    node.call.func_name.span,
                    func.handle,
                    arg_list,
                    node.call.func_name.span,
                )
            else:
                self.report_error(
                    f"This intrinsic cannot be used as a statement.",
                    node.call.span)
        elif isinstance(func, WithScopeHandler
                        ) and func.concise_scope and not func.def_symbol:
            func.enter_scope(node, self.context, arg_list,
                             node.call.func_name.span)
            func.body = self.parse_body(node)
            return func.exit_scope(node, self.context, arg_list,
                                   node.call.func_name.span)
        elif isinstance(func, SpecialStmt) and not func.def_symbol:
            func.handle(node, self.context, arg_list, node.call.func_name.span)
            return

        self.report_error(
            "Unexpected statement. Expected an assert, an intrinsic, a with statement, or a "
            f"special statement, but got {type(func).__name__}.",
            node.call.func_name.span,
        )

    def transform_Slice(self, node):
        start = self.transform(node.start)
        end = self.transform(node.end)
        if not (isinstance(node.step, ast.Constant) and node.step.value == 1):
            self.report_error("Only step size 1 is supported for slices.",
                              node.step.span)
        return Slice(start, end)

    def transform_Subscript(self, node):
        """Array access visitor.

        Currently only 3 types of Subscript are supported:
            1. Buffer[index, index, ...]: Buffer element access (BufferLoad & BufferStore)
               Var[index]: Var element access (Load & Store)
            2. Buffer[start: stop, start: stop, ...]: BufferRealize (realize(buffer[...]))
            3. Array[index]: element access on a constant Array
        """

        symbol = self.transform(node.params[0])
        if symbol is None:
            self.report_error(
                f"Variable {node.params[0].id.name} is not defined.",
                node.params[0].span)

        indexes = [self.transform(x) for x in node.params[1].values]
        if isinstance(symbol, tvm.tir.expr.Var):
            for index in indexes:
                if not isinstance(index, (tvm.tir.PrimExpr, int)):
                    self.report_error(
                        "Buffer load indexes should be int or PrimExpr, but they are "
                        + str(type(index)),
                        node.span,
                    )
            return tvm.tir.Load("float32",
                                symbol,
                                indexes,
                                True,
                                span=tvm_span_from_synr(node.span))
        elif isinstance(symbol, tvm.tir.Buffer):
            return BufferSlice(symbol,
                               indexes,
                               self.report_error,
                               span=tvm_span_from_synr(node.span))
        elif isinstance(symbol, tvm.container.Array):
            if len(indexes) > 1:
                self.report_error(
                    "Array access should be one-dimension access, but the indices are "
                    + str(indexes),
                    node.span,
                )
            index = indexes[0]
            if not isinstance(index, (int, tvm.tir.expr.IntImm)):
                self.report_error(
                    "Array access index expected int or IntImm, but got " +
                    type(index),
                    node.span,
                )
            if int(index) >= len(symbol):
                self.report_error(
                    f"Array access out of bound, size: {len(symbol)}, got index {index}.",
                    node.span,
                )
            return symbol[int(index)]
        else:
            self.report_error(
                f"Cannot subscript from a {type(symbol).__name__}. Only variables and "
                "buffers are supported.",
                node.params[0].span,
            )

    def transform_Attr(self, node):
        """Visitor for field access of the form `x.y`.

        This visitor is used to look up function and symbol names. We have two
        cases to handle here:
        1. If we have a statement of the form `tir.something`, then we look up
           `tir.something` in the `Registry`. If the function is not in the
           registry, then we try to find a `tvm.ir.op.Op` with the same name.
        2. All other names `tvm.something` are looked up in the current Python
           namespace.
        """
        def get_full_attr_name(node: ast.Attr) -> str:
            reverse_field_names = [node.field.name]
            while isinstance(node.object, ast.Attr):
                node = node.object
                reverse_field_names.append(node.field.name)
            if isinstance(node.object, ast.Var):
                reverse_field_names.append(node.object.id.name)
            return ".".join(reversed(reverse_field_names))

        if isinstance(node.object, (ast.Var, ast.Attr)):
            full_attr_name = get_full_attr_name(node)
            attr_object, fields = full_attr_name.split(".", maxsplit=1)
            if self.match_tir_namespace(attr_object):
                func_name = "tir." + fields
                res = Registry.lookup(func_name)
                if res is not None:
                    return res
                try:
                    return tvm.ir.op.Op.get(func_name)
                except TVMError as e:
                    # Check if we got an attribute error
                    if e.args[0].find("AttributeError"):
                        self.report_error(
                            f"Unregistered function `tir.{fields}`.",
                            node.span)
                    else:
                        raise e

        symbol = self.transform(node.object)
        if symbol is None:
            self.report_error("Unsupported Attribute expression.",
                              node.object.span)
        if not hasattr(symbol, node.field.name):
            self.report_error(
                f"Type {type(symbol)} does not have a field called `{node.field.name}`.",
                node.span)
        res = getattr(symbol, node.field.name)
        return res

    def transform_TypeAttr(self, node):
        """Visitor for field access of the form `x.y` for types.

        We have two cases here:
        1. If the type is of the form `T.something`, we look up the type in
           the `tir` namespace in this module.
        2. If the type is of the form `tvm.x.something` then we look up
           `tvm.x.something` in this module's namespace.
        """
        if isinstance(node.object, ast.TypeVar):
            if self.match_tir_namespace(node.object.id.name):
                if not hasattr(tir, node.field.name):
                    self.report_error(
                        f"Invalid type annotation `tir.{node.field.name}`.",
                        node.span)
                return getattr(tir, node.field.name)

        symbol = self.transform(node.object)
        if symbol is None:
            self.report_error("Unsupported Attribute expression",
                              node.object.span)
        if not hasattr(symbol, node.field):
            self.report_error(
                f"Type {type(symbol)} does not have a field called `{node.field}`.",
                node.span)
        res = getattr(symbol, node.field)
        return res

    def transform_DictLiteral(self, node):
        """Dictionary literal visitor.

        Handles dictionary literals of the form `{x:y, z:2}`.
        """

        keys = [self.transform(key) for key in node.keys]
        values = [self.transform(value) for value in node.values]

        return dict(zip(keys, values))

    def transform_Tuple(self, node):
        """Tuple visitor.

        Handles tuples of the form `(x, y, 2)`.
        """

        return tuple(self.transform(element) for element in node.values)

    def transform_ArrayLiteral(self, node):
        """List literal visitor.

        Handles lists of the form `[x, 2, 3]`.
        """

        return [self.transform(element) for element in node.values]

    def transform_Var(self, node):
        """Variable visitor

        Handles variables like `x` in `x = 2`.
        """

        name = node.id.name
        if name == "meta":
            return self.meta
        symbol = Registry.lookup(name)
        if symbol is not None:
            return symbol
        symbol = self.context.lookup_symbol(name)
        if symbol is not None:
            return symbol
        self.report_error(f"Unknown identifier {name}.", node.span)

    def transform_TypeVar(self, node):
        """Type variable visitor.

        Equivalent to `transform_Var` but for types.
        """
        name = node.id.name
        symbol = Registry.lookup(name) or self.context.lookup_symbol(name)
        if symbol is not None:
            return symbol
        self.report_error(f"Unknown identifier {name}.", node.span)

    def transform_Constant(self, node):
        """Constant value visitor.

        Constant values include `None`, `"strings"`, `2` (integers), `4.2`
        (floats), and `True` (booleans).
        """
        return tvm.runtime.convert(node.value,
                                   span=tvm_span_from_synr(node.span))

    def transform_TypeConstant(self, node):
        """Constant value visitor for types.

        See `transform_Constant`.
        """
        return node.value

    def transform_Return(self, node):
        self.report_error(
            "TVM script does not support return statements. Instead the last statement in any "
            "block is implicitly returned.",
            node.span,
        )
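
The part of this parser relevant to operator.neg is _unaryop_maker: when transform_Call meets a USub builtin, `-x` in TVM script is lowered through operator.neg. A stripped-down sketch of that table-driven dispatch, with a hypothetical UnaryOp enum standing in for synr's ast.BuiltinOp:

import operator
from enum import Enum

class UnaryOp(Enum):
    USUB = 1
    INVERT = 2

_unaryop_maker = {
    UnaryOp.USUB: lambda rhs: operator.neg(rhs),
    UnaryOp.INVERT: lambda rhs: operator.invert(rhs),
}

def eval_unary(op, operand):
    # look the operator up in the table and apply it, mirroring transform_Call's unary branch
    return _unaryop_maker[op](operand)

print(eval_unary(UnaryOp.USUB, 5))    # -5
print(eval_unary(UnaryOp.INVERT, 5))  # -6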
Example #56
0
def notBitStream(bs1):
    return opBitStream(lambda x: operator.neg(x)-1, bs1)
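
The lambda above computes -x - 1, which for Python integers is exactly the bitwise complement (~x == -x - 1), so a bit-stream NOT can be written with operator.neg alone. A quick check of the identity:

import operator

assert all(operator.neg(x) - 1 == ~x for x in (-3, -1, 0, 1, 7, 255))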
Example #57
0
 def __neg__(self):
     return vec2d(operator.neg(self.x), operator.neg(self.y))
Example #58
0
from operator import abs, neg, pos, add, floordiv, mod, mul, pow, sub, and_, truediv, invert, lshift, or_, rshift, xor

a = -1
b = 5.0
c = 2
d = 6

print('a = ', a)
print('b = ', b)
print('c = ', c)
print('d = ', d)

print('\nPositive/Negative:')
print(f'abs({a}):', abs(a))
print(f'neg({a}):', neg(a))
print(f'neg({b}):', neg(b))
print(f'pos({a}):', pos(a))
print(f'pos({b}):', pos(b))

print('\nArithmetic:')
print(f'add({a}, {b}):', add(a, b))
print(f'floordiv({a}, {b}):', floordiv(a, b))
print(f'mod({a},{b}): ', mod(a, b))
print(f'mul({a},{b}): ', mul(a, b))
print(f'pow({c},{d}):', pow(c, d))
print(f'sub({b},{a}):', sub(b, a))
print(f'truediv({a},{b}):', truediv(a, b))
print(f'truediv({d},{c}):', truediv(d, c))

print('\nBitwise:')
print(f'and_({c}, {d}) :', and_(c, d))
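
The neg/pos calls above are deterministic, so their results can be sanity-checked directly; a minimal assertion version of that block:

from operator import abs, neg, pos

assert abs(-1) == 1
assert neg(-1) == 1 and neg(5.0) == -5.0
assert pos(-1) == -1 and pos(5.0) == 5.0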
Example #59
0
 def p_expression_uminus(p):
     """expression : MINUS expression %prec UMINUS"""
     p[0] = operator.neg(p[2])
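
This rule depends on the `%prec UMINUS` override, which PLY honors only if UMINUS appears in the parser's precedence table. A typical declaration (token names other than MINUS and UMINUS are assumptions) looks like:

precedence = (
    ('left', 'PLUS', 'MINUS'),
    ('left', 'TIMES', 'DIVIDE'),
    ('right', 'UMINUS'),  # fictitious token, used only to give unary minus higher precedence
)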
Example #60
0
 def __sub__(self, other):
     assert type(other) == type(self)
     # basic check: negating twice should be identity
     assert op.neg(op.neg(other)) == other
     return self + (-other)
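
This __sub__ only assumes that __neg__, __add__ and __eq__ exist on the same type, so it reads like a mixin method. A hypothetical class it would slot into:

import operator as op

class Money:
    def __init__(self, cents):
        self.cents = cents

    def __neg__(self):
        return Money(-self.cents)

    def __add__(self, other):
        return Money(self.cents + other.cents)

    def __eq__(self, other):
        return type(other) == type(self) and self.cents == other.cents

    def __sub__(self, other):
        assert type(other) == type(self)
        # basic check: negating twice should be identity
        assert op.neg(op.neg(other)) == other
        return self + (-other)

print((Money(500) - Money(120)).cents)  # 380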