def for_each_facility(self, data, tracer, previous_cycle_data=None):
    df1_records = filter_consumption_records(data, tracer.extras[DF1])
    df2_records = filter_consumption_records(data, tracer.extras[DF2])
    df1_count = len(df1_records)
    df2_count = len(df2_records)
    df1_values = values_for_records(tracer.extras.get(FIELDS, []), df1_records)
    df2_values = values_for_records(tracer.extras.get(FIELDS, []), df2_records)
    all_df1_fields_are_blank = pydash.every(df1_values, lambda x: x is None) and len(df1_values) > 0
    all_df2_fields_are_blank = pydash.every(df2_values, lambda x: x is None) and len(df2_values) > 0
    sum_df1 = pydash.chain(df1_values).reject(lambda x: x is None).map(float).sum().value()
    sum_df2 = pydash.chain(df2_values).reject(lambda x: x is None).map(float).sum().value()
    if df1_count == 0 or df2_count == 0:
        return NOT_REPORTING
    if all_df1_fields_are_blank or all_df2_fields_are_blank:
        result = NO
    elif (sum_df2 == 0 and sum_df1 == 0) or (sum_df2 != 0 and 0.7 < abs(sum_df1 / sum_df2) < 1.429):
        result = YES
    else:
        result = NO
    return result
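# A minimal standalone sketch of the sum-and-compare step above, with the
# filter/values helpers replaced by hypothetical literal lists.
import pydash

df1_values = [10.0, None, 5.0]   # hypothetical consumption values
df2_values = [None, 12.0]

# blanks (None) are dropped before summing, exactly as in the chain above
sum_df1 = pydash.chain(df1_values).reject(lambda x: x is None).map(float).sum().value()  # 15.0
sum_df2 = pydash.chain(df2_values).reject(lambda x: x is None).map(float).sum().value()  # 12.0

# 15 / 12 = 1.25 falls inside the 0.7-1.429 band, so this facility scores YES
within_band = sum_df2 != 0 and 0.7 < abs(sum_df1 / sum_df2) < 1.429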
def for_each_facility(self, data, combination, other_cycle_data={}):
    ratio = combination.get(RATIO)
    df1_records = filter_consumption_records(data, combination[DF1])
    df2_records = filter_consumption_records(data, combination[DF2])
    other_records = filter_consumption_records(data, combination.get(OTHER, []))
    df1_count = len(df1_records)
    df2_count = len(df2_records) + len(other_records)
    df1_values = values_for_records(combination[FIELDS], df1_records)
    df2_values = values_for_records(combination[FIELDS], df2_records)
    other_values = values_for_records(combination[FIELDS], other_records)
    sum_df1 = pydash.chain(df1_values).reject(lambda x: x is None).map(float).sum().value()
    sum_df2 = pydash.chain(df2_values).reject(lambda x: x is None).map(float).sum().value()
    other_sum = pydash.chain(other_values).reject(lambda x: x is None).map(float).sum().value()
    all_df1_fields_are_blank = pydash.every(df1_values, lambda x: x is None) and len(df1_values) > 0
    b1 = pydash.every(df2_values, lambda x: x is None) and len(df2_values) > 0
    b2 = pydash.every(other_values, lambda x: x is None) and len(other_values) > 0
    all_df2_fields_are_blank = b1 and b2
    adjusted_sum_df1 = sum_df1 / ratio
    numerator = adjusted_sum_df1
    denominator = (sum_df2 / ratio) + other_sum
    if df1_count == 0 or df2_count == 0:
        return NOT_REPORTING
    if all_df1_fields_are_blank or all_df2_fields_are_blank:
        result = NO
    elif (sum_df2 == 0 and sum_df1 == 0) or (denominator != 0 and 0.7 < abs(numerator / denominator) < 1.429):
        result = YES
    else:
        result = NO
    return result
def getBoardWinner(self, board: Board2D, toWin: int) -> Winner:
    # horizontal runs
    for i in range(len(board)):
        for j in range(len(board) - toWin + 1):
            if board[i][j] == 'E':
                continue
            cells = [board[i][j + k] for k in range(toWin)]
            if _.every(cells, lambda cell: cell == cells[0]):
                return cells[0]
    # vertical runs
    for j in range(len(board)):
        for i in range(len(board) - toWin + 1):
            if board[i][j] == 'E':
                continue
            cells = [board[i + k][j] for k in range(toWin)]
            if _.every(cells, lambda cell: cell == cells[0]):
                return cells[0]
    # diagonal runs; each diagonal is guarded by its own anchor cell so that an
    # empty anchor on one diagonal does not skip the check of the other
    for i in range(len(board) - toWin + 1):
        for j in range(len(board) - toWin + 1):
            if board[i][j] != 'E':
                cells = [board[i + k][j + k] for k in range(toWin)]
                if _.every(cells, lambda cell: cell == cells[0]):
                    return cells[0]
            if board[i + toWin - 1][j] != 'E':
                cells = [board[i - k + toWin - 1][j + k] for k in range(toWin)]
                if _.every(cells, lambda cell: cell == cells[0]):
                    return cells[0]
    return 'Draw'
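# A hypothetical standalone version of the sliding-window check used above,
# restricted to a single row; the Board2D/Winner types are not shown here.
import pydash as _

def row_winner(row, toWin):
    for j in range(len(row) - toWin + 1):
        cells = row[j:j + toWin]
        # a window wins when its anchor is not empty and every cell matches it
        if cells[0] != 'E' and _.every(cells, lambda cell: cell == cells[0]):
            return cells[0]
    return None

row_winner(['X', 'X', 'X', 'O'], 3)   # 'X'
row_winner(['X', 'O', 'X', 'O'], 3)   # None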
def for_each_facility(self, data, combination, previous_cycle_data=None):
    ratio = combination[RATIO]
    df1_records = filter_consumption_records(data, combination[DF1])
    df2_records = filter_consumption_records(data, combination[DF2])
    df1_count = len(df1_records)
    df2_count = len(df2_records)
    df1_values = values_for_records(combination[FIELDS], df1_records)
    df2_values = values_for_records(combination[FIELDS], df2_records)
    sum_df1 = pydash.chain(df1_values).reject(lambda x: x is None).sum().value()
    sum_df2 = pydash.chain(df2_values).reject(lambda x: x is None).sum().value()
    all_df1_fields_are_blank = pydash.every(df1_values, lambda x: x is None)
    all_df2_fields_are_blank = pydash.every(df2_values, lambda x: x is None)
    return calculate_score(df1_count, df2_count, sum_df1, sum_df2, ratio,
                           all_df1_fields_are_blank, all_df2_fields_are_blank,
                           facility_not_reporting(data))
def for_each_facility(self, data, combination, previous_cycle_data=None):
    df1_records = get_consumption_records(data, combination[CONSUMPTION_QUERY])
    values = values_for_records(self.fields, df1_records)
    all_cells_not_negative = pydash.every(values, lambda x: x is None or x >= 0)
    if len(df1_records) == 0:
        return NOT_REPORTING
    return YES if all_cells_not_negative else NO
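# The predicate above in isolation: blanks (None) are tolerated, any negative
# value fails the check. Plain literal lists here, purely for illustration.
import pydash

pydash.every([None, 3, 0], lambda x: x is None or x >= 0)    # True
pydash.every([None, -1, 2], lambda x: x is None or x >= 0)   # False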
def for_each_facility(self, data, tracer, other_cycle_data={}):
    ratio = tracer.extras.get(RATIO)
    df1_records = filter_consumption_records(data, tracer.extras[DF1])
    df2_records = filter_consumption_records(data, tracer.extras[DF2])
    other_records = filter_consumption_records(data, tracer.extras.get(OTHER, []))
    df1_count = len(df1_records)
    df2_count = len(df2_records) + len(other_records)
    df1_values = values_for_records(tracer.extras[FIELDS], df1_records)
    df2_values = values_for_records(tracer.extras[FIELDS], df2_records)
    other_values = values_for_records(tracer.extras[FIELDS], other_records)
    sum_df1 = pydash.chain(df1_values).reject(lambda x: x is None).map(float).sum().value()
    sum_df2 = pydash.chain(df2_values).reject(lambda x: x is None).map(float).sum().value()
    other_sum = pydash.chain(other_values).reject(lambda x: x is None).map(float).sum().value()
    all_df1_fields_are_blank = pydash.every(df1_values, lambda x: x is None) and len(df1_values) > 0
    b1 = pydash.every(df2_values, lambda x: x is None) and len(df2_values) > 0
    b2 = pydash.every(other_values, lambda x: x is None) and len(other_values) > 0
    all_df2_fields_are_blank = b1 and b2
    adjusted_sum_df1 = sum_df1 / ratio
    numerator = adjusted_sum_df1
    denominator = (sum_df2 / ratio) + other_sum
    if df1_count == 0 or df2_count == 0:
        return NOT_REPORTING
    if all_df1_fields_are_blank or all_df2_fields_are_blank:
        result = NO
    elif (sum_df2 == 0 and sum_df1 == 0) or (denominator != 0 and 0.7 < abs(numerator / denominator) < 1.429):
        result = YES
    else:
        result = NO
    return result
def _getBoardWinner(self, board: Board3D, toWin: int, x: int, y: int, z: int) -> Winner:
    possibilities = np.full((toWin**3 * 3 + 4, toWin), ord('E'))
    for i in range(x, x + toWin):
        for j in range(y, y + toWin):
            for k in range(z, z + toWin):
                # straight rows along each of the three axes
                index = self._possibilitiesIndex(Rows, toWin, i - x, j - y, 0)
                possibilities[index][k - z] = ord(board[i][j][k])
                index = self._possibilitiesIndex(Rows, toWin, i - x, k - z, 1)
                possibilities[index][j - y] = ord(board[i][j][k])
                index = self._possibilitiesIndex(Rows, toWin, j - y, k - z, 2)
                possibilities[index][i - x] = ord(board[i][j][k])
                # diagonals within each board plane
                index = self._possibilitiesIndex(BoardDiagonals, toWin, i - x, j - y, 0)
                possibilities[index][k - z] = ord(board[i][k][k])
                index = self._possibilitiesIndex(BoardDiagonals, toWin, i - x, k - z, 1)
                possibilities[index][j - y] = ord(board[j][j][k])
                index = self._possibilitiesIndex(BoardDiagonals, toWin, j - y, k - z, 2)
                possibilities[index][i - x] = ord(board[i][j][i])
                index = self._possibilitiesIndex(BoardDiagonals, toWin, i - x, j - y, 3)
                possibilities[index][k - z] = ord(board[i][k][toWin - k - 1 + z])
                index = self._possibilitiesIndex(BoardDiagonals, toWin, i - x, k - z, 4)
                possibilities[index][j - y] = ord(board[j][toWin - j - 1 + y][k])
                index = self._possibilitiesIndex(BoardDiagonals, toWin, j - y, k - z, 5)
                possibilities[index][i - x] = ord(board[toWin - i - 1 + x][j][i])
                # the four space diagonals through the cube
                index = self._possibilitiesIndex(SpaceDiagonals, toWin, 0, 0, 0)
                possibilities[index][i - x] = ord(board[i][i][i])
                index = self._possibilitiesIndex(SpaceDiagonals, toWin, 0, 0, 1)
                possibilities[index][i - x] = ord(board[toWin - i - 1 + x][i][i])
                index = self._possibilitiesIndex(SpaceDiagonals, toWin, 0, 0, 2)
                possibilities[index][i - x] = ord(board[i][toWin - i - 1 + x][i])
                index = self._possibilitiesIndex(SpaceDiagonals, toWin, 0, 0, 3)
                possibilities[index][i - x] = ord(board[i][i][toWin - i - 1 + x])
    # print('\n'.join(['{:3} '.format(i) + ''.join(['{:4}'.format(chr(item)) for item in possibilities[i]]) for i in range(len(possibilities))]))
    arr: List[List[int]] = possibilities.tolist()
    winner = _.find(arr, lambda p: p[0] != ord('E') and _.every(p, lambda x: x == p[0]))
    return 'Draw' if winner is None else chr(winner[0])
def for_each_facility(self, data, combination, previous_cycle_data=None):
    ratio = combination[RATIO]
    df1_records = filter_consumption_records(data, combination[DF1])
    df2_records = filter_consumption_records(data, combination[DF2])
    df1_count = len(df1_records)
    df2_count = len(df2_records)
    df1_values = values_for_records(combination[FIELDS], df1_records)
    df2_values = values_for_records(combination[FIELDS], df2_records)
    sum_df1 = pydash.chain(df1_values).reject(lambda x: x is None).sum().value()
    sum_df2 = pydash.chain(df2_values).reject(lambda x: x is None).sum().value()
    all_df1_fields_are_blank = pydash.every(df1_values, lambda x: x is None)
    all_df2_fields_are_blank = pydash.every(df2_values, lambda x: x is None)
    return calculate_score(df1_count, df2_count, sum_df1, sum_df2, ratio,
                           all_df1_fields_are_blank, all_df2_fields_are_blank,
                           facility_not_reporting(data))
def for_each_facility(self, data, combination, previous_cycle_data=None):
    df1_records = filter_consumption_records(data, combination[DF1])
    df2_records = filter_consumption_records(data, combination[DF2])
    df1_count = len(df1_records)
    df2_count = len(df2_records)
    df1_values = values_for_records(combination.get(FIELDS, []), df1_records)
    df2_values = values_for_records(combination.get(FIELDS, []), df2_records)
    all_df1_fields_are_blank = pydash.every(df1_values, lambda x: x is None) and len(df1_values) > 0
    all_df2_fields_are_blank = pydash.every(df2_values, lambda x: x is None) and len(df2_values) > 0
    sum_df1 = pydash.chain(df1_values).reject(lambda x: x is None).map(float).sum().value()
    sum_df2 = pydash.chain(df2_values).reject(lambda x: x is None).map(float).sum().value()
    if df1_count == 0 or df2_count == 0:
        return NOT_REPORTING
    if all_df1_fields_are_blank or all_df2_fields_are_blank:
        result = NO
    elif (sum_df2 == 0 and sum_df1 == 0) or (sum_df2 != 0 and 0.7 < abs(sum_df1 / sum_df2) < 1.429):
        result = YES
    else:
        result = NO
    return result
def as_result(self, group1, group2, constant=100.0):
    if type(group1) is list:
        if len(group1) < 1 or (group2 is not None and len(group2) < 1):
            return "NOT_REPORTING"
        values = list(group1)
        if group2 is not None:
            values.extend(group2)
        all_zero = pydash.every(values, lambda x: x == 0)
        if all_zero:
            return "YES"
    result = self.compare(group1, group2, constant)
    return "YES" if result else "NO"
def all(cls, *args):
    try:
        dem_args = list(args)
        if some(dem_args, lambda result: isinstance(result, Error)):
            return head(filter_(dem_args, lambda result: isinstance(result, Error)))
        if every(dem_args, lambda result: not isinstance(result, Ok)):
            return Error(Exception('Some items passed in were not a Result.'))
        return Ok(map_(dem_args, lambda result: result.getValue()))
    except Exception as e:
        return Error(e)
def groups_have_adequate_data(self, groups):
    valid_groups = py_(groups).reject(lambda x: x is None).value()
    is_two_cycle = "Previous" in py_(valid_groups).map(
        lambda group_result: group_result.group.cycle.id).value()
    if (is_two_cycle and not py_(valid_groups).every(
            lambda group_result: len(group_result.factored_records) > 0).value()):
        return False
    number_of_records = py_(valid_groups).map(
        lambda group_result: group_result.factored_records).flatten().size().value()
    has_adequate_data = number_of_records > 0
    if has_adequate_data:
        return pydash.every(valid_groups, lambda x: x.is_above_threshold())
    return has_adequate_data
def test_every(case, expected):
    assert _.every(*case) == expected
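# One possible parametrization for the test above (the concrete cases are
# hypothetical); with no predicate, pydash.every just checks truthiness.
import pydash as _
import pytest

@pytest.mark.parametrize("case,expected", [
    (([True, 1, "x"],), True),
    (([True, 0, "x"],), False),
    (([1, 2, 3], lambda n: n > 0), True),
])
def test_every(case, expected):
    assert _.every(*case) == expected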
def all_values_blank(self):
    return pydash.every(self.factored_records, lambda data_record: data_record.all_blank())
def has_all_blanks(records, fields):
    return pydash.every(values_for_records(fields, records), lambda x: x is None)
def iteratee(item):
    return pyd.every(self.funcs, lambda func: func(item))
def __call__(self, obj):
    """Return result of conjoin `obj` with :attr:`funcs` predicates."""
    def iteratee(item):
        return pyd.every(self.funcs, lambda func: func(item))

    return pyd.every(obj, iteratee)
def all_blank(self):
    return pydash.every(self.values, lambda x: x is None)
def callback(item):
    return pyd.every(self.funcs, lambda func: func(item))
def __call__(self, obj):
    """Return result of conjoin `obj` with :attr:`funcs` predicates."""
    return pyd.every(obj, lambda item: pyd.every(self.funcs, lambda func: func(item)))
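# The __call__ implementations above back pydash's conjoin helper: every item
# in the collection must satisfy every predicate. A quick usage sketch,
# assuming the class is exposed as pydash.conjoin in your pydash version.
import pydash as pyd

all_positive_ints = pyd.conjoin(lambda n: isinstance(n, int), lambda n: n > 0)

all_positive_ints([1, 2, 3])    # True
all_positive_ints([1, -2, 3])   # False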