def parse_pieces_in_hand(target):
    """Parse a KIF pieces-in-hand string into an ordered {piece_index: count} dict.

    Tokens are space-separated; a lone symbol means one piece, a symbol
    followed by a kanji numeral (1-2 chars) gives the count.
    """
    if target == 'なし':  # None in japanese
        return ordered_dict()

    result = ordered_dict()
    for token in target.split(' '):
        if not token:
            continue
        size = len(token)
        if size == 1:
            result[shogi.PIECE_JAPANESE_SYMBOLS.index(token)] = 1
        elif size in (2, 3):
            piece = shogi.PIECE_JAPANESE_SYMBOLS.index(token[0])
            result[piece] = shogi.NUMBER_JAPANESE_KANJI_SYMBOLS.index(token[1:])
        else:
            raise ParserException('Invalid pieces in hand')
    return result
class BaselineFilterRange(object):
    """Spectral range over which the baseline filter is applied."""

    WHOLE_SPECTRUM = 0
    BETWEEN_CURSORS = 1

    # Radio-button items for the Voigt fitting tool
    choices = ordered_dict([
        (WHOLE_SPECTRUM, "Whole Spectrum"),
        (BETWEEN_CURSORS, "Between Cursors"),
    ])
class FittingOptimizeAlgorithm(object):
    """Optimization algorithm used by the fitting step."""

    NONE = 0
    CONSTRAINED_LEVENBERG_MARQUARDT = 1

    # Radio-button items for the Voigt fitting tool
    choices = ordered_dict([
        (NONE, "None"),
        (CONSTRAINED_LEVENBERG_MARQUARDT, "ccfit"),
    ])
class FittingOptimizeWeights(object):
    """Weighting scheme used during fit optimization.

    BUGFIX: the original docstring said "Macromolecular Linewidths Method",
    copy-pasted from FittingMacromoleculeLinewidths; this class actually
    enumerates optimization weighting choices.
    """

    EVEN_WEIGHTING = 0
    LOCAL_WEIGHTING = 1

    # Items for the Voigt fitting tool radio buttons
    choices = ordered_dict((
        (EVEN_WEIGHTING, "Even Weighting"),
        (LOCAL_WEIGHTING, "Local Weighting"),
    ))
class FittingMacromoleculeLinewidths(object):
    """Method for modeling macromolecular linewidths."""

    LUMPED = 0
    INDEPENDENT = 1

    # Radio-button items for the Voigt fitting tool
    choices = ordered_dict([
        (LUMPED, "Lumped"),
        (INDEPENDENT, "Independent"),
    ])
class FittingMacromoleculePeak(object):
    """Method for modeling macromolecular peaks."""

    GROUPED_PEAKS = 0
    INDIVIDUAL_PEAKS = 1

    # Radio-button items for the Voigt fitting tool
    choices = ordered_dict([
        (GROUPED_PEAKS, "Grouped Peaks"),
        (INDIVIDUAL_PEAKS, "Individual Peaks"),
    ])
class ZeroFillMultiplier(object):
    """Zero fill multiplier constants.

    The zero fill is not only limited to a specific range, it must also
    be an integral power of 2.
    """

    _EXPONENT = 5
    MIN = 1
    MAX = 2 ** _EXPONENT

    # Dropdown items for the spatial processing options:
    # every power of two from 1 up to MAX, paired with its label.
    choices = ordered_dict((2 ** e, str(2 ** e)) for e in range(_EXPONENT + 1))
class BaselineFilterMethod(object):
    """Algorithm used for baseline filtering."""

    NONE = 0
    LOWESS = 1
    BSPLINE = 2

    # Radio-button items for the Voigt fitting tool
    choices = ordered_dict([
        (NONE, "None"),
        (LOWESS, "Lowess"),
        (BSPLINE, "B-Spline"),
    ])
class FittingLineshapeModel(object):
    """Lineshape model applied during fitting."""

    VOIGT = 0
    LORENTZ = 1
    GAUSS = 2

    # Radio-button items for the Voigt fitting tool
    choices = ordered_dict([
        (VOIGT, "Voigt"),
        (LORENTZ, "Lorentz"),
        (GAUSS, "Gauss"),
    ])
class XYZOrientation(object):
    """XYZ orientation constants.

    Integral image along the 0:X-Y / 1:Y-Z / 2:X-Z direction.
    """

    XY_ORIENTATION = 0
    YZ_ORIENTATION = 1
    XZ_ORIENTATION = 2

    # Items shown by the XYZ browser options
    choices = ordered_dict([
        (XY_ORIENTATION, "XY_Orientation"),
        (YZ_ORIENTATION, "YZ_Orientation"),
        (XZ_ORIENTATION, "XZ_Orientation"),
    ])
class FittingBaselineAlgorithm(object):
    """Baseline estimation algorithm used during fitting."""

    NONE = 0
    VARIABLE_KNOT_SPLINE = 1
    FIXED_KNOT_SPLINE = 2
    WAVELET_FILTER = 3

    # Radio-button items for the Voigt fitting tool
    choices = ordered_dict([
        (NONE, "None"),
        (VARIABLE_KNOT_SPLINE, "Variable Knot Spline"),
        (FIXED_KNOT_SPLINE, "Fixed Knot Spline"),
        (WAVELET_FILTER, "Wavelet Filter"),
    ])
def get_information_for_timecourse(self, time, keys_list='all'):
    """Collect this agent's per-time-step record as an ordered dict.

    Keys, in insertion order: 'time', 'h_ID', 'loc', 'status',
    'Temporary_Flags', 'Cumulative_Flags', 'Interaction_partner',
    'Infection_event', 'Strain'.  All stati and flags are encoded as a
    single integer each to save memory.

    :param time: current time step (stored as numpy.uint16)
    :param keys_list: 'all' for the full record, or an iterable of key
        names to restrict the output to (returned in the given order).
    :return: ordered_dict with the requested fields
    """
    record = ordered_dict()
    record['time'] = numpy.uint16(time)        # current time-step
    record['h_ID'] = numpy.uint16(self.ID)     # agent-ID
    record['loc'] = numpy.uint16(self.loc.ID)  # ID of current location
    record['status'] = self.encode_stati()     # encoded agent-status
    # Encoded temporary and cumulative agent-flags.
    record['Temporary_Flags'] = self.encode_temporary_flags()
    record['Cumulative_Flags'] = self.encode_cumulative_flags()
    record['Interaction_partner'] = ','.join(self.contact_persons)
    record['Infection_event'] = numpy.int32(self.infected_by)
    record['Strain'] = self.infection_strain
    if keys_list != 'all':
        return ordered_dict((key, record[key]) for key in keys_list)
    return record
class EddyCurrentCorrection(object):
    """Eddy current correction constants.

    The constants are arbitrary and may change, but bool(NONE) is
    guaranteed to be False.
    """

    NONE = 0
    SIMPLE = 1
    MIN_NOISE_FILTER = 0.1
    MAX_NOISE_FILTER = 100.0

    # Dropdown items for the spectral processing options
    choices = ordered_dict([
        (NONE, "Off"),
        (SIMPLE, "Simple"),
    ])
class SpatialFilter(object):
    """Spatial filter constants.

    The constants are arbitrary and may change, but bool(NONE) is
    guaranteed to be False.
    """

    NONE = 0
    HAMMING = 1
    EXPONENTIAL = 2
    GAUSSIAN = 3

    # Dropdown items for the spatial processing options
    choices = ordered_dict([
        (NONE, "None"),
        (HAMMING, "Hamming"),
        (EXPONENTIAL, "Exponential"),
        (GAUSSIAN, "Gaussian"),
    ])
class WaterExtrapolation(object):
    """Water extrapolation constants.

    The constants are arbitrary and may change, but bool(NONE) is
    guaranteed to be False.
    """

    NONE = 0
    LINEAR = 1
    AR_MODEL = 2
    MIN_POINTS = 1
    MAX_POINTS = 1000

    # Dropdown items for the spectral processing options
    choices = ordered_dict([
        (NONE, "None"),
        (LINEAR, "Linear"),
        (AR_MODEL, "AR Model"),
    ])
class Apodization(object):
    """Apodization constants.

    The constants are arbitrary and may change, but bool(NONE) is
    guaranteed to be False.
    """

    MIN_WIDTH = 0
    MAX_WIDTH = 100
    NONE = 0
    GAUSSIAN = 1
    LORENTZIAN = 2

    # Dropdown items for the spectral processing options
    choices = ordered_dict([
        (NONE, "None"),
        (GAUSSIAN, "Gaussian"),
        (LORENTZIAN, "Lorentzian"),
    ])
class XYZImageType(object):
    """XYZ image type constants.

    Integral image along the 0:X-Y / 1:Y-Z / 2:X-Z direction.
    """

    MAGNITUDE = 0
    REAL = 1
    IMAGINARY = 2
    MAGNITUDE_PLUS_MASK = 3
    B0_MAP = 4

    # Items shown by the XYZ browser options
    choices = ordered_dict([
        (MAGNITUDE, "Magnitude"),
        (REAL, "Real"),
        (IMAGINARY, "Imaginary"),
        (MAGNITUDE_PLUS_MASK, "Magnitude plus Mask"),
        (B0_MAP, "B0 Map"),
    ])
class SpatialTranspose(object):
    """Spatial transposition constants.

    The constants are arbitrary and may change, but bool(NONE) is
    guaranteed to be False.
    """

    NONE = 0
    TRANSPOSE_XY = 1   # x <-> y
    TRANSPOSE_XZ = 2   # x <-> z
    TRANSPOSE_YZ = 3   # y <-> z
    TRANSPOSE_XYZ = 4  # x->y->z->x

    # Items for the spatial processing options
    choices = ordered_dict([
        (NONE, "None"),
        (TRANSPOSE_XY, "Transpose_XY"),
        (TRANSPOSE_XZ, "Transpose_XZ"),
        (TRANSPOSE_YZ, "Transpose_YZ"),
        (TRANSPOSE_XYZ, "Transpose_XYZ"),
    ])
class WaterFilter(object):
    """Water filter constants.

    The constants are arbitrary and may change, but bool(NONE) is
    guaranteed to be False.
    """

    NONE = 0
    HAMMING = 1
    FIR = 2
    HLSVD = 3

    # Dropdown items for the spectral processing options
    choices = ordered_dict([
        (NONE, "None"),
        (HAMMING, "Hamming"),
        (FIR, "FIR"),
        (HLSVD, "HLSVD"),
    ])

    # FIR (Finite Impulse Response) parameter bounds
    MIN_FIR_RIPPLE = 0
    MAX_FIR_RIPPLE = 500
    MIN_FIR_HALF_WIDTH = 0
    MAX_FIR_HALF_WIDTH = 500

    # HLSVD parameter bounds
    MIN_HLSVD_DATA_POINTS = 1
    MAX_HLSVD_DATA_POINTS = 2048
    MIN_HLSVD_SINGULAR_VALUES = 1
    MAX_HLSVD_SINGULAR_VALUES = 50
    MIN_HLSVD_MATRIX_POINTS = 1
    MAX_HLSVD_MATRIX_POINTS = 1024
    MIN_HLSVD_ITERATIONS = 1
    MAX_HLSVD_ITERATIONS = 2000
table = [ val for i, val in enumerate(p_x, start=0) if i not in rows_to_remove ] if len(table) != 1: table = cls.normalize(table) if printer is not None: printer(table) return table if __name__ == '__main__': parents = ordered_dict({ 'A': (), 'B': ('A'), 'C': ('A'), 'D': ('B'), 'E': ('B', 'C'), 'F': ('C') }) probs = ordered_dict({ 'A': { '+a': .3, '-a': .7 }, 'B|A': { '+a+b': .7, '+a-b': .3, '-a+b': .6, '-a-b': .4 }, 'C|A': {
import telethon.tl.types ChatId, MsgId = int, int ImportRef = str TG_APP_ID = os.getenv("TG_APP_ID") TG_APP_HASH = os.getenv("TG_APP_HASH") FUNKWHALE_APP_TOKEN = os.getenv("FUNKWHALE_APP_TOKEN") FUNKWHALE_BASE_URL = os.getenv("FUNKWHALE_BASE_URL") if any(not x for x in (TG_APP_ID, TG_APP_HASH, FUNKWHALE_APP_TOKEN, FUNKWHALE_BASE_URL)): raise Exception("Missing/empty env vars") _ref_cache: OrderedDict[Tuple[ChatId, MsgId], ImportRef] = ordered_dict() def cache_ref(import_ref: ImportRef, chat_id: ChatId, msg_id: MsgId, cache_size=200): _ref_cache[(chat_id, msg_id)] = import_ref logging.debug(f"caching import_ref {import_ref} for {(chat_id, msg_id)}") while len(_ref_cache) > cache_size: _ref_cache.popitem(False) async def get_ref(chat_id: ChatId, msg_id: MsgId, timeout=120) -> Optional[ImportRef]:
from InfByEnum import InfByEnum from BayesNet import BayesNet from VariableElim import VariableElim from collections import OrderedDict as ordered_dict if __name__ == '__main__': parents = ordered_dict({ 'C': (), 'D': ('C', 'M', 'T'), 'M': ('W'), 'T': ('W'), 'W': () }) probs = ordered_dict({ 'C': { '+c': .5, '-c': .5 }, 'D|CMT': { '+c+d+m+t': .9, '+c+d+m-t': .8, '+c+d-m+t': .8, '+c+d-m-t': .2, '+c-d+m+t': .1, '+c-d+m-t': .2, '+c-d-m+t': .2, '+c-d-m-t': .8, '-c+d+m+t': .8, '-c+d+m-t': .5, '-c+d-m+t': .6, '-c+d-m-t': .1,
from collections import OrderedDict as ordered_dict escaped_glob_tokens_to_re = ordered_dict( ( # Order of ``**/`` and ``/**`` in RE tokenization pattern doesn't matter because ``**/`` will be caught first no matter what, making ``/**`` the only option later on. # W/o leading or trailing ``/`` two consecutive asterisks will be treated as literals. ( "/\*\*", "(?:/.+?)*", ), # Edge-case #1. Catches recursive globs in the middle of path. Requires edge case #2 handled after this case. ( "\*\*/", "(?:^.+?/)*", ), # Edge-case #2. Catches recursive globs at the start of path. Requires edge case #1 handled before this case. ``^`` is used to ensure proper location for ``**/``. ( "\*", "[^/]*?", ), # ``[^/]*?`` is used to ensure that ``*`` won't match subdirs, as with naive ``.*?`` solution. ("\?", "."), ("\[\*\]", "\*"), # Escaped special glob character. ("\[\?\]", "\?"), # Escaped special glob character. ( "\[!", "[^", ), # Requires ordered dict, so that ``\[!`` preceded ``\[`` in RE pattern. Needed mostly to differentiate between ``!`` used within character class ``[]`` and outside of it, to avoid faulty conversion. ("\[", "["), ("\]", "]"), ) ) escaped_glob_replacement = re.compile(
def parse_str(kif_str):
    """Parse a complete KIF document.

    Returns a one-element list (to match the CSA parser's interface)
    holding a summary dict with:
      'names' -- [black_name, white_name]
      'sfen'  -- starting position (standard, handicap, or custom BOD)
      'moves' -- list of parsed moves
      'win'   -- 'b', 'w', '-' (draw) or None when undecided

    :param kif_str: full KIF text; any newline convention is accepted.
    :raises ParserException: for the unsupported handicap type "other".
    """
    names = [None, None]
    pieces_in_hand = [ordered_dict(), ordered_dict()]
    current_turn = shogi.BLACK
    sfen = shogi.STARTING_SFEN
    moves = []
    last_to_square = None
    win = None
    custom_sfen = False
    # Normalize newlines so the split below sees a single convention.
    kif_str = kif_str.replace('\r\n', '\n').replace('\r', '\n')
    for line in kif_str.split('\n'):
        if len(line) == 0 or line[0] == "*":
            # Blank line or comment line.
            pass
        elif line.count('+') == 2 and line.count('-') > 10:
            # Board frame line; toggles custom-position (BOD) parsing.
            if custom_sfen:
                custom_sfen = False
                # remove last slash
                sfen = sfen[:-1]
            else:
                custom_sfen = True
                sfen = ''
        elif custom_sfen:
            sfen = ''.join((sfen, Parser.parse_board_line(line), '/'))
        elif ':' in line:
            (key, value) = line.split(':', 1)
            value = value.rstrip(' ')
            if key == '先手' or key == '下手':  # sente or shitate
                # Black's name
                names[shogi.BLACK] = value
            elif key == '後手' or key == '上手':  # gote or uwate
                # White's name
                names[shogi.WHITE] = value
            elif key == '先手の持駒' or \
                    key == '下手の持駒':  # sente or shitate's pieces in hand
                # First player's pieces in hand
                pieces_in_hand[shogi.BLACK] = Parser.parse_pieces_in_hand(
                    value)
            elif key == '後手の持駒' or \
                    key == '上手の持駒':  # gote or uwate's pieces in hand
                # Second player's pieces in hand
                pieces_in_hand[shogi.WHITE] = Parser.parse_pieces_in_hand(
                    value)
            elif key == '手合割':  # teai wari (handicap)
                sfen = Parser.HANDYCAP_SFENS[value]
                if sfen is None:
                    raise ParserException(
                        'Cannot support handycap type "other"')
        elif line == '後手番':
            # White moves first from the start position.
            current_turn = shogi.WHITE
        else:
            (move, last_to_square,
             special_str) = Parser.parse_move_str(line, last_to_square)
            if move is not None:
                moves.append(move)
                # Alternate the side to move after each parsed move.
                if current_turn == shogi.BLACK:
                    current_turn = shogi.WHITE
                else:  # current_turn == shogi.WHITE
                    current_turn = shogi.BLACK
            elif special_str in ['投了', '詰み', '切れ負け', '反則負け']:
                # Resignation / mate / time loss / illegal-move loss:
                # the side to move loses.
                win = 'w' if current_turn == shogi.BLACK else 'b'
            elif special_str in ['反則勝ち', '入玉勝ち']:
                # Opponent's foul / entering-king win: side to move wins.
                win = 'b' if current_turn == shogi.BLACK else 'w'
            elif special_str in ['持将棋', '千日手', '先日手']:
                # Impasse (jishogi) or repetition (sennichite) draw.
                # BUGFIX: also accept the standard spelling 千日手; the
                # original only matched the typo 先日手, which is kept
                # for backward compatibility.
                win = '-'
            else:
                m = Parser.RESULT_RE.match(line)
                if m:
                    win_side_str = m.group(3)
                    if win_side_str == '先' or win_side_str == '下':
                        win = 'b'
                    elif win_side_str == '後' or win_side_str == '上':
                        win = 'w'
                    else:
                        # TODO: repetition of moves with continuous check
                        win = '-'
    # A custom (BOD) position yields only the board field; complete the
    # sfen with hands and side to move.
    if len(sfen.split(' ')) == 1:
        sfen = Parser.complete_custom_sfen(
            sfen, pieces_in_hand, current_turn)
    summary = {'names': names, 'sfen': sfen, 'moves': moves, 'win': win}
    # NOTE: for the same interface with CSA parser
    return [summary]