Example 1
    def __init__(self, target=3, starting_positions=None, space_size=50):
        self.action_space = Tuple([Discrete(2), Discrete(2)])
        # Avoid a shared mutable default argument.
        self.starting_positions = starting_positions if starting_positions is not None else [0, 0]
        self.space_size = space_size
        self.observation_space = Tuple([Discrete(space_size), Discrete(space_size)])

        self.target = target
        self.done = False
        self.reset()
Example 2
 def __init__(self, db=None) -> None:
     self.snakes: List[game.Snake] = []
     self.foods: List[game.Food] = []
     self.walls: List[game.Object] = []
     self.max_food = 1
     self.boundariesx: Tuple[int, int] = (0, 0)
     self.boundariesy: Tuple[int, int] = (0, 0)
     self.db = db
     # Save directions in a dictionary mapping id -> direction.
     self.directions: Dict[Id, Direction] = {}
Example 3
def save_speedy_as_nc(variables_SPEEDY: Dict[str,np.ndarray]) -> None:

    SPEEDY_atmospherical_variables_to_netcdf: Dict[str, Tuple[Tuple[str, str, str], np.ndarray]] = dict()
    SPEEDY_pressure_to_netcdf: Dict[str, Tuple[Tuple[str, str], np.ndarray]] = dict()

    pressure: np.ndarray = variables_SPEEDY.pop('pres', None)

    for key, value in variables_SPEEDY.items():
        SPEEDY_atmospherical_variables_to_netcdf[key] = (
            ("level", "lat", "lon"), value)

    speedy_atmospherical_dataset: xr.Dataset = xr.Dataset(
        SPEEDY_atmospherical_variables_to_netcdf,
        coords={
            "level": PRESSURE_LEVELS_VALUES,
            "lat": Y_SPEEDY_LAT,
            "lon": X_SPEEDY_LON,
        },
        attrs={
            'long_name': '6-Hourly Sample',
            'Levels': 7,
            'dataset': 'NCEP/DOE AMIP-II Reanalysis (Reanalysis-2)',
            'level_desc': 'Surface',
            'statistic': 'Individual Obs',
        },
    )

    SPEEDY_pressure_to_netcdf['pres'] = (("lat", "lon"), pressure)
    SPEEDY_pressure_dataset: xr.Dataset = xr.Dataset(
        SPEEDY_pressure_to_netcdf,
        coords={
            "lat": Y_SPEEDY_LAT,
            "lon": X_SPEEDY_LON,
        },
        attrs={
            'long_name': '6-Hourly Pressure at Surface',
            'Levels': 1,
            'units': 'Pascals',
            'precision': -1,
            'GRIB_id': 1,
            'GRIB_name': 'PRES',
            'var_desc': 'Pressure',
            'dataset': 'NCEP/DOE AMIP-II Reanalysis (Reanalysis-2)',
            'level_desc': 'Surface',
            'statistic': 'Individual Obs',
            'parent_stat': 'Other',
            'standard_name': 'pressure',
        },
    )

    speedy_atmospherical_dataset.to_netcdf(
        INTERPOLATIONS_PATH/('SPEEDY-'+FILENAME + "-atmospherical_dataset.nc"))
    SPEEDY_pressure_dataset.to_netcdf(
        INTERPOLATIONS_PATH/('SPEEDY-'+FILENAME + "-pressure_dataset.nc"))
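
A usage sketch with synthetic arrays; the module-level constants (PRESSURE_LEVELS_VALUES, Y_SPEEDY_LAT, X_SPEEDY_LON, INTERPOLATIONS_PATH, FILENAME) are assumed to be defined as in the source module, and the grid shape below is only illustrative.

import numpy as np

rng = np.random.default_rng(0)
fields = {
    # (level, lat, lon) arrays for each atmospheric variable
    't': rng.standard_normal((7, 48, 96)),
    'q': rng.standard_normal((7, 48, 96)),
    # surface pressure on (lat, lon); popped off and written separately
    'pres': rng.standard_normal((48, 96)),
}
save_speedy_as_nc(fields)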
Example 4
 def __init__(self, mat: List[List[Tuple[int, int, int]]]) -> None:
     width = len(mat[0])
     for row in mat:
         if len(row) != width:
             raise ValueError("Rows are not all the same length")
     self.mat = mat
Example 5
    def add_margin(self, image, margin_size: int, margine_side: str,
                   margin_color: Tuple[int, int, int]):
        """Adds a colored margin to the image.
        :image: a cv image
        :margin_size: the margin width if the side is right or left; the
         margin height if the side is top or bottom.
        :margine_side: determines whether the margin is added to the right,
        left, top, or bottom.
        :margin_color: the background color of the margin.
        """
        height, width, channels = image.shape
        if margine_side in (SIDES.TOP, SIDES.BOTTOM):
            height = margin_size
            axis = 0
        elif margine_side in (SIDES.RIGHT, SIDES.LEFT):
            width = margin_size
            axis = 1
        else:
            raise exceptions.WrongSide('Side is wrong')

        blank_image = np.zeros((height, width, channels), np.uint8)
        blank_image[:] = margin_color  # fill the margin with the requested color
        if margine_side in (SIDES.TOP, SIDES.LEFT):
            first_image = blank_image
            second_image = image
        elif margine_side in (SIDES.BOTTOM, SIDES.RIGHT):
            first_image = image
            second_image = blank_image
        else:
            raise exceptions.WrongSide('Side is wrong')

        return np.concatenate((first_image, second_image), axis=axis)
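
A hedged usage sketch, assuming `editor` is an instance of the enclosing class and `SIDES` is the enum from the source module:

import numpy as np

image = np.zeros((100, 200, 3), np.uint8)  # a black 100x200 BGR image
framed = editor.add_margin(image, margin_size=20,
                           margine_side=SIDES.TOP,
                           margin_color=(255, 255, 255))
assert framed.shape == (120, 200, 3)  # 20 white rows prepended on top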
Example 6
    def _insert_meta(self, axis: int, name: str, values: Union[List[str],
                                                               Tuple[str, ...]],
                     replace: bool) -> Adat:

        adat = self.copy()
        if axis == 0:
            if not replace and name in adat.index.names:
                raise AdatKeyError(
                    'Name already exists in index, use `adat.replace_meta` instead.'
                )
            elif replace and name not in adat.index.names:
                raise AdatKeyError(
                    'Name does not exist in index, use `adat.insert_meta` instead.'
                )
            index_df = adat.index.to_frame()
            index_df[name] = values
            adat.index = pd.MultiIndex.from_frame(index_df)

        elif axis == 1:
            if not replace and name in adat.columns.names:
                raise AdatKeyError(
                    'Name already exists in columns, use `adat.replace_meta` instead.'
                )
            elif replace and name not in adat.columns.names:
                raise AdatKeyError(
                    'Name does not exist in columns, use `adat.insert_meta` instead.'
                )

            columns_df = adat.columns.to_frame()
            columns_df.loc[:, name] = values
            adat.columns = pd.MultiIndex.from_frame(columns_df)

        return adat
Example 7
    def _filter_meta(self, axis: int, names: Union[List[str],
                                                   Set[str],
                                                   Tuple[str, ...]],
                     include: bool) -> Adat:

        # Check to see if names is the right variable type
        if not isinstance(names, (list, tuple, set)):
            raise TypeError('"names" must be a list, tuple, or set.')
        else:
            names = set(names)

        # Make a copy of the df (what we will eventually return) & grab the multiindex
        adat = self.copy()
        metadata = get_pd_axis(adat, axis)

        # Double check to make sure names exist in multiindex
        for name in names:
            if name not in metadata.names:
                raise AdatKeyError(f'Name, "{name}", not found in multiindex')

        # Filter down the metadata
        for name in metadata.names:
            if name not in names and include:
                metadata = metadata.droplevel(name)
            if name in names and not include:
                metadata = metadata.droplevel(name)

        # Assign the metadata to the appropriate place
        if axis == 0:
            adat.index = metadata
        else:
            adat.columns = metadata

        return adat
Example 8
    def replace_meta(self, axis: int, name: str,
                     values: Union[List[str], Tuple[str, ...]]) -> Adat:
        """Returns an adat with the given metadata/multiindices added.

        Metadata/multiindex must already exist in the adat.

        Parameters
        ----------
        axis : int
            The metadata/multiindex to operate on:
            0 - row metadata,
            1 - column metadata

        name : str
            The name of the index to be added.

        values : List[str] | Tuple[str, ...]
            Values to be added to the metadata/multiindex. Can be a tuple or list.

        Returns
        -------
        adat : Adat

        Examples
        --------
        >>> new_adat = adat.replace_meta(axis=0, name='Barcode', values=[1, 2, 3, 4])
        >>> new_adat = adat.replace_meta(axis=1, name='Type', values=['Protein', 'Protein'])
        """
        return self._insert_meta(axis, name, values, replace=True)
Example 9
    def exclude_meta(self, axis: int, names: Union[List[str],
                                                   Set[str],
                                                   Tuple[str, ...]]) -> Adat:
        """Returns an adat with excluded metadata/multiindices given the names to exclude.

        Parameters
        ----------
        axis : int
            The metadata/multiindex to operate on:
            0 - row metadata,
            1 - column metadata

        names : List[str] | Set[str] | Tuple[str, ...]
            The names to filter on. Can be a tuple, list, or set.

        Returns
        -------
        adat : Adat

        Examples
        --------
        >>> new_adat = adat.exclude_meta(axis=0, names=['Barcode'])
        >>> new_adat = adat.exclude_meta(axis=1, names=['SeqId'])
        """

        return self._filter_meta(axis, names, include=False)
Example 10
    def pick_on_meta(self, axis: int, name: str,
                     values: Union[List[str], Set[str],
                                   Tuple[str, ...]]) -> Adat:
        """Returns an adat with rfu rows or columns excluded given the multiindex name and values to keep.

        Parameters
        ----------
        axis : int
            The metadata/multiindex to operate on:
            0 - row metadata,
            1 - column metadata

        name : str
            The name of the metadata/multiindex row/column to filter based on.

        values : List[str] | Set[str] | Tuple[str, ...]
            The values to filter on. Can be a tuple, list, or set.

        Returns
        -------
        adat : Adat

        Examples
        --------
        >>> new_adat = adat.pick_on_meta(axis=0, name='Barcode', values=['00001'])
        >>> new_adat = adat.pick_on_meta(axis=1, name='SeqId', values=['10000-01', '12345-10'])
        >>> new_adat = adat.pick_on_meta(axis=1, name='Type', values=['Spuriomer'])
        """

        return self._filter_on_meta(axis, name, values, include=True)
Example 11
    def delete_queryset(self, request, queryset):
        media_assets = list(get_media_assets(queryset))

        bucket_map = defaultdict(list)
        for asset in media_assets:
            bucket_map[asset.dataset.bucket.name].append(asset)

        error_keys: Set[Tuple[str, str]] = set()
        for bucket, assets in bucket_map.items():
            file_path_to_remove = ([asset.full_path for asset in assets] +
                                   [asset.full_label_path for asset in assets])
            delete_errors = delete_files_in_s3(
                bucket,
                file_path_to_remove,
            )
            error_keys |= set((bucket, error.key) for error in delete_errors)

        MediaAsset.objects.filter(pk__in=[
            asset.pk for asset in media_assets
            if (asset.dataset.bucket.name, asset.full_path) not in error_keys
        ]).delete()
        queryset.exclude(pk__in={
            asset.dataset.pk for asset in media_assets
            if (asset.dataset.bucket.name, asset.full_path) in error_keys
        }).delete()
Example 12
def binary_search(a: Sequence[T], n: Number,
                  f: Callable[[T], Number]) -> Tuple[T, int]:
    '''
    Searches a sequence of elements Sequence[T] and returns the element T
    that, when called with a function f(T), gives the number N.

    Parameters:
        a: a Sequence of elements of type T. The codomain must be sorted,
           in the sense that f(a[m]) <= f(a[n]) for all m < n.
        f: a Function that takes an element T and returns a Number.
        n: a Number. Note that there must exist some element T such that f(T) == n.

    Returns:
        A tuple (T, i), where i is the position of T in the Sequence.
    '''

    lo = 0
    hi = len(a)
    while lo < hi:
        mid = (lo + hi) // 2
        if f(a[mid]) < n:
            lo = mid + 1
        elif f(a[mid]) > n:
            hi = mid
        else:  # We found it!
            return (a[mid], mid)

    raise ValueError(f'no element found such that f(element) == {n}')
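
A quick usage sketch; the key function below plays the role of the f parameter in the signature above.

records = [('a', 10), ('b', 20), ('c', 30)]
value, index = binary_search(records, 20, f=lambda r: r[1])
assert (value, index) == (('b', 20), 1)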
Example 13
class ReviewTextInfo(NamedTuple):
    """商品レビュー内の1文に関する情報

  Attributes:
    review_id (int): レビュー番号
    last_review_id (int): 最後のレビュー番号
    text_id (int): 文番号
    last_text_id (int): 最後の文番号
    star (float): 評価
    title (str): レビューのタイトル
    review (str): レビュー全文
    text (str): 対象としている文
    result Optional(Dict[str, Tuple(AttrExtractionResult, ...)]): 抽出結果
  """
    review_id: int
    last_review_id: int
    text_id: int
    last_text_id: int
    star: float
    title: str
    review: str
    text: str
    result: Optional[Dict[str, Tuple[AttrExtractionResult, ...]]]

    @classmethod
    def from_dictionary(cls, dictionary: Dict[str, Any]):
        this = cls(**dictionary)
        result_dict = this.result
        if result_dict is None:
            return this
        for attr, results in result_dict.items():
            result_dict[attr] = AttrExtractionResult(*results)

        return this._replace(result=result_dict)
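
A hypothetical round-trip, assuming AttrExtractionResult is a two-field NamedTuple from the source module; all values below are made up.

raw = dict(review_id=1, last_review_id=5, text_id=0, last_text_id=2,
           star=4.0, title='Nice lens', review='Sharp and light.',
           text='Sharp and light.', result={'sharpness': ('high', 0.9)})
info = ReviewTextInfo.from_dictionary(raw)
# info.result['sharpness'] is now an AttrExtractionResult instance.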
Example 14
    def rollup(self, *terms, **kwargs):
        for_mysql = 'mysql' == kwargs.get('vendor')

        if self._mysql_rollup:
            raise AttributeError("'Query' object has no attribute '%s'" % 'rollup')

        terms = [Tuple(*term) if isinstance(term, (list, tuple, set))  # pypika Tuple term, not typing.Tuple
                 else term
                 for term in terms]

        if for_mysql:
            # MySQL rolls up all of the dimensions always
            if not terms and not self._groupbys:
                raise RollupException('At least one group is required. Call Query.groupby(term) or pass '
                                      'as parameter to rollup.')

            self._mysql_rollup = True
            self._groupbys += terms

        elif 0 < len(self._groupbys) and isinstance(self._groupbys[-1], Rollup):
            # If a rollup was added last, then append the new terms to the previous rollup
            self._groupbys[-1].args += terms

        else:
            self._groupbys.append(Rollup(*terms))
Example 15
 def test_no_tuple_instantiation(self):
     with self.assertRaises(TypeError):
         Tuple()
     with self.assertRaises(TypeError):
         Tuple[T]()
     with self.assertRaises(TypeError):
         Tuple[int]()
Example 16
def get_sentence_ranks(sentences: List[str],
                       tokens_per_sentence: List[SentList],
                       paragraphs: List[str],
                       tokens: List[str]) -> List[Tuple[int, str]]:
    """
    Returns a list of sentences, ordered by importance.

    Input(s):
    1) sentences - List of all sentences.
    2) tokens_per_sentence - 2d List containing tokens grouped
                             by sentence.
    3) paragraphs - List of all paragraphs.
    4) tokens - List of all unique, important tokens.

    Output(s):
    1) sentences - List containing sentences and their ranks (as tuples)
                   sorted in descending order by rank.
    """

    tf = get_tf(tokens_per_sentence)
    idf = get_idf(paragraphs, tokens)

    ranks = []
    for sent in range(len(sentences)):
        score = 0
        for token in tokens_per_sentence[sent]:
            if token in tokens:
                score += tf[token] * idf[token]
        ranks.append(score)

    sentences = sorted(list(zip(ranks, sentences)), reverse=True)

    return sentences
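
get_tf and get_idf are defined elsewhere; the sketches below are assumptions consistent with how they are indexed above (document-wide term frequency, paragraph-level inverse document frequency), not the source implementations.

import math
from collections import Counter

def get_tf(tokens_per_sentence):
    # Term frequency of each token across the whole document.
    return Counter(tok for sent in tokens_per_sentence for tok in sent)

def get_idf(paragraphs, tokens):
    # Inverse document frequency, treating each paragraph as a document.
    return {
        tok: math.log(len(paragraphs) / (1 + sum(tok in p for p in paragraphs)))
        for tok in tokens
    }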
Example 17
def preprocess(
        text: str) -> Tuple[List[str], List[str], List[str], List[SentTok]]:
    """
    Handles all the preprocessing required for ranking and scoring.

    Argument(s):
    1) text - The original body of text.

    Output(s):
    1) paragraphs - List of all paragraphs in original text.
    2) sentences - List of all sentences in original text.
    3) lemmatized_tokens - All unique, lemmatized tokens without
                           any stopwords.
    4) lemmatized_token_sentences - All unique, lemmatized tokens,
                                    grouped by sentence.
    """

    paragraphs = get_paragraphs(text)

    sentences = get_sentences(text)
    tokens_per_sentence, original_tokens = get_tokens(text)

    cleaned_tokens = clean(original_tokens)

    lemmatized_tokens = lemmatize_tokens(tokens=cleaned_tokens)
    lemmatized_token_sentences = lemmatize_tokens(
        tokens_per_sentence=tokens_per_sentence)

    return paragraphs, sentences, lemmatized_tokens, lemmatized_token_sentences
Example 18
    def parse(self, words: List[str]) -> Tuple[ArrCKY, ArrReverseCKY]:

        # Count number of words in sentence
        n = len(words)

        # Create n by n matrix of empty sets
        matrix = [[set() for _ in range(n)] for _ in range(n)]

        # Used for backtracking to determine component constituents
        constituents = [[defaultdict(list) for _ in range(n)]
                        for _ in range(n)]

        for j in range(1, n):

            for A in self.reverseTerminals[words[j]]:
                matrix[j - 1][j] = matrix[j - 1][j].union([A])
                constituents[j - 1][j][A] = None

            for i in reversed(range(j)):
                for k in range(i, j):
                    for B in matrix[i][k]:
                        for C in matrix[k][j]:
                            for A in self.reverseNonTerminals[tuple([B, C])]:
                                matrix[i][j] = matrix[i][j].union([A])
                                constituents[i][j][A].append(
                                    tuple([i, k, j, B, C]))

        return matrix, constituents
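
The two reverse lookup tables are built elsewhere; a minimal sketch of how they might be derived from CNF rules (the grammar below is hypothetical, the attribute names mirror those used in parse):

from collections import defaultdict

reverseTerminals = defaultdict(set)      # word -> {A} for rules A -> word
reverseNonTerminals = defaultdict(set)   # (B, C) -> {A} for rules A -> B C

rules = [('Det', 'the'), ('N', 'dog'), ('NP', ('Det', 'N'))]
for lhs, rhs in rules:
    if isinstance(rhs, tuple):
        reverseNonTerminals[rhs].add(lhs)
    else:
        reverseTerminals[rhs].add(lhs)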
Example 19
 def _mosaic_divide_points(self) -> Tuple[int, int]:
     """Returns a tuple of x and y which corresponds to the mosaic divide points."""
     x_point = tf.random.uniform(
         shape=[1],
         minval=tf.cast(
             self.out_size[0] * (self._minimum_mosaic_image_dim / 100),
             tf.int32),
         maxval=tf.cast(
             self.out_size[0] *
             ((100 - self._minimum_mosaic_image_dim) / 100),
             tf.int32,
         ),
         dtype=tf.int32,
     )
     y_point = tf.random.uniform(
         shape=[1],
         minval=tf.cast(
             self.out_size[1] * (self._minimum_mosaic_image_dim / 100),
             tf.int32),
         maxval=tf.cast(
             self.out_size[1] *
             ((100 - self._minimum_mosaic_image_dim) / 100),
             tf.int32,
         ),
         dtype=tf.int32,
     )
     return x_point, y_point
Example 20
def read_txt(txt: str,
             encoding: str = 'utf-8') -> Tuple[List[str], pandas.DataFrame]:
    """Reads a txt file written in the format below.

    # Lines starting with '# ' are treated as comments
    # DataFrame header info (product category, number of listing pages, listing page)
    category, last_page, link
    # From the next line on, values matching the header, separated by ', ' (example below)
    camera, 10, https://www.amazon.com

    Args:
        txt (str): A txt file written in the above format.
        encoding (str): The file encoding.

    Returns:
        The information given in txt, bundled as (columns, DataFrame).
    """
    with open(txt, mode='r', encoding=encoding) as fp:
        content = [
            line.strip().split(', ') for line in fp
            if line != '\n' and not line.startswith('# ')
        ]

    columns, data = content[0], content[1:]
    return columns, pandas.DataFrame(data, columns=columns)
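
A minimal round-trip sketch of the documented format; the file name is hypothetical.

sample = ('# DataFrame header (category, page count, listing page)\n'
          'category, last_page, link\n'
          'camera, 10, https://www.amazon.com\n')
with open('products.txt', mode='w', encoding='utf-8') as fp:
    fp.write(sample)

columns, frame = read_txt('products.txt')
assert columns == ['category', 'last_page', 'link']
assert frame.iloc[0]['category'] == 'camera'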
Example 21
    def add_neighbor(self, edge: "Edge") -> None:
        """
        Adds a new neighbor to the node.

        Arguments:
            edge (Edge): The edge that would connect this node with its neighbor.
        """
        if edge is None or (edge.source != self and edge.target != self):
            return

        if edge.source == self:
            other: Node = edge.target
        elif edge.target == self:
            other: Node = edge.source
        else:
            raise ValueError("Tried to add a neighbor with an invalid edge.")

        edge_key: Tuple[int, int] = edge.key

        # The graph is considered undirected, check neighbor existence accordingly.
        if self._neighbors.get(edge_key) or self._neighbors.get(
            (edge_key[1], edge_key[0])):
            return  # The neighbor is already added.

        self._neighbors[edge_key] = edge
        self.dispatch_event(NeighborAddedEvent(other))
Example 22
 def initialize_posts_wrappers(
         self) -> List[Tuple[Submission, SubmissionWrapper]]:
     posts_wrappers = []
     for url in self.urls:
         post = Submission(url)
         wrapper = SubmissionWrapper(post)
         posts_wrappers.append((post, wrapper))
     return posts_wrappers
Example 23
File: Token.py Project: Vvamp/VLang
    def next(self) -> Tuple[Token, TokenList]:
        """Return the first token and a new TokenList without that token

        Returns:
            Tuple[Token, TokenList]: A tuple of the next token in the list and a list without that token
        """

        return (self.tokenlist[0], TokenList(self.tokenlist[1:]))
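
A hedged consumption loop; the Token constructor arguments are hypothetical, and the loop assumes TokenList exposes the tokenlist attribute used above.

tokens = TokenList([Token('LET'), Token('IDENT'), Token('EOF')])
while tokens.tokenlist:
    token, tokens = tokens.next()
    print(token)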
Example 24
 def test_major(self) -> None:
     """test cases to test Major class"""
     self.path1: str = "/Users/bansripatel/Desktop/ssw-810"
     self.path2: str = (r"/Users/bansripatel/Desktop/ssw-810/"
                        r"810_startup/810_startup.db")
     self.rt: Repository = Repository(self.path1, self.path2, False)
     table3: List[Tuple[str, List[str], List[str]]] = [
         majors.major_data() for majors in self.rt._majors_dict.values()
     ]
     m_table: List[Tuple[str, List[str], List[str]]] = [
         ('SFEN', ['SSW 540', 'SSW 555', 'SSW 810'], ['CS 501', 'CS 546']),
         ('CS', ['CS 546', 'CS 570'], ['SSW 565', 'SSW 810']),
     ]
     self.assertEqual(m_table, table3)
Example 25
 def __getnewargs_ex__(self) -> Tuple[tuple, dict]:
     return ((self.graph, self.values()),
             dict(
                 records=self.records,
                 full=self.full,
                 region=self.region,
                 timespan=self.timespan,
                 conform=False,
             ))
Example 26
 def __init__(self):
     # map associating a name/id with a Comm
     self._actions: Dict[str, Action] = {}
     self._buttons: Dict[str, Set[Tuple[Union[
         QPushButton, QtStateButton], str]]] = defaultdict(set)
     self._shortcuts: Dict[str, Set[str]] = defaultdict(set)
     self.context = Context()  # Dict[str, Any] = {}
     self._stack: List[str] = []
     self._tooltip_include_action_name = False
Example 27
 def __init__(self):
     self.name: Optional[str] = None
     self.id: Optional[str] = None
     self.credits: Optional[Tuple[int, int]] = None
     self.description: Optional[str] = None
     self.semsters: Optional[List[str]] = None
     self.course_sections: Optional[List[NCSUCourseSection]] = None
     self.course_status: Optional[str] = None
     self.course_prereq: Optional[str] = None
Example 28
 def _fromrep(cls, rep):
     rows, cols = rep.shape
     flat_list = rep.to_sympy().to_list_flat()
     obj = Basic.__new__(cls, Integer(rows), Integer(cols),
                         Tuple(*flat_list, sympify=False))  # sympy.Tuple, not typing.Tuple
     obj._rows = rows
     obj._cols = cols
     obj._rep = rep
     return obj
Example 29
def find_shared_pts(
        pts1: List[Point],
        pts2: List[Point]) -> Tuple[Set[Point], Set[Point], Set[Point]]:
    a = {Point(p.x, p.y) for p in pts1}
    b = {Point(p.x, p.y) for p in pts2}
    both_ab = a.intersection(b)
    just_a = a.difference(both_ab)
    just_b = b.difference(both_ab)
    return (just_a, both_ab, just_b)
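
A small usage sketch, assuming Point is a hashable named tuple with x and y fields:

from typing import NamedTuple

class Point(NamedTuple):
    x: int
    y: int

just_a, both, just_b = find_shared_pts([Point(0, 0), Point(1, 1)],
                                       [Point(1, 1), Point(2, 2)])
assert both == {Point(1, 1)}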
Example 30
def main() -> None:
    '''Prints out a triangle classification based on the sample inputs.
        Triangle classification includes: equilateral, isosceles, scalene, right'''

    sample_inputs: List[Tuple[int, int, int]] = [(3, 3, 3), (4, 4, 3),
                                                 (5, 7, 9), (4, 3, 5)]

    for inputs in sample_inputs:
        side_a, side_b, side_c = inputs
        print(classify_triangle(side_a, side_b, side_c))
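
classify_triangle is not shown in this example; a minimal sketch consistent with the docstring (not the original implementation) might be:

def classify_triangle(a: int, b: int, c: int) -> str:
    # Classify by side lengths; check the right-triangle case before isosceles.
    if a == b == c:
        return 'equilateral'
    x, y, z = sorted((a, b, c))
    if x ** 2 + y ** 2 == z ** 2:
        return 'right'
    if a == b or b == c or a == c:
        return 'isosceles'
    return 'scalene'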