Example #1
0
 def build(**kwargs):
     """Construct and populate a WikiGraph from keyword arguments.

     Expects at least ``wiki`` and ``version`` keys in ``kwargs``; the
     version is normalized via ``dt.resolve_version`` before the graph
     components are built.  Returns the populated WikiGraph.
     """
     wg = WikiGraph()
     wg.wiki = kwargs["wiki"]
     # Normalize the requested version (presumably e.g. "latest") to a
     # concrete one before building — TODO confirm dt.resolve_version semantics.
     kwargs["version"] = dt.resolve_version(wg.wiki, kwargs["version"])
     wg.version = kwargs["version"]
     # p=pages, r=redirects, d=disambiguations, c=categories, cl=category links.
     p, r, d, c, cl = _make_graph_components(**kwargs)
     wg._pages = frozenbidict(p)
     wg._redirects = r
     wg._disambiguations = frozenbidict(d)
     wg._categories = frozenbidict(c)
     wg._category_links = cl
     # Include redirect and disambiguation pages when building the
     # page lookup structure.
     pages = wg.pages(redirect=True, disambi=True)
     wg._wpd.build(pages)
     return wg
Example #2
0
class Tags(DictMixin):
    """Mapping mixin that translates caller-facing tag names.

    ``FIELD_MAP`` (frozenbidict) maps the names callers use to the
    names actually stored; item access resolves through it, and
    iteration/repr translate back through ``FIELD_MAP.inv``.
    """

    FIELD_MAP = frozenbidict()

    def __getitem__(self, key):
        return super().__getitem__(self.FIELD_MAP.get(key, key))

    def __setitem__(self, key, value):
        return super().__setitem__(self.FIELD_MAP.get(key, key), value)

    def __delitem__(self, key):
        return super().__delitem__(self.FIELD_MAP.get(key, key))

    def __iter__(self):
        # Yield the caller-facing name for every public stored field.
        visible = (
            self.FIELD_MAP.inv.get(name, name)
            for name in self.__dict__
            if not name.startswith('_') and name != 'FIELD_MAP'
        )
        return iter(visible)

    def __repr__(self, repr_dict=None):
        # Render with caller-facing names rather than stored names.
        repr_dict = {
            self.FIELD_MAP.inv.get(name, name): value
            for name, value in self.__dict__.items()
            if not name.startswith('_') and name != 'FIELD_MAP'
        }
        return super().__repr__(repr_dict=repr_dict)
Example #3
0
def make_representation(*, _cfg):
    """Build the event representation plus start/end token ids.

    A new ``('bos',)`` token is assigned the next free id past the
    current vocabulary, the id of the existing ``('eos',)`` token is
    looked up, and the vocabulary is then frozen.

    Returns:
        (representation, start_id, end_id)
    """
    representation = _cfg['representation'].configure(
        EventRepresentationProcessor,
        timing=_cfg['timing'].configure(BeatShiftTiming),
        use_end_of_sequence_event=True)

    vocabulary = dict(representation.vocab)
    bos_id = max(vocabulary.values()) + 1
    vocabulary[('bos', )] = bos_id
    eos_id = vocabulary[('eos', )]
    representation.vocab = bidict.frozenbidict(vocabulary)

    return representation, bos_id, eos_id
Example #4
0
class Tags(AttrMapping):
    """Base class for tags objects.

    ``FIELD_MAP`` (frozenbidict) translates the names callers use into
    the names stored on the instance.  Both attribute and item access
    resolve through it; iteration and repr translate back through
    ``FIELD_MAP.inv``.
    """

    FIELD_MAP = frozenbidict()

    def __getattr__(self, attr):
        return super().__getattr__(self.FIELD_MAP.get(attr, attr))

    def __setattr__(self, attr, value):
        super().__setattr__(self.FIELD_MAP.get(attr, attr), value)

    def __delattr__(self, attr):
        super().__delattr__(self.FIELD_MAP.get(attr, attr))

    def __getitem__(self, key):
        return super().__getitem__(self.FIELD_MAP.get(key, key))

    def __setitem__(self, key, value):
        super().__setitem__(self.FIELD_MAP.get(key, key), value)

    def __delitem__(self, key):
        super().__delitem__(self.FIELD_MAP.get(key, key))

    def __iter__(self):
        # Yield the caller-facing name for every public stored field.
        visible = (
            self.FIELD_MAP.inv.get(name, name)
            for name in self.__dict__
            if not name.startswith('_') and name != 'FIELD_MAP'
        )
        return iter(visible)

    def __repr__(self, repr_dict=None):
        # Render with caller-facing names rather than stored names.
        repr_dict = {
            self.FIELD_MAP.inv.get(name, name): value
            for name, value in self.__dict__.items()
            if not name.startswith('_') and name != 'FIELD_MAP'
        }
        return super().__repr__(repr_dict=repr_dict)
    def __init__(self, **n_states):
        """Record the state-space layout from per-dimension sizes.

        Each keyword names one dimension; its value is the number of
        states along that dimension.
        """
        self.state_names = list(n_states)
        self.n_states = list(n_states.values())
        self.n_dimensions = len(self.n_states)
        self.total_states = np.prod(self.n_states)

        # Multi-dimensional spaces additionally get a bidirectional
        # (state tuple <-> flat index) lookup table.
        if self.n_dimensions > 1:
            grid = self.get_states_mesh().squeeze().T
            state_tuples = [tuple(row) for row in grid]
            flat_indices = np.arange(len(state_tuples))
            self.values_dict = frozenbidict(zip(state_tuples, flat_indices))
Example #6
0
def test_Tags():
    """Tags resolves aliases via FIELD_MAP for get/set/del, iter and repr."""
    tags = Tags(key1='value1', key2='value2')
    tags.FIELD_MAP = frozenbidict({'artist': 'key1', 'title': 'key2'})

    # Alias lookups resolve to the mapped underlying keys.
    assert tags['artist'] == tags['key1']
    assert tags['title'] == tags['key2']

    # Unmapped keys pass straight through for set and delete.
    tags['key3'] = 'value3'
    del tags['key3']
    assert 'key3' not in tags

    # Iteration yields the alias names, not the stored names.
    assert list(iter(tags)) == ['artist', 'title']

    assert repr(tags) == "<Tags ({'artist': 'value1', 'title': 'value2'})>"
Example #7
0
class RIFFTags(Tags):
	"""RIFF INFO chunk tags.

	``FIELD_MAP`` maps common tag aliases to the four-character RIFF
	INFO field identifiers.
	"""

	FIELD_MAP = frozenbidict(
		{
			'album': 'IPRD',
			'albumsort': 'TSA',
			'artist': 'IART',
			'comment': 'ICMT',
			'copyright': 'ICOP',
			'date': 'ICRD',
			'encodedby': 'IENC',
			'genre': 'IGNR',
			'language': 'ILNG',
			'rating': 'IRTD',
			'title': 'INAM',
			'tracknumber': 'ITRK'
		}
	)

	@datareader
	@classmethod
	def load(cls, data):
		"""Parse a RIFF INFO chunk into a RIFFTags instance.

		Raises:
			InvalidChunk: If ``data`` does not start with ``b'INFO'``.
		"""
		if data.read(4) != b'INFO':
			raise InvalidChunk('Valid RIFF INFO chunk not found.')

		fields = {}

		field = data.read(4)
		while len(field):
			# RIFF stores sizes little-endian; '<I' reads them portably
			# (bare 'I' would use the machine's native byte order).
			size = struct.unpack('<I', data.read(4))[0]
			value = data.read(size).strip(b'\x00').decode('utf-8')
			fields[field.decode('utf-8')] = value

			# Skip NUL padding between fields, then step back one byte
			# if we over-read into the next field identifier.
			b = data.read(1)
			while b == b'\x00':
				b = data.read(1)

			if b:
				data.seek(-1, os.SEEK_CUR)

			field = data.read(4)

		return cls(fields)
Example #8
0
class RIFFTags(Tags):
    """RIFF INFO chunk tags.

    ``FIELD_MAP`` maps common tag aliases to the four-character RIFF
    INFO field identifiers.
    """

    FIELD_MAP = frozenbidict({
        'album': 'IPRD',
        'artist': 'IART',
        'comment': 'ICMT',
        'copyright': 'ICOP',
        'date': 'ICRD',
        'encodedby': 'IENC',
        'genre': 'IGNR',
        'language': 'ILNG',
        'rating': 'IRTD',
        'title': 'INAM',
        'tracknumber': 'ITRK'
    })

    @classmethod
    def load(cls, data):
        """Parse a RIFF INFO chunk into a RIFFTags instance."""
        if not isinstance(data, DataReader):  # pragma: nocover
            data = DataReader(data)

        fields = {}

        field = data.read(4)
        while len(field):
            # RIFF stores sizes little-endian; '<I' reads them portably
            # (bare 'I' would use the machine's native byte order).
            size = struct.unpack('<I', data.read(4))[0]
            value = data.read(size).strip(b'\x00').decode('utf-8')
            fields[field.decode('utf-8')] = value

            # Skip NUL padding between fields, then step back one byte
            # if we over-read into the next field identifier.
            b = data.read(1)
            while b == b'\x00':
                b = data.read(1)

            if b:
                data.seek(-1, os.SEEK_CUR)

            field = data.read(4)

        return cls(fields)
import pytest
from collections import Counter, OrderedDict, defaultdict
from bidict import bidict, frozenbidict, namedbidict
from itertools import product


class dictsubclass(dict):
    """Trivial dict subclass used in the equality tests below."""
    pass


# The same two items in each plain-dict flavor...
d = dict(H='hydrogen', He='helium')
c = Counter(d)
o = OrderedDict(d)
dd = defaultdict(int, d)
s = dictsubclass(d)

# ...and in each bidict flavor.
b = bidict(d)
f = frozenbidict(d)
n = namedbidict('named', 'keys', 'vals')(d)

dicts = (d, c, o, dd, s)
bidicts = (b, f, n)


@pytest.mark.parametrize('d, b', product(dicts, bidicts))
def test_eq(d, b):
    """Every dict variant compares equal to every bidict variant."""
    assert d == b
Example #10
0
                    yield Diagnostic(
                        Diagnostic.Messages.ARRAY_INDEX_WRONG_ORDER,
                        self.variable_name,
                        parseinfo=self.ast.parseinfo)
                last_index = idx

    def validate(self, lvalue=False):
        """Yield Diagnostics for invalid uses of this variable reference.

        Args:
            lvalue: True when the reference is the target of an
                assignment; this skips the initialization check and
                additionally validates the array index order.
        """
        if not self.variable:
            yield Diagnostic(Diagnostic.Messages.VARIABLE_NOT_DECLARED,
                             self.variable_name,
                             parseinfo=self.ast.parseinfo)
        elif self.variable not in self.context.initialized_variables and not lvalue:
            yield Diagnostic(Diagnostic.Messages.VARIABLE_NOT_INITIALIZED,
                             self.variable.name,
                             parseinfo=self.ast.parseinfo)
        elif isinstance(self.variable.value_type, ArrayType):
            # Arrays must be allocated before use, and each index
            # expression must itself be valid.
            if self.variable not in self.context.allocated_variables_mapping:
                yield Diagnostic(Diagnostic.Messages.VARIABLE_NOT_ALLOCATED,
                                 self.variable.name,
                                 parseinfo=self.ast.parseinfo)
            for index in self.indices:
                yield from index.validate()
            if lvalue:
                yield from self.validate_array_indexes()


# Bidirectional registry: AST node name <-> expression class.
expression_classes = frozenbidict({
    "int_literal": IntLiteralExpression,
    "reference": ReferenceExpression,
})
Example #11
0
class ImageDataGeneration:
    """It has functionality to create generators to feed data to keras.
    """
    #: Maps the externally-used subset names to ImageDataSubset members.
    valid_subsets = frozenbidict({
        'training': ImageDataSubset.Training,
        'validation': ImageDataSubset.Validation,
        'prediction': ImageDataSubset.Prediction
    })

    def __init__(self,
                 dataframe,
                 input_params,
                 image_generation_params,
                 transformer=None,
                 randomize=True):
        """It initializes the dataframe object.

        Arguments:
            dataframe {Pandas DataFrame} -- A pandas dataframe object with columnar data with image names and labels.
            input_params {A InputDataParameter object} -- An input parameter object.
            image_generation_params {A ImageGenerationParameters object} -- A training data parameter object.
            transformer {A ImageDataTransformation object} -- It is used to transform the image objects.
            randomize {boolean} -- It indicates randomization of the input dataframe.
        """
        #Required parameters
        self._dataframe = dataframe
        self._input_params = input_params
        self._image_generation_params = image_generation_params

        #Optional parameters
        self._transformer = transformer
        self._randomize = randomize

        #Caching
        self._image_cache = LRUCache(
            self._image_generation_params.image_cache_size)

        #Logging
        self._logger = logging.get_logger(__name__)

        #Metrics
        self._load_slice_metric = 'get_image_objects'

        #Create metrics
        Metric.create(self._load_slice_metric)

        #Compute the training and validation boundary using the validation split.
        boundary = int(
            ceil(
                len(self._dataframe) *
                (1. - self._image_generation_params.validation_split)))
        self._logger.info(
            "Validation split: {} Identified boundary: {}".format(
                self._image_generation_params.validation_split, boundary))

        #Split the dataframe into training and validation.
        self._main_df = self._dataframe.loc[:(boundary - 1), :]
        self._validation_df = self._dataframe.loc[boundary:, :].reset_index(
            drop=True)

        n_dataframe = len(self._dataframe)
        n_main_df = len(self._main_df)
        n_validation_df = len(self._validation_df)

        self._logger.info(
            "Dataframe size: {} main set size: {} validation size: {}".format(
                n_dataframe, n_main_df, n_validation_df))

    def _get_images(self, n_images):
        """It samples image names from the main dataframe.

        Arguments:
            n_images {int} -- The number of distinct image names to sample.

        Returns:
            {list} -- A list of sampled image names.
        """
        df_size = len(self._main_df)
        loop_count = 0
        images = set()

        #Sample random rows until enough distinct images are collected,
        #bounded by the dataframe size to guarantee termination.
        while len(images) <= n_images and loop_count < df_size:
            random_index = randrange(df_size)

            for image_col in self._image_generation_params.image_cols:
                images.add(self._main_df.loc[random_index, image_col])

            loop_count += 1

        return list(images)

    def fit(self, n_images):
        """It calculates statistics on the input dataset. These are used to perform transformation.

        Arguments:
            n_images {int} -- The number of images to sample for fitting.

        Raises:
            ValueError: If n_images is not a positive integer.
        """
        if n_images <= 0:
            #Bug fix: the ValueError was previously constructed but never raised.
            raise ValueError(
                "Expected a positive integer for n_images. Got: {}".format(
                    n_images))

        #Input list for data fitting
        images = self._get_images(n_images)

        self._logger.info("%d images to use for data fitting", len(images))

        #Image objects
        img_objs_map = self._get_image_objects(images)
        img_objs = np.asarray(list(img_objs_map.values()))

        self._logger.info(
            "fit:: images: {} to the transformer to compute statistics".format(
                img_objs.shape))

        #Fit the data in the transformer
        self._transformer.fit(img_objs)

    def flow(self, subset='training'):
        """It creates an iterator to the input dataframe.

        Arguments:
            subset {string} -- A string to indicate select between training and validation splits.

        Raises:
            ValueError: If subset is not one of the valid_subsets keys.
        """
        #Validate subset parameter
        if not ImageDataGeneration.valid_subsets.get(subset):
            raise ValueError("Valid values of subset are: {}".format(
                list(ImageDataGeneration.valid_subsets.keys())))

        #Qualified subset
        q_subset = ImageDataGeneration.valid_subsets[subset]

        #Dataframe placeholder
        dataframe = None

        #Pick the correct dataframe
        if q_subset == ImageDataSubset.Training or q_subset == ImageDataSubset.Prediction:
            dataframe = self._main_df
        elif q_subset == ImageDataSubset.Validation:
            dataframe = self._validation_df

        self._logger.info("flow:: subset: {} dataset size: {}".format(
            subset, len(dataframe)))

        return ImageDataIterator(self,
                                 dataframe,
                                 self._image_generation_params.batch_size,
                                 q_subset,
                                 randomize=self._randomize)

    def _load_subset_slice(self, df_slice, subset):
        """It loads the image objects and the labels for the data frame slice.

        Arguments:
            df_slice {A pandas.DataFrame object} -- A pandas DataFrame object containing input data and labels.
            subset {An ImageDataSubset member} -- The subset being loaded.

        Returns:
            {An object} -- A list of image objects in prediction phase. A tuple of image objects and their labels in training phase.
        """
        self._logger.info('Using subset: %s', subset)

        #Results placeholder
        results = None

        #Load the slice
        if subset == ImageDataSubset.Training or subset == ImageDataSubset.Validation:
            results = self._load_train_phase_slice(df_slice)
        elif subset == ImageDataSubset.Prediction:
            results = self._load_predict_phase_slice(df_slice)

        return results

    def _load_train_phase_slice(self, df_slice):
        """It loads the image objects and the labels for the data frame slice.

        Arguments:
            df_slice {A pandas.DataFrame object} -- A pandas DataFrame object containing input data and labels.

        Returns:
            (Numpy object, Numpy object) -- A tuple of input data and labels.
        """
        return self._load_slice(df_slice)

    def _load_predict_phase_slice(self, df_slice):
        """It loads the image objects for the data frame slice.

        Arguments:
            df_slice {A pandas.DataFrame object} -- A pandas DataFrame object containing input data and labels.

        Returns:
            {Numpy object} -- The input image data (labels are discarded).
        """
        images, _ = self._load_slice(df_slice)

        return images

    def _load_slice(self, df_slice):
        """It loads the image objects for the data frame slice.

        Arguments:
            df_slice {A pandas.DataFrame object} -- A pandas DataFrame object containing input data and labels.

        Returns:
            (Numpy object, Numpy object) -- A tuple of input data and labels.
        """
        #Calculate the number of classes
        num_classes = self._image_generation_params.num_classes

        #Process labels; one-hot encode only for more than two classes.
        df_slice_y = df_slice[self._image_generation_params.label_col].values
        df_slice_y_categorical = to_categorical(
            df_slice_y,
            num_classes=num_classes) if num_classes > 2 else df_slice_y

        #Process image columns
        df_slice_x = []

        for x_col in self._image_generation_params.image_cols:
            images = df_slice[x_col].tolist()

            #Load images
            img_objs_map = self._get_image_objects(images)

            #Arrange them in the input order
            img_objs = [img_objs_map[image] for image in images]
            img_objs = np.asarray(img_objs)

            if x_col in self._image_generation_params.image_transform_cols:
                img_objs = self._apply_transformation(img_objs)

            df_slice_x.append(img_objs)

        return (df_slice_x, df_slice_y_categorical)

    def _get_image_objects(self, images):
        """It loads the image objects for the list of images.
        If the image is available, it is loaded from the cache.
        Otherwise, it is loaded from the disk.

        Arguments:
            images {[string]} -- A list of image names.

        Returns:
            {dict} -- A mapping of image name to its loaded image object.
        """
        #Start recording time
        record_handle = Metric.start(self._load_slice_metric)

        img_objs = {}
        candidate_images = set(images)
        for image in candidate_images:
            #Get the image object for the current image from the cache.
            #Add to the dictionary, if it is not None.
            img_obj = self._image_cache.get(image)

            if img_obj is not None:
                img_objs[image] = img_obj

        #Create a list of missing images.
        cached_images = set(img_objs.keys())
        missing_images = [
            image for image in candidate_images if not image in cached_images
        ]

        self._logger.debug("Cached images: {} missing images: {}".format(
            cached_images, missing_images))

        #Load the missing image objects, and apply parameters.
        missing_img_objs = utils.imload(
            self._image_generation_params.dataset_location, missing_images,
            self._image_generation_params.input_shape[:2])
        missing_img_objs = self._apply_parameters(missing_img_objs)

        #Update the cache
        self._image_cache.update(zip(missing_images, missing_img_objs))

        #Update the image object dictionary with the missing image objects.
        for image, img_obj in zip(missing_images, missing_img_objs):
            img_objs[image] = img_obj

        #End recording time
        Metric.stop(record_handle, self._load_slice_metric)

        return img_objs

    def _apply_parameters(self, img_objs):
        """It processes image objects based on the input parameters.
        e.g. normalization, reshaping etc.

        Arguments:
            img_objs {numpy.ndarray} -- A numpy array of image objects.
        """
        if self._image_generation_params.normalize:
            img_objs = utils.normalize(img_objs)

        return img_objs

    def _apply_transformation(self, img_objs):
        """It applies the configured transformer to the image objects,
        passing them through unchanged when no transformer is set.

        Arguments:
            img_objs {numpy.ndarray} -- A numpy array of image objects.
        """
        transformed_objects = img_objs

        if self._transformer:
            transformed_objects = self._transformer.transform(img_objs)

        return transformed_objects
Example #12
0

class _DictSubcls(dict):
    """Trivial dict subclass used in the comparison fixtures below."""
    pass


class _OrderedBidictSubcls(OrderedBidict):
    """Trivial OrderedBidict subclass used in the comparison fixtures below."""
    pass


# pylint: disable=C0103
items = [('a', 1), ('b', 2)]  # use int values so makes sense with Counter
itemsreversed = list(reversed(items))

# The same items in every bidict flavor; order-sensitive flavors also get
# a reversed-insertion-order instance.
bidict_of_items = bidict(items)
frozenbidict_of_items = frozenbidict(items)
namedbidict_of_items = namedbidict('named', 'keys', 'vals')(items)
orderedbidict_of_items = OrderedBidict(items)
orderedbidict_of_itemsreversed = OrderedBidict(itemsreversed)
orderedbidictsubcls_of_items = _OrderedBidictSubcls(items)
orderedbidictsubcls_of_itemsreversed = _OrderedBidictSubcls(itemsreversed)
frozenorderedbidict_of_items = FrozenOrderedBidict(items)
frozenorderedbidict_of_itemsreversed = FrozenOrderedBidict(itemsreversed)
bidicts = (
    bidict_of_items,
    frozenbidict_of_items,
    namedbidict_of_items,
    orderedbidict_of_items,
    orderedbidict_of_itemsreversed,
    orderedbidictsubcls_of_items,
    orderedbidictsubcls_of_itemsreversed,
Example #13
0
def test_frozenbidict_hash(d):
    """frozenbidict is hashable: hash() succeeds (and is truthy here)."""
    frozen = frozenbidict(d)
    assert hash(frozen)
Example #14
0
import string
from math import ceil

import bidict

# Bidirectional map from byte values (0-255) to display symbols:
# 0 -> '.', 1-24 -> single letters 'A'..'X', 25-255 -> two-character
# codes whose leading character advances every 24 values.
SYMBOL_MAP = bidict.frozenbidict({
    0: '.',
    **{num: chr(64 + num)
       for num in range(1, 25)},
    **{
        num: chr(110 + ceil(num / 24)) + chr(64 + (num % 24 or 24))
        for num in range(25, 256)
    }
})

# Lowercase letters, digits and punctuation; '.' is excluded because it
# is already the symbol for zero above.
SAFE_CHARS = string.ascii_lowercase + string.digits + string.punctuation.replace(
    '.', '')


def maybe_double(symbol: str):
    """Repeat a symbol whose UTF-8 encoding is a single byte.

    Multi-byte symbols are returned unchanged, so every result encodes
    to at least two bytes.
    """
    if len(symbol.encode()) >= 2:
        return symbol
    return symbol * 2
Example #15
0
class QuestionnaireImageWidget(QWidget):
    """Qt widget presenting an image-based questionnaire.

    Questions are loaded from a TOML file (``load_file``); each question
    carries a color, question text, answers, a layout format and an
    optional image.  Callbacks for next/finish/previous/back/answer can
    be injected, defaulting to the ``default_*`` methods.
    """

    # Bidirectional map: answer index (0-9) <-> the Qt key code used to
    # select it from the keyboard (see keyPressEvent).
    number_key = frozenbidict({
        0: QtCore.Qt.Key_0,
        1: QtCore.Qt.Key_1,
        2: QtCore.Qt.Key_2,
        3: QtCore.Qt.Key_3,
        4: QtCore.Qt.Key_4,
        5: QtCore.Qt.Key_5,
        6: QtCore.Qt.Key_6,
        7: QtCore.Qt.Key_7,
        8: QtCore.Qt.Key_8,
        9: QtCore.Qt.Key_9
    })

    def __init__(self,
                 next_action=None,
                 finish_action=None,
                 previous_action=None,
                 back_action=None,
                 answer_action=None,
                 **kwargs):
        """Wire up the UI and install the given (or default) callbacks."""
        super().__init__(**kwargs)
        # Each callback falls back to the corresponding default_* method.
        if next_action is None:
            self.next_action = self.default_next
        else:
            self.next_action = next_action
        if finish_action is None:
            self.finish_action = self.default_finish
        else:
            self.finish_action = finish_action
        if previous_action is None:
            self.previous_action = self.default_previous
        else:
            self.previous_action = previous_action
        if back_action is None:
            self.back_action = self.default_back
        else:
            self.back_action = back_action
        if answer_action is None:
            self.answer_action = self.default_answer
        else:
            self.answer_action = answer_action

        self.ui = Ui_EmotionQuestionnaireImage()
        self.ui.setupUi(self)

        self.ui.continueButton.clicked.connect(self._continue)
        self.ui.backButton.clicked.connect(self.previous)

        # Shift+Return also advances to the next question.
        self.continueAction = QAction("Continue", self)
        self.continueAction.setShortcut(QKeySequence("Shift+Return"))
        self.continueAction.triggered.connect(self._continue)
        self.addAction(self.continueAction)

        # Questionnaire state: file path, question list, current index,
        # and a bidirectional (answer text <-> checkbox widget) map.
        self._path = None
        self.text = None
        self.qa = []
        self.answers = []
        self.answer_key = bidict()
        self.q_index = 0

        # Maximum number of simultaneously-checked answers (see limit_answer).
        self.multi_answers = 1
        self.current_question = None
        self.current_answers = None
        self.current_color = None
        self.selected_answer = ""

    @property
    def path(self):
        # Path to the questionnaire TOML file (pathlib.Path or None).
        return self._path

    @path.setter
    def path(self, value):
        # Coerce strings (or other path-likes) to pathlib.Path.
        if isinstance(value, pathlib.Path) or value is None:
            self._path = value
        else:
            self._path = pathlib.Path(value)

    def load_file(self, file):
        """Load questions from a TOML file and display the first one."""
        if file is not None:
            self.path = file
        # as_posix() == '.' means no usable path was set.
        if self.path.as_posix() != '.':
            q_file = toml.load(self.path)
            self.qa = q_file['Questions']
            self.q_index = 0
            qa = self.qa[self.q_index]
            self.set_color(qa['color'])
            self.set_question(qa['question'])
            self.set_answers(qa['answers'], qa['format'])
            if 'image' in qa:
                self.set_image(pathlib.Path(qa['image']))

    def set_image(self, file):
        """Show the given image file, scaled to a height of 480 px."""
        if file is not None:
            pixmap = QtGui.QPixmap(file.as_posix())
            repixmap = pixmap.scaledToHeight(480)
            self.ui.image.setPixmap(repixmap)
            self.current_image = file

    def set_color(self, color):
        """Set the color spacer's background; color is an "r,g,b" string."""
        if color is not None:
            self.ui.colorSpacer.setStyleSheet('background-color:rgb(' + color +
                                              ')')
            self.current_color = color

    def set_question(self, question):
        """Display the question text."""
        self.ui.questionBrowser.setText(question)
        self.current_question = question

    def set_answers(self, answers, _format=None):
        """Rebuild the answer checkboxes in a grid.

        'scale' format lays answers out in a single row; otherwise they
        are split across two rows.
        """
        self.remove_answers()
        self.current_answers = answers
        if _format == 'scale':
            size = (1, len(answers))
        else:
            size = (2, -(-len(answers) // 2))
        # b_size adds one cell of spacer padding on every side.
        b_size = (size[0] + 2, size[1] + 2)
        topSpacer = QtWidgets.QSpacerItem(20, 5, QtWidgets.QSizePolicy.Minimum,
                                          QtWidgets.QSizePolicy.Fixed)
        bottomSpacer = QtWidgets.QSpacerItem(20, 5,
                                             QtWidgets.QSizePolicy.Minimum,
                                             QtWidgets.QSizePolicy.Fixed)
        leftSpacer = QtWidgets.QSpacerItem(5, 20, QtWidgets.QSizePolicy.Fixed,
                                           QtWidgets.QSizePolicy.Minimum)
        rightSpacer = QtWidgets.QSpacerItem(5, 20, QtWidgets.QSizePolicy.Fixed,
                                            QtWidgets.QSizePolicy.Minimum)
        self.ui.answersLayout.addItem(topSpacer, 0, 0, 1, b_size[1])
        self.ui.answersLayout.addItem(bottomSpacer, b_size[1], 0, 1, b_size[1])
        self.ui.answersLayout.addItem(leftSpacer, 1, 0, size[1], 1)
        self.ui.answersLayout.addItem(rightSpacer, 1, size[1] + 1, size[1], 1)

        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
                                           QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        font = QtGui.QFont()
        font.setPointSize(18)
        self.ui.answerChecks = []
        # One checkbox per answer, placed row-major into the grid and
        # registered in answer_key (answer text -> widget).
        for i in range(0, size[0]):
            for j in range(0, size[1]):
                a_index = i * size[1] + j
                if a_index < len(answers):
                    answer_check = QtWidgets.QCheckBox(self.ui.answersBox)
                    sizePolicy.setHeightForWidth(
                        answer_check.sizePolicy().hasHeightForWidth())
                    answer_check.setSizePolicy(sizePolicy)
                    answer_check.setFont(font)
                    answer_check.setObjectName('answer_check_' + str(a_index))
                    answer_check.setText(
                        QtWidgets.QApplication.translate(
                            "EmotionQuestionnaire", answers[a_index], None,
                            -1))
                    answer_check.clicked.connect(
                        self.generate_answer_function(answer_check))
                    self.ui.answerChecks.append(answer_check)
                    self.ui.answersLayout.addWidget(answer_check, i + 1, j + 1,
                                                    1, 1)
                    self.answer_key[answers[a_index]] = answer_check

    def remove_answers(self):
        """Clear the answer map and destroy all widgets in the answers layout."""
        self.answer_key.clear()
        while not self.ui.answersLayout.isEmpty():
            item = self.ui.answersLayout.takeAt(0)
            if not item.isEmpty():
                widget = item.widget()
                widget.deleteLater()
                del widget
            self.ui.answersLayout.removeItem(item)

    def generate_answer_function(self, answer_check):
        # Bind answer_check now so each checkbox gets its own slot
        # (avoids the late-binding closure pitfall).
        return lambda v: self.answer_toggle(answer_check, v)

    def answer_toggle(self, answer_check, value):
        """Record the toggled answer and enforce the answer-count limit."""
        self.answer(answer_check, value)
        if self.multi_answers > 0:
            self.limit_answer(self.multi_answers, answer_check)

    def answer(self, check_widget, value):
        """Record the selected answer and fire the answer callback."""
        # Reverse-look up the answer text for the toggled widget.
        answer = self.answer_key.inverse[check_widget]
        if answer is None:
            answer = ""
        self.selected_answer = answer
        event = {
            'type_': 'Questionnaire_AnswerSelected',
            'File': self.path.name,
            'Question': self.current_question,
            'Answer': answer,
            'Value': value
        }
        self.answer_action(event=event, caller=self)

    def limit_answer(self, limit, last):
        """Uncheck older answers so at most ``limit`` stay checked.

        ``last`` (the most recently toggled widget) is exempted.
        """
        available = limit
        other_answers = self.answer_key.copy()
        other_answers.inverse.pop(last)
        for answer in other_answers.values():
            if answer.isChecked():
                if available > 1:
                    available -= 1
                else:
                    answer.setChecked(False)

    def default_answer(self, event=None, caller=None):
        # No-op default for the answer callback.
        pass

    def keyPressEvent(self, event):
        """Handle digit keys (select answer), Enter (continue), Backspace (back)."""
        key = event.key()
        if key in self.number_key.inverse:
            # Map the pressed digit key back to an answer index.
            number = self.number_key.inverse[key]
            if number < len(self.current_answers):
                check_widget = self.answer_key[self.current_answers[number]]
                check_widget.setChecked(True)
                self.answer_toggle(check_widget, True)
        elif key == QtCore.Qt.Key_Enter or key == QtCore.Qt.Key_Return:
            self._continue()
        elif key == QtCore.Qt.Key_Backspace:
            self.previous()
        event.accept()

    def _continue(self):
        """Advance to the next question, or finish after the last one."""
        self.q_index += 1
        event = {
            'type_': 'Questionnaire_AnswerConfirmed',
            'File': self.path.name,
            'Question': self.current_question,
            'Answer': self.selected_answer
        }
        if self.q_index < len(self.qa):
            self.next_action(event=event, caller=self)
        else:
            self.finish_action(event=event, caller=self)

    def default_next(self, event=None, caller=None):
        """Display the current question, relabeling buttons for the last one."""
        if self.q_index < len(self.qa) - 1:
            self.ui.continueButton.setText(
                QtWidgets.QApplication.translate("EmotionQuestionnaire",
                                                 "Next", None, -1))
            self.ui.backButton.setText(
                QtWidgets.QApplication.translate("EmotionQuestionnaire",
                                                 "Previous", None, -1))
        else:
            # Last question: the continue button reads "Done".
            self.ui.continueButton.setText(
                QtWidgets.QApplication.translate("EmotionQuestionnaire",
                                                 "Done", None, -1))
            self.ui.backButton.setText(
                QtWidgets.QApplication.translate("EmotionQuestionnaire",
                                                 "Previous", None, -1))
        qa = self.qa[self.q_index]
        self.set_color(qa['color'])
        self.set_question(qa['question'])
        self.set_answers(qa['answers'], qa['format'])
        if 'image' in qa:
            self.set_image(pathlib.Path(qa['image']))

    def default_finish(self, event=None, caller=None):
        # Placeholder default for the finish callback.
        print("Not Connected")

    def previous(self):
        """Step back one question, or exit when already at the first."""
        if self.q_index > 0:
            self.q_index -= 1
            event = {
                'type_': 'Questionnaire_AnswerRetracted',
                'File': self.path.name,
                'Question': self.qa[self.q_index]['question']
            }
            self.previous_action(event=event, caller=self)
        else:
            event = {'type_': 'Questionnaire_Exited', 'File': self.path.name}
            self.back_action(event=event, caller=self)

    def default_previous(self, event=None, caller=None):
        """Display the current question, relabeling the back button at the start."""
        if self.q_index > 0:
            self.ui.continueButton.setText(
                QtWidgets.QApplication.translate("EmotionQuestionnaire",
                                                 "Next", None, -1))
            self.ui.backButton.setText(
                QtWidgets.QApplication.translate("EmotionQuestionnaire",
                                                 "Previous", None, -1))
        else:
            # First question: the back button reads "Exit".
            self.ui.continueButton.setText(
                QtWidgets.QApplication.translate("EmotionQuestionnaire",
                                                 "Next", None, -1))
            self.ui.backButton.setText(
                QtWidgets.QApplication.translate("EmotionQuestionnaire",
                                                 "Exit", None, -1))
        qa = self.qa[self.q_index]
        self.set_color(qa['color'])
        self.set_question(qa['question'])
        self.set_answers(qa['answers'], qa['format'])
        if 'image' in qa:
            self.set_image(pathlib.Path(qa['image']))

    def default_back(self, event=None, caller=None):
        # Placeholder default for the back/exit callback.
        print('There is no going back')
Example #16
0
from telegram.ext import Updater, Dispatcher, Filters, CommandHandler, MessageHandler

import config
from lib import locale, chart_maker, currencymap
from lib.exchange_api import ExchangeError
from lib.exchange_api.cache_wrapper import CacheWrapper
from lib.exchange_api.exchangeratesapi_io import ExchangeRatesAPIio

# Configure root logging once for the whole bot process.
logging.basicConfig(format='[%(asctime)s|%(name)s|%(levelname)s]  %(message)s',
                    level=logging.INFO)
logger = logging.getLogger(__name__)

# Two-way mapping between a currency symbol (e.g. '$') and its currency
# code.  Only single-character symbols are kept so that symbol lookup in
# user input stays unambiguous.  (bidict is presumably imported above
# this chunk -- confirm.)
symbol_code_map = bidict.frozenbidict({
    curr_info['symbol']: curr_code
    for curr_code, curr_info in currencymap.get().items()
    if len(curr_info['symbol']) == 1  # only symbols
})

# Exchange-rate backend; wrapped by a cache below.
exchange_api = ExchangeRatesAPIio()

# == WARN ZONE ==
# NOTE: quick-and-dirty section below; the author flagged it as needing a
# proper rewrite.

# get cache backend
if config.IS_LOCAL:
    import cachetools

    cache_backend = cachetools.TTLCache(64, ttl=config.CACHE_TTL_SECONDS)
else:
Example #17
0
    label_names = set(input_data_df[label_col].tolist())
    label_names_size = len(label_names)
    logger.info('Loaded %d label names', label_names_size)

    #Generate label classes
    label_classes = list(range(len(label_names)))
    label_classes_size = len(label_classes)
    logger.info('Created %d label classes', label_classes_size)

    #Output dataframe
    output_df = DataFrame(columns=list(input_data_df))

    print(list(input_data_df))

    #Create names to class mappings
    mappings = frozenbidict(zip(label_names, label_classes))

    for _, row in input_data_df.iterrows():
        label_name = row[label_col]
        label_class = mappings[label_name]

        #Update the row
        row[label_col] = label_class

    #Save the updated dataframe
    input_data_df.to_csv(input_data)

    #Save mappings
    with mapping_keys.open(mode='wb') as handle:
        pickle_dump(mappings, handle)
Example #18
0
 frozenbidict(
     {
         'album': 'TAL',
         'albumsort': 'TSA',
         'albumartist': 'TP2',
         'albumartistsort': 'TS2',
         'artist': 'TP1',
         'artistsort': 'TSP',
         'audiodelay': 'TDY',
         'audiolength': 'TLE',
         'audiosize': 'TSI',
         'bpm': 'TBP',
         'comment': 'COM',
         'compilation': 'TCP',
         'composer': 'TCM',
         'composersort': 'TSC',
         'conductor': 'TP3',
         'copyright': 'TCR',
         'date': 'TYE',
         'discnumber': 'TPA',
         'encodedby': 'TEN',
         'encodersettings': 'TSS',
         'genre': 'TCO',
         'grouping': 'TT1',
         'isrc': 'TRC',
         'key': 'TKE',
         'label': 'TPB',
         'language': 'TLA',
         'license': 'WCP',
         'lyricist': 'TXT',
         'lyrics': 'ULT',
         'media': 'TMT',
         'originalalbum': 'TOT',
         'originalartist': 'TOA',
         'originaldate': 'TOR',
         'originalfilename': 'TOF',
         'originallyricist': 'TOL',
         'people': 'IPL',
         'pictures': 'PIC',
         'playcount': 'CNT',
         'rating': 'POP',
         'remixer': 'TP4',
         'subtitle': 'TT3',
         'tempo': 'STC',
         'title': 'TT2',
         'titlesort': 'TST',
         'tracknumber': 'TRK',
         'ufid': 'UFI',
         'usertext': 'TXX',
         'userurl': 'WXX',
     }, ),
import pytest
from collections import Counter, OrderedDict, defaultdict
from bidict import bidict, frozenbidict, namedbidict
from itertools import product


# One plain-mapping instance of each flavor, all seeded from the same
# two items, used to cross-check equality against the bidict types.
d = dict(H='hydrogen', He='helium')
c = Counter(d)
o = OrderedDict(d)
dd = defaultdict(int, d)
class dictsubclass(dict): pass
s = dictsubclass(d)

# The same items loaded into each bidict flavor under test.
b = bidict(d)
f = frozenbidict(d)
n = namedbidict('named', 'keys', 'vals')(d)

# Fixture tuples consumed by the parametrized test below.
dicts = (d, c, o, dd, s)
bidicts = (b, f, n)

@pytest.mark.parametrize('d, b', product(dicts, bidicts))
def test_eq(d, b):
    """Every mapping flavor compares equal to every bidict flavor."""
    assert d == b
Example #20
0
class ID3v2Frames(Tags):
    """Collection of ID3v2 tag frames exposed through the ``Tags`` mapping.

    A version-specific ``FIELD_MAP`` translates human-readable field names
    (e.g. ``'album'``) into raw frame IDs (e.g. ``'TAL'`` for v2.2,
    ``'TALB'`` for v2.3/v2.4); ``load`` selects the map that matches the
    tag version being parsed.
    """

    # Field-name <-> frame-ID map for ID3v2.2 (3-character frame IDs).
    _v22_FIELD_MAP = frozenbidict({
        'album': 'TAL',
        'albumartist': 'TP2',
        'arranger': 'TP4',
        'artist': 'TP1',
        'audiodelay': 'TDY',
        'audiolength': 'TLE',
        'audiosize': 'TSI',
        'bpm': 'TBP',
        'comment': 'COM',
        'composer': 'TCM',
        'conductor': 'TP3',
        'copyright': 'TCR',
        'date': 'TYE',
        'discnumber': 'TPA',
        'encodedby': 'TEN',
        'encodingsettings': 'TSS',
        'genre': 'TCO',
        'grouping': 'TT1',
        'isrc': 'TRC',
        'language': 'TLA',
        'lyricist': 'TXT',
        'lyrics': 'ULT',
        'media': 'TMT',
        'originalalbum': 'TOT',
        'originalartist': 'TOA',
        'originalauthor': 'TOL',
        'originalyear': 'TOR',
        'pictures': 'PIC',
        'playcount': 'CNT',
        'publisher': 'TPB',
        'subtitle': 'TT3',
        'title': 'TT2',
        'tracknumber': 'TRK'
    })

    # Field-name <-> frame-ID map for ID3v2.3 (4-character frame IDs).
    _v23_FIELD_MAP = frozenbidict({
        'album': 'TALB',
        'albumsort': 'TSOA',
        'albumartist': 'TPE2',
        'albumartistsort': 'TSO2',
        'arranger': 'TPE4',
        'artist': 'TPE1',
        'artistsort': 'TSOP',
        'audiodelay': 'TDLY',
        'audiolength': 'TLEN',
        'audiosize': 'TSIZ',
        'bpm': 'TBPM',
        'comment': 'COMM',
        'compilation': 'TCMP',
        'composer': 'TCOM',
        'composersort': 'TSOC',
        'conductor': 'TPE3',
        'copyright': 'TCOP',
        'date': 'TYER',
        'discnumber': 'TPOS',
        'encodedby': 'TENC',
        'encodingsettings': 'TSSE',
        'genre': 'TCON',
        'grouping': 'TIT1',
        'isrc': 'TSRC',
        'language': 'TLAN',
        'lyricist': 'TEXT',
        'lyrics': 'USLT',
        'media': 'TMED',
        'originalalbum': 'TOAL',
        'originalartist': 'TOPE',
        'originalauthor': 'TOLY',
        'originalyear': 'TORY',
        'pictures': 'APIC',
        'playcount': 'PCNT',
        'publisher': 'TPUB',
        'subtitle': 'TIT3',
        'title': 'TIT2',
        'titlesort': 'TSOT',
        'tracknumber': 'TRCK'
    })

    # Field-name <-> frame-ID map for ID3v2.4; identical to v2.3 except
    # for the date frame ('TDRC' instead of 'TYER') and the added 'mood'.
    _v24_FIELD_MAP = frozenbidict({
        'album': 'TALB',
        'albumsort': 'TSOA',
        'albumartist': 'TPE2',
        'albumartistsort': 'TSO2',
        'arranger': 'TPE4',
        'artist': 'TPE1',
        'artistsort': 'TSOP',
        'audiodelay': 'TDLY',
        'audiolength': 'TLEN',
        'audiosize': 'TSIZ',
        'bpm': 'TBPM',
        'comment': 'COMM',
        'compilation': 'TCMP',
        'composer': 'TCOM',
        'composersort': 'TSOC',
        'conductor': 'TPE3',
        'copyright': 'TCOP',
        'date': 'TDRC',
        'discnumber': 'TPOS',
        'encodedby': 'TENC',
        'encodingsettings': 'TSSE',
        'genre': 'TCON',
        'grouping': 'TIT1',
        'isrc': 'TSRC',
        'language': 'TLAN',
        'lyricist': 'TEXT',
        'lyrics': 'USLT',
        'media': 'TMED',
        'mood': 'TMOO',
        'originalalbum': 'TOAL',
        'originalartist': 'TOPE',
        'originalauthor': 'TOLY',
        'originalyear': 'TORY',
        'pictures': 'APIC',
        'playcount': 'PCNT',
        'publisher': 'TPUB',
        'subtitle': 'TIT3',
        'title': 'TIT2',
        'titlesort': 'TSOT',
        'tracknumber': 'TRCK'
    })

    @classmethod
    def load(cls, data, id3_version):
        """Parse all frames from ``data`` for the given ID3 version.

        Args:
            data: Raw frame bytes, or a ``DataReader`` over them.
            id3_version: An ``ID3Version`` member selecting the frame
                layout (header struct, size-field width, bits per size
                byte) and the field map.

        Returns:
            A populated instance of ``cls`` mapping frame IDs to values.

        Raises:
            ValueError: If ``id3_version`` is not v2.2, v2.3, or v2.4.
        """
        if not isinstance(data, DataReader):  # pragma: nocover
            data = DataReader(data)

        # NOTE(review): assigning FIELD_MAP on the class mutates state
        # shared by every instance; loading tags of different versions in
        # the same process changes earlier instances' lookups -- confirm
        # this is intended.
        if id3_version is ID3Version.v22:
            cls.FIELD_MAP = cls._v22_FIELD_MAP

            # 3-byte frame ID + 3 size bytes of 8 bits each.
            struct_pattern = '3s3B'
            size_len = 3
            per_byte = 8
        elif id3_version is ID3Version.v23:
            cls.FIELD_MAP = cls._v23_FIELD_MAP

            # 4-byte frame ID, 4 size bytes, 2 flag bytes.
            struct_pattern = '4s4B2B'
            size_len = 4
            per_byte = 8
        elif id3_version is ID3Version.v24:
            cls.FIELD_MAP = cls._v24_FIELD_MAP

            # Same header layout as v2.3, but size bytes carry only
            # 7 bits each (synchsafe integers -- TODO confirm).
            struct_pattern = '4s4B2B'
            size_len = 4
            per_byte = 7
        else:
            raise ValueError(f"Unsupported ID3 version: {id3_version}")

        # Accumulate frame values; most keys collect a list, but some
        # frame types below overwrite the key with a single value.
        frames = defaultdict(list)
        while True:
            try:
                frame = ID3v2Frame.load(data, struct_pattern, size_len,
                                        per_byte)
            except InvalidFrame:
                # End of parseable frame data.
                break

            # Ignore oddities/bad frames.
            if not isinstance(frame, ID3v2BaseFrame):
                continue

            # TODO: Finish any missing frame types.
            # TODO: Move representation into frame classes?
            # Comment/lyrics frames are keyed by description and language
            # so multiple variants can coexist.
            if isinstance(frame,
                          (ID3v2CommentFrame, ID3v2SynchronizedLyricsFrame,
                           ID3v2UnsynchronizedLyricsFrame)):
                frames[
                    f'{frame.id}:{frame.description}:{frame.language}'].append(
                        frame.value)
            elif isinstance(frame, ID3v2GenreFrame):
                # Genre replaces rather than appends.
                frames['TCON'] = frame.value
            elif isinstance(frame, ID3v2GEOBFrame):
                # General encapsulated objects keep their file metadata.
                frames[f'GEOB:{frame.description}'].append({
                    'filename': frame.filename,
                    'mime_type': frame.mime_type,
                    'value': frame.value
                })
            elif isinstance(frame, ID3v2PrivateFrame):
                frames[f'PRIV:{frame.owner}'].append(frame.value)
            elif isinstance(frame,
                            (ID3v2UserTextFrame, ID3v2UserURLLinkFrame)):
                frames[f'{frame.id}:{frame.description}'].append(frame.value)
            elif isinstance(
                    frame,
                (ID3v2NumericTextFrame, ID3v2TextFrame, ID3v2TimestampFrame)):
                # Text-like frames replace rather than append.
                frames[frame.id] = frame.value
            else:
                frames[frame.id].append(frame.value)

        return cls(frames)
Example #21
0
 def _build_category_ref_bidict(self, category_lines):
     """Return an immutable two-way map of numeric category id to name."""
     pairs = ((int(entry[0]), entry[1]) for entry in category_lines)
     return frozenbidict(dict(pairs))
Example #22
0
    def __init__(
        self,
        use_single_note_off_event: bool = False,
        use_end_of_sequence_event: bool = False,
        encode_velocity: bool = False,
        force_velocity_event: bool = True,
        max_time_shift: int = 100,
        velocity_bins: int = 32,
        default_velocity: int = 64,
        encode_instrument: bool = False,
        default_program: int = 0,
        default_is_drum: bool = False,
        num_tracks: Optional[int] = None,
        ignore_empty_tracks: bool = False,
        resolution: int = DEFAULT_RESOLUTION,
        duplicate_note_mode: str = "fifo",
        timing: "Optional[Timing]" = None,
    ):
        """Configure the event representation and build its vocabulary."""
        self.use_single_note_off_event = use_single_note_off_event
        self.use_end_of_sequence_event = use_end_of_sequence_event
        self.encode_velocity = encode_velocity
        self.force_velocity_event = force_velocity_event
        self.velocity_bins = velocity_bins
        self.default_velocity = default_velocity
        self.encode_instrument = encode_instrument
        self.default_program = default_program
        self.default_is_drum = default_is_drum
        self.num_tracks = num_tracks
        self.ignore_empty_tracks = ignore_empty_tracks
        self.resolution = resolution
        self.duplicate_note_mode = duplicate_note_mode
        # Fall back to tick-based shifts when no timing scheme is given.
        self.timing = (timing if timing is not None
                       else TickShiftTiming(max_shift=max_time_shift))

        if encode_instrument and num_tracks is None:
            raise ValueError(
                'Cannot encode instruments when num_tracks is None')

        # Assemble the event vocabulary in a fixed, deterministic order.
        events = []  # type: list
        tracks = [0] if num_tracks is None else range(num_tracks)
        events.extend(
            (NOTE_ON, tr, pitch) for tr in tracks for pitch in range(128))
        off_values = [ALL_NOTES] if use_single_note_off_event else range(128)
        events.extend(
            (NOTE_OFF, tr, pitch) for tr in tracks for pitch in off_values)
        events.extend(self.timing.vocab_list)
        if encode_velocity or num_tracks is None:
            # Single-track mode always carries velocity tokens for
            # backwards compatibility.
            events.extend((VELOCITY, tr, v) for tr in tracks
                          for v in range(velocity_bins))
        if encode_instrument:
            events.extend(
                (PROGRAM, tr, prog) for tr in tracks for prog in range(128))
            events.extend((DRUM, tr) for tr in tracks)
        if use_end_of_sequence_event:
            events.append((EOS, ))

        # Assign consecutive integer ids to the human-readable tuples.
        self.vocab = frozenbidict(
            {event: index
             for index, event in enumerate(events)})  # type: frozenbidict[Any, int]