Code Example #1
    def __init__(self, cl_environment, compile_flags, cl_function, kernel_data,
                 double_precision, use_local_reduction):
        super().__init__(cl_environment)
        self._cl_function = cl_function
        self._kernel_data = OrderedDict(sorted(kernel_data.items()))
        self._double_precision = double_precision
        self._use_local_reduction = use_local_reduction

        self._mot_float_dtype = np.float32
        if double_precision:
            self._mot_float_dtype = np.float64

        for data in self._kernel_data.values():
            data.set_mot_float_dtype(self._mot_float_dtype)

        self._kernel = self._build_kernel(self._get_kernel_source(),
                                          compile_flags)

        self._workgroup_size = self._kernel.run_procedure.get_work_group_info(
            cl.kernel_work_group_info.PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
            self._cl_environment.device)
        if not self._use_local_reduction:
            self._workgroup_size = 1

        self._kernel_inputs = {
            name: data.get_kernel_inputs(self._cl_context,
                                         self._workgroup_size)
            for name, data in self._kernel_data.items()
        }
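
A note on the idiom above: OrderedDict(sorted(kernel_data.items())) pins a deterministic,
sorted argument order for the generated kernel, independent of the order in which the
caller built kernel_data. A standalone illustration:

    from collections import OrderedDict

    params = {'beta': 2, 'alpha': 1}
    print(list(OrderedDict(sorted(params.items()))))  # ['alpha', 'beta']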
Code Example #2
    def __init__(self,
                 label,
                 *,
                 spin,
                 basis=None,
                 local_identifiers=None,
                 order_index=None):
        if isinstance(spin, tuple):
            spin = sympy.sympify(spin[0]) / spin[1]
        else:
            spin = sympy.sympify(spin)
        self._spin = spin
        if not (2 * spin).is_integer:
            raise ValueError("spin %s must be an integer or half-integer" %
                             spin)
        try:
            dimension = int(2 * spin) + 1
        except TypeError:
            raise ValueError("spin %s must be an integer or half-integer" %
                             spin)
        if dimension <= 1:
            raise ValueError("spin %s must be greater than zero" % spin)
        bottom = -spin
        if basis is None:
            basis = tuple([
                SpinIndex._static_render(bottom + n) for n in range(dimension)
            ])
        else:
            # sometimes people don't think and use some of the "canonical" TLS
            # labels in the wrong order. We can catch it, so why not?
            if basis == ('up', 'down') or basis == ('+', '-'):
                raise ValueError("Invalid basis: you've switched %s and %s" %
                                 basis)
        super().__init__(
            label=label,
            basis=basis,
            dimension=dimension,
            local_identifiers=local_identifiers,
            order_index=order_index,
        )

        # rewrite the kwargs from super()
        self._kwargs = OrderedDict([
            ('spin', self._spin),
            ('local_identifiers', self._sorted_local_identifiers),
            ('order_index', self._order_index),
        ])
        self._minimal_kwargs = self._kwargs.copy()
        if local_identifiers is None:
            del self._minimal_kwargs['local_identifiers']
        if order_index is None:
            del self._minimal_kwargs['order_index']
Code Example #3
 def build_vocab(self, *args, **kwargs):
     """Add unaligned_token to the list of special symbols."""
     counter = Counter()
     sources = []
     for arg in args:
         if isinstance(arg, data.Dataset):
             sources += [
                 getattr(arg, name) for name, field in arg.fields.items()
                 if field is self
             ]
         else:
             sources.append(arg)
     for sample in sources:
         for x in sample:
             if not self.sequential:
                 x = [x]
             try:
                 counter.update(x)
             except TypeError:
                 counter.update(chain.from_iterable(x))
     specials = list(
         OrderedDict.fromkeys(tok for tok in [
             self.unk_token,
             self.pad_token,
             self.init_token,
             self.eos_token,
             self.unaligned_token,
         ] if tok is not None))
     self.vocab = self.vocab_cls(counter, specials=specials, **kwargs)
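
The specials construction above uses OrderedDict.fromkeys as an order-preserving
deduplication; a standalone illustration:

    from collections import OrderedDict

    tokens = ['<unk>', '<pad>', '<unk>', '<s>']
    print(list(OrderedDict.fromkeys(tokens)))  # ['<unk>', '<pad>', '<s>']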
Code Example #4
 def build_vocab(self, *args, **kwargs):
     """Add unaligned_token to the list of special symbols."""
     counter = Counter()
     sources = []
     for arg in args:  # each arg is a QEDataset, holding examples and fields
         if isinstance(arg, data.Dataset):
             sources += [
                 getattr(arg, name) for name, field in arg.fields.items()
                 if field is self
             ]  # sources is a list whose elements are iterables of examples
         else:
             sources.append(arg)
     for sample in sources:
         for x in sample:  # read one sample per pass, coerce it to a list, then update the counter
             if not self.sequential:
                 x = [x]
             try:
                 counter.update(x)
             except TypeError:
                 counter.update(chain.from_iterable(x))
     specials = list(
         OrderedDict.fromkeys(tok for tok in [
             self.unk_token, self.pad_token, self.init_token,
             self.eos_token, self.unaligned_token
         ] if tok is not None))  # ['<unk>', '<pad>', '<unaligned>']
     self.vocab = self.vocab_cls(counter, specials=specials, **kwargs)
Code Example #5
    def get_paginated_response(self, data):
        total = self.page.paginator.count
        per_page = total if self.per_page > total else self.per_page
        if per_page == 0:
            per_page = 1

        # math.ceil (requires "import math") replaces the fragile
        # string-splitting of the float quotient used here before;
        # the resulting last page is the same.
        last_page = math.ceil(total / per_page)

        start_from = 1 + (self.page.number * per_page) - per_page
        if start_from < 1:
            start_from = 1

        end_at = (start_from + per_page) - 1
        if end_at > total:
            end_at = total

        if per_page >= total:
            start_from = 1
            end_at = total

        return Response(OrderedDict([
            ('total', total),
            ('per_page', per_page),
            ('current_page', self.page.number),
            ('last_page', last_page),
            ('next_page_url', self.get_next_link()),
            ('prev_page_url', self.get_previous_link()),
            ('from', start_from),
            ('to', end_at),
            ('rows', data)
        ]))
Code Example #6
    def __init__(self,
                 channel_multiplier=1.0,
                 channel_divisor=8,
                 channel_min=None,
                 output_stride=32,
                 pad_type='',
                 act_layer=None,
                 se_kwargs=None,
                 norm_layer=nn.BatchNorm2d,
                 norm_kwargs=None,
                 drop_path_rate=0.,
                 feature_location='',
                 verbose=False):
        self.channel_multiplier = channel_multiplier
        self.channel_divisor = channel_divisor
        self.channel_min = channel_min
        self.output_stride = output_stride
        self.pad_type = pad_type
        self.act_layer = act_layer
        self.se_kwargs = se_kwargs
        self.norm_layer = norm_layer
        self.norm_kwargs = norm_kwargs
        self.drop_path_rate = drop_path_rate
        self.feature_location = feature_location
        assert feature_location in ('pre_pwl', 'post_exp', '')
        self.verbose = verbose

        # state updated during build, consumed by model
        self.in_chs = None
        self.features = OrderedDict()
Code Example #7
 def get_paginated_response(self, data):
     has_next_link = len(data) >= self.limit
     return Response(
         OrderedDict([('next',
                       self.get_next_link() if has_next_link else None),
                      ('previous', self.get_previous_link()),
                      ('results', data), ('limit', self.limit),
                      ('offset', self.offset)]))
Code Example #8
class MockArrangement(Arrangement):
    _arrangements = OrderedDict()

    def publish(self) -> None:
        self._arrangements[self.id()] = self

    def revoke(self):
        del self._arrangements[self.id()]
Code Example #9
class OrderedDictLevel(PriceLevel):
    def is_empty(self):
        return len(self.orders) == 0

    def __init__(self):
        PriceLevel.__init__(self)
        self.orders = OrderedDict()

    def _add(self, order):
        self.orders[order[O_ID]] = order

    def _remove(self, order: tuple) -> list:
        return self.orders.pop(order[O_ID])

    def is_not_empty(self) -> bool:
        return len(self.orders) > 0

    def __len__(self):
        return len(self.orders)

    def get_first(self) -> list:
        for order in self.orders.values():
            return order

    def _remove_first(self):
        self.orders.popitem(last=False)

    def get_last(self):
        for order in reversed(self.orders.values()):
            return order

    def _remove_last(self):
        self.orders.popitem(last=True)
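
OrderedDict supplies the matching semantics here: popitem(last=False) removes entries in
insertion order, giving the price level price-time (FIFO) priority for free. A standalone
sketch:

    from collections import OrderedDict

    orders = OrderedDict()
    orders[101] = ('order-101',)
    orders[102] = ('order-102',)
    assert orders.popitem(last=False)[0] == 101  # oldest order leaves first
    assert orders.popitem(last=True)[0] == 102   # newest of the remainder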
Code Example #10
def procesarDialogo(inp, out):
    # Process '+'/'-' assertions and questions until a line containing '!';
    # once a contradiction has been recorded, every later question is
    # answered with "I am abroad."
    historial = []
    dicAfirmacion = OrderedDict()
    dicContradiccion = OrderedDict({"si": {}, "no": {}, "cont": 0})
    line = inp.readline().rstrip()
    while '!' not in line:
        if not (line[0].isalnum()):
            existeContradiccion(dicContradiccion, line)
            procesarAfirmacion(line.lower(), dicAfirmacion, historial)
        elif line[-1] == '?':
            if dicContradiccion["cont"] != 0:
                out.write(line + "\n")
                out.write("I am abroad.\n\n")
            else:
                responderPregunta(line, dicAfirmacion, out, historial)
        line = inp.readline().rstrip()
        if '!' in line:
            out.write(line + "\n\n")
Code Example #11
def torch_load_unwrapped(path):
    """Fixing the changes induced by wrapping with DataParallel"""
    state_dict = torch.load(path)
    fixed_dict = OrderedDict()
    for key in state_dict:
        if key.startswith('module.'):
            fixed_dict[key[7:]] = state_dict[key]
        else:
            fixed_dict[key] = state_dict[key]
    return fixed_dict
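
A minimal usage sketch: nn.DataParallel prefixes every state-dict key with 'module.', and
the helper strips that prefix so the weights load into an unwrapped model. Both names
below are hypothetical stand-ins:

    model = PlainModel()  # hypothetical module matching the saved architecture
    model.load_state_dict(torch_load_unwrapped('checkpoint.pt'))  # hypothetical path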
Code Example #12
def split_jsonl_gz(inputs_filepath: str,
                   summaries_filepath: str,
                   output_path: str,
                   fold_proportions: Dict[str, float],
                   seed: Optional[int] = None,
                   max_elements_per_file: Optional[int] = None) -> None:
    assert abs(sum(fold_proportions.values()) -
               1) < 1e-5, 'Fold proportions must sum to 1.'
    assert len(fold_proportions) > 0

    thresholds = OrderedDict()  # type: OrderedDict[str, float]
    proportion_accumulator = 0.0
    for fold, proportion in fold_proportions.items():
        os.makedirs(os.path.join(output_path, fold), exist_ok=True)
        proportion_accumulator += proportion
        thresholds[fold] = proportion_accumulator

    def allocate_to_fold() -> str:
        rand_num = random.random()
        for fold, threshold in thresholds.items():
            if rand_num < threshold:
                return fold
        return fold  # reached only when, due to float precision, max(thresholds.values()) < 1

    if seed is not None:
        random.seed(seed)

    out_files = {}  # type: Dict[str, Tuple[Generator[None, str, None], Generator[None, str, None]]]
    for fold in fold_proportions:
        fold_path = os.path.join(output_path, fold)

        inputs_fold_writer = SplitFileWriter(
            os.path.join(fold_path,
                         get_prefix(os.path.basename(inputs_filepath))),
            max_elements_per_file=max_elements_per_file)

        outputs_fold_writer = SplitFileWriter(
            os.path.join(fold_path,
                         get_prefix(os.path.basename(summaries_filepath))),
            max_elements_per_file=max_elements_per_file)

        out_files[fold] = inputs_fold_writer, outputs_fold_writer

    for input_line, summary_line in \
            zip(load_gz_per_line(inputs_filepath), load_gz_per_line(summaries_filepath)):
        fold = allocate_to_fold()
        inputs_writer, outputs_writer = out_files[fold]
        inputs_writer.add(input_line)
        outputs_writer.add(summary_line)

    # Close the files
    for inputs_fold_file, outputs_fold_file in out_files.values():
        inputs_fold_file.close()
        outputs_fold_file.close()
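
allocate_to_fold is inverse-CDF sampling over the cumulative proportions; a
self-contained sketch with assumed fold names:

    import random
    from collections import OrderedDict

    thresholds = OrderedDict([('train', 0.8), ('valid', 0.9), ('test', 1.0)])

    def allocate_to_fold():
        rand_num = random.random()
        for fold, threshold in thresholds.items():
            if rand_num < threshold:
                return fold
        return fold  # float-precision fallback, as in the example above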
Code Example #13
def expand_commutators_leibniz(expr, expand_expr=True):
    """Recursively expand commutators in `expr` according to the Leibniz rule.

    .. math::

        [A B, C] = A [B, C] + [A, C] B

    .. math::

        [A, B C] = [A, B] C + B [A, C]

    If `expand_expr` is True, expand products of sums in `expr`, as well as in
    the result.
    """
    recurse = partial(expand_commutators_leibniz, expand_expr=expand_expr)
    A = wc('A', head=Operator)
    C = wc('C', head=Operator)
    AB = wc('AB', head=OperatorTimes)
    BC = wc('BC', head=OperatorTimes)

    def leibniz_right(A, BC):
        """[A, BC] -> [A, B] C + B [A, C]"""
        B = BC.operands[0]
        C = OperatorTimes.create(*BC.operands[1:])
        return Commutator.create(A, B) * C + B * Commutator.create(A, C)

    def leibniz_left(AB, C):
        """[AB, C] -> A [B, C] C + [A, C] B"""
        A = AB.operands[0]
        B = OperatorTimes(*AB.operands[1:])
        return A * Commutator.create(B, C) + Commutator.create(A, C) * B

    rules = OrderedDict([
        (
            'leibniz1',
            (
                pattern(Commutator, A, BC),
                lambda A, BC: recurse(leibniz_right(A, BC).expand()),
            ),
        ),
        (
            'leibniz2',
            (
                pattern(Commutator, AB, C),
                lambda AB, C: recurse(leibniz_left(AB, C).expand()),
            ),
        ),
    ])

    if expand_expr:
        res = _apply_rules(expr.expand(), rules).expand()
    else:
        res = _apply_rules(expr, rules)
    return res
Code Example #14
File: Config.py  Project: MStopa/SpeechToolsWorkers
def get_level(name, labelname, itemtype='SEGMENT', labeltype='STRING'):
    level = OrderedDict()

    level['name'] = name
    level['type'] = itemtype

    attrs = []
    level['attributeDefinitions'] = attrs

    if not type(labelname) is list:
        labelname = [labelname]

    for label in labelname:
        attr = OrderedDict()
        attrs.append(attr)

        attr['name'] = label
        attr['type'] = labeltype

    return level
Code Example #15
def generate_config_str(config):
    """Takes a config of Trainer class, and produces a shorted string, which
    tensorboard can handle better.

    Models with many parameters will otherwise quickly create extremely long
    filenames, which can cause problems with your os."""
    entries = [("ml", config["model_type"]), ("ds", config["dataset_type"]),
               ("lr", config["optimizer_settings"]["learning_rate"]),
               ("bs", config["train_batch_size"])]

    entries += dict_to_config_str(config["model_configuration"])

    return "_".join(k + "=" + str(v) for k, v in OrderedDict(entries).items())
Code Example #16
def _get_cms_label_data(cms_label, user_language):
    cms_label_data = OrderedDict()
    translated_labels = mdl_cms.translated_text_label.search(
        text_entity=entity_name.OFFER_YEAR,
        labels=cms_label,
        language=user_language
    )

    for label in cms_label:
        translated_text = next((trans.label for trans in translated_labels if trans.text_label.label == label), None)
        cms_label_data[label] = translated_text

    return cms_label_data
Code Example #17
def binary_report(predictions, col_true='CLASS'):
    print('Binary classification results:')

    y_true = (predictions[col_true] == 'QSO')
    y_pred_proba = predictions['QSO_PHOTO']
    y_pred_binary = (predictions['CLASS_PHOTO'] == 'QSO')

    n_pos = y_pred_binary.sum()
    n_all = len(y_pred_binary)
    print('Predicted positives: {}/{} ({:.2f}%)'.format(
        n_pos, n_all, n_pos / n_all * 100))

    logloss, logloss_err = bootstrap_metric(log_loss, y_true, y_pred_proba)
    print('Logloss = {:.4f} ({:.4f})'.format(logloss, logloss_err))

    binary_metrics = OrderedDict([
        ('Accuracy', partial(bootstrap_metric, accuracy_score)),
        ('F1', partial(bootstrap_metric, f1_score)),
        ('Precision', partial(bootstrap_metric, precision_score)),
        ('Recall', partial(bootstrap_metric, recall_score)),
    ])
    for metric_name, metric_func in binary_metrics.items():
        score, score_err = metric_func(y_true, y_pred_binary)
        print('{} = {:.4f} ({:.4f})'.format(metric_name, score, score_err))

    # ROC AUC
    fpr, tpr, _ = roc_curve(y_true, y_pred_proba)
    roc_auc = auc(fpr, tpr)
    print('ROC AUC = {:.4f}'.format(roc_auc))
    plot_roc_curve(fpr, tpr, roc_auc)

    # Precision - recall curve
    average_precision = average_precision_score(y_true, y_pred_proba)
    precision = precision_score(y_true, y_pred_binary)
    recall = recall_score(y_true, y_pred_binary)
    precisions, recalls, thresholds = precision_recall_curve(
        y_true, y_pred_proba)
    plot_precision_recall_curve(precisions, recalls, average_precision,
                                precision, recall)
Code Example #18
    def build_weights(self, device: torch.device) -> Dict[str, torch.Tensor]:
        weights = OrderedDict([
            [
                'conv1',
                xavier_uniform_(
                    torch.empty((32, 1, 5, 5),
                                requires_grad=True,
                                device=device))
            ],
            ['b1', torch.zeros(32, requires_grad=True, device=device)],
            [
                'conv2',
                xavier_uniform_(
                    torch.empty((32, 32, 5, 5),
                                requires_grad=True,
                                device=device))
            ],
            ['b2', torch.zeros(32, requires_grad=True, device=device)],
            [
                'w3',
                xavier_uniform_(
                    torch.empty((1024, 32 * 5 * 7),
                                requires_grad=True,
                                device=device))
            ],
            ['b3', torch.zeros(1024, requires_grad=True, device=device)],
            [
                'w4',
                xavier_uniform_(
                    torch.empty((self.feature_space_size, 1024),
                                requires_grad=True,
                                device=device))
            ],
            [
                'b4',
                torch.zeros(self.feature_space_size,
                            requires_grad=True,
                            device=device)
            ],
            [
                'w5',
                xavier_uniform_(
                    torch.empty((1, self.feature_space_size),
                                requires_grad=True,
                                device=device))
            ],
            ['b5', torch.zeros(1, requires_grad=True, device=device)],
        ])

        return weights
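
Because each entry is a leaf tensor created with requires_grad=True, the OrderedDict can
feed an optimizer directly while keeping a stable parameter order; a hedged sketch, where
net stands for an instance of the class above:

    weights = net.build_weights(torch.device('cpu'))
    optimizer = torch.optim.Adam(weights.values(), lr=1e-3)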
Code Example #19
def parse_specifier(specifier):
    """

    Args:
        specifier (str):
    Returns:
        parsed_dict (OrderedDict):
            Like {'ark': 'file.ark', 'scp': 'file.scp'}


    >>> d = parse_specifier('ark,t,scp:file.ark,file.scp')
    >>> print(d['ark,t'])
    file.ark

    """
    if not isinstance(specifier, str):
        raise TypeError('Argument must be str, but got {}'.format(
            type(specifier)))
    sp = specifier.split(':', 1)
    if len(sp) != 2:
        raise ValueError('The output file must be specified with '
                         'kaldi-specifier style,'
                         ' e.g. ark,scp:out.ark,out.scp, but you gave as '
                         '{}'.format(specifier))

    types, files = sp
    types = [x.strip() for x in types.split(',')]
    files = [x.strip() for x in files.split(',')]
    for x in set(types):
        if types.count(x) > 1:
            raise ValueError(f'{x} is duplicated.')

    supported = [{'ark'}, {'scp'}, {'ark', 'scp'}, {'ark', 't'},
                 {'scp', 'ark', 't'}]
    if set(types) not in supported:
        raise ValueError('Invalid type: {}, must be one of {}'.format(
            types, supported))

    if 't' in types:
        types.remove('t')
        types[types.index('ark')] = 'ark,t'

    if len(types) != len(files):
        raise ValueError(
            'The number of file types need to match with the file names: '
            '{} != {}, you gave as {}'.format(len(types), len(files),
                                              specifier))

    return OrderedDict(zip(types, files))
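
Two more input/output pairs, following directly from the parsing logic above:

    d = parse_specifier('ark,scp:file.ark,file.scp')
    assert d == OrderedDict([('ark', 'file.ark'), ('scp', 'file.scp')])
    assert parse_specifier('scp:file.scp') == OrderedDict([('scp', 'file.scp')])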
Code Example #20
File: Config.py  Project: MStopa/SpeechToolsWorkers
def get_default_emu_config(feats):
    if not feats:
        feats = []

    config = OrderedDict()

    perspectives = []
    config['perspectives'] = perspectives

    perspectives.append(get_perspective('default', []))
    perspectives.append(get_perspective('full', feats))

    restrictions = OrderedDict()
    config['restrictions'] = restrictions

    restrictions['showPerspectivesSidebar'] = True

    buttons = OrderedDict()
    config['activeButtons'] = buttons

    buttons['saveBundle'] = True
    buttons['showHierarchy'] = True

    return config
Code Example #21
File: dict_helpers.py  Project: jamesstidard/Mzo-Cli
def unify_dicts(dicts, *, key_order=None, fill=None):
    if not key_order:
        key_order = OrderedSet(k for d in dicts for k in d.keys())

    filtered_dicts = [{k: v for k, v in d.items() if k in key_order} for d in dicts]

    empty_template = {k: fill for k in key_order}
    filled_dicts = [{**empty_template, **d} for d in filtered_dicts]

    column_order = key_sorter(*key_order)
    sorted_dicts = [
        OrderedDict(sorted(d.items(), key=column_order)) for d in filled_dicts
    ]

    return sorted_dicts
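
A sketch of the intended behaviour (OrderedSet and key_sorter come from the same project;
key_sorter is assumed to sort keys by their position in key_order):

    rows = unify_dicts([{'a': 1}, {'b': 2}], fill=None)
    # -> [OrderedDict([('a', 1), ('b', None)]),
    #     OrderedDict([('a', None), ('b', 2)])]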
Code Example #22
 def __init__(self, vehicle: VehicleStatistic = None):
     self.expected_fragged = 0
     self.expected_spotted = 0
     self.expected_damage = 0
     self.expected_deffed = 0
     self.expected_wins = 0
     self.deffed = 0
     self.frags = 0
     self.spotted = 0
     self.battles = 0
     self.damage_done = 0
     self.wins = 0
     self.vehicles = OrderedDict()
     if vehicle:
         for v in vehicle.vehiclestatisticitem_set.all():
             self.add_vehicle(v)
Code Example #23
    def get_settings(self, section=None, defaults=None):
        """
        Gets a named section from the configuration source.

        :param section: a :class:`str` representing the section you want to
            retrieve from the configuration source. If ``None`` this will
            fallback to the :attr:`plaster.PlasterURL.fragment`.
        :param defaults: a :class:`dict` that will get passed to
            :class:`configparser.ConfigParser` and will populate the
            ``DEFAULT`` section.
        :return: A :class:`plaster_pastedeploy.ConfigDict` of key/value pairs.

        """
        # This is a partial reimplementation of
        # ``paste.deploy.loadwsgi.ConfigLoader:get_context`` which supports
        # "set" and "get" options and filters out any other globals
        section = self._maybe_get_default_name(section)
        parser = self._get_parser(defaults)
        defaults = parser.defaults()

        try:
            raw_items = parser.items(section)
        except NoSectionError:
            return {}

        local_conf = OrderedDict()
        get_from_globals = {}
        for option, value in raw_items:
            if option.startswith('set '):
                name = option[4:].strip()
                defaults[name] = value
            elif option.startswith('get '):
                name = option[4:].strip()
                get_from_globals[name] = value
                # insert a value into local_conf to preserve the order
                local_conf[name] = None
            else:
                # annoyingly pastedeploy filters out all defaults unless
                # "get foo" is used to pull it in
                if option in defaults:
                    continue
                local_conf[option] = value
        for option, global_option in get_from_globals.items():
            local_conf[option] = defaults[global_option]
        return ConfigDict(local_conf, defaults, self)
Code Example #24
File: Config.py  Project: MStopa/SpeechToolsWorkers
def get_config(name, feats):
    config = OrderedDict()

    config['name'] = name
    config['UUID'] = str(uuid1())
    config['mediafileExtension'] = 'wav'

    tracks = []
    config['ssffTrackDefinitions'] = tracks

    if feats:
        for feat in feats:
            if feat in features:
                tracks.append(features[feat])
            else:
                logger.warning(
                    'feature not recognized -- {}'.format(feat))

    levels = []
    config['levelDefinitions'] = levels

    levels.append(get_level('Utterance', 'Utterance', itemtype='ITEM'))
    levels.append(get_level('Word', 'Word'))
    # levels.append(getLevel('Syllable', ['Syllable', 'Stress'], itemtype='ITEM'))
    # levels.append(getLevel('Phonetic Syllable', ['Syllable', 'Stress'], itemtype='ITEM'))
    levels.append(get_level('Phoneme', ['Phoneme', 'SAMPA', 'IPA']))

    links = []
    config['linkDefinitions'] = links

    links.append(get_link('Utterance', 'Word'))
    # links.append(getLink('Word', 'Syllable'))
    # links.append(getLink('Word', 'Phonetic Syllable'))
    # links.append(getLink('Syllable', 'Phoneme'))
    # links.append(getLink('Phonetic Syllable', 'Phoneme'))
    links.append(get_link('Word', 'Phoneme'))

    config['EMUwebAppConfig'] = get_default_emu_config(feats)

    return config
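
These Config.py helpers assemble a JSON-serializable tree of OrderedDicts; a hedged usage
sketch (the session name and feature list are made up):

    import json

    config = get_config('my_session', ['forest', 'ksvF0'])
    print(json.dumps(config, indent=2))  # insertion order survives serialization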
Code Example #25
File: Config.py  Project: MStopa/SpeechToolsWorkers
def get_perspective(name, feats):
    perspective = OrderedDict()

    perspective['name'] = name

    sig_cnv = OrderedDict()
    perspective['signalCanvases'] = sig_cnv

    sig_cnv['order'] = ['OSCI', 'SPEC']
    sig_cnv['assign'] = []
    sig_cnv['contourLims'] = []

    if 'forest' in feats:
        assign_spec = OrderedDict()
        assign_spec['signalCanvasName'] = 'SPEC'
        assign_spec['ssffTrackName'] = 'Formants'

        formant_colors = OrderedDict()
        formant_colors['ssffTrackName'] = 'Formants'
        formant_colors['colors'] = [
            'rgb(255,100,100)', 'rgb(100,255,100)', 'rgb(100,100,255)',
            'rgb(100,255,255)'
        ]

        sig_cnv['assign'].append(assign_spec)
        sig_cnv['contourColors'] = [formant_colors]

    if 'ksvF0' in feats or 'mhsF0' in feats:
        sig_cnv['order'].append('Pitch')

    if 'rmsana' in feats:
        sig_cnv['order'].append('RMS')

    if 'zcr' in feats:
        sig_cnv['order'].append('ZeroCross')

    lev_cnv = OrderedDict()
    perspective['levelCanvases'] = lev_cnv

    lev_cnv['order'] = ['Word', 'Phoneme']

    twodim_cnv = OrderedDict()
    perspective['twoDimCanvases'] = twodim_cnv

    twodim_cnv['order'] = []

    return perspective
Code Example #26
    def forward(self, model_out, target_x):
        """gives the batch normalized Variational Error."""
        model_out_x, mu, log_var = model_out
        batch_size = target_x.size()[0]
        seq_len = target_x.size()[1]
        z_size = mu.size()[1]
        model_out_x = F.softmax(model_out_x, dim=2)
        #following mkusner/grammarVAE
        BCE = seq_len * self.bce_loss(model_out_x, target_x)
        # this normalizer is for when we're not sampling so only have mus, not sigmas
        avg_mu = torch.sum(mu, dim=0) / batch_size
        var = torch.mm(mu.t(), mu) / batch_size
        var_err = var - Variable(to_gpu(torch.eye(z_size)))
        var_err = torch.tanh(
            var_err) * var_err  # so it's ~ x^2 asymptotically, not x^4
        mom_err = (avg_mu * avg_mu).sum() / z_size + var_err.sum() / (z_size *
                                                                      z_size)
        if self.sample_z:
            # see Appendix B from VAE paper:
            # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
            # https://arxiv.org/abs/1312.6114
            # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
            KLD_element = (1 + log_var - mu * mu - log_var.exp())
            KLD = -0.5 * torch.mean(KLD_element)
            KLD_ = KLD.data.item()
            my_loss = BCE + self.reg_weight * KLD
        else:
            my_loss = BCE + self.reg_weight * mom_err
            KLD_ = 0
        if not self.training:
            # ignore regularizers when computing validation loss
            my_loss = BCE

        self.metrics = OrderedDict([('BCE', BCE.data.item()), ('KLD', KLD_),
                                    ('ME', mom_err.data.item())])
        return my_loss
Code Example #27
def procesarAfirmacion(line, dicAfirmacion, historial):
    af = line.split(' ', 3) if ("don't" in line
                                or "doesn't" in line) else line.split(' ', 2)
    signo = af[0][0]
    af[0] = af[0].lstrip('+-')
    af[-1] = af[-1].rstrip('.')
    if "don't" in af or "doesn't" in af:
        af.append(af[1])
        af.pop(1)
    subject = af[0]
    # drop the 3rd-person trailing 's'; keep the verb unchanged for "I"/"you"
    predicate = af[1].rstrip('s') if af[0] not in ("I", "you") else af[1]
    obj = af[-2] if (af[-1] == "don't" or af[-1] == "doesn't") else af[-1]
    if len(af) == 2 or (len(af) == 3 and
                        (af[-1] == "don't" or af[-1] == "doesn't")):
        obj = None
    neg = af[-1] if (af[-1] == "don't" or af[-1] == "doesn't") else None
    if af[0] == "nobody":
        neg = "don't"
    if signo == '+':
        if dicAfirmacion.get(subject) is None:
            dicPredicate = OrderedDict({predicate: [(obj, neg)]})
            dicAfirmacion[subject] = dicPredicate
        elif dicAfirmacion[subject].get(predicate) is None:
            dicAfirmacion[subject][predicate] = [(obj, neg)]
        else:
            dicAfirmacion[subject][predicate].append((obj, neg))
        historial.append((subject, predicate, obj, neg))
    else:  #signo == '-'
        lista = dicAfirmacion[subject][predicate]
        lista.remove((obj, neg))
        if dicAfirmacion[subject][predicate] == []:
            dicAfirmacion[subject].pop(predicate)
        if dicAfirmacion[subject] == {}:
            dicAfirmacion.pop(subject)
        historial.remove((subject, predicate, obj, neg))
Code Example #28
    def fit_vocab(
        self,
        samples,
        vocab_size=None,
        vocab_min_freq=0,
        embeddings_name=None,
        keep_rare_words_with_embeddings=False,
        add_embeddings_vocab=False,
    ):
        tokens = Counter()
        for sample in samples:
            # TODO: subtokenize?
            tokens.update(self.tokenize(sample))

        # We use our own Vocabulary class
        specials = list(
            OrderedDict.fromkeys(
                tok for tok in [self.unaligned_token] if tok is not None
            )
        )
        # TODO: handle embeddings/vectors
        self.vocab = Vocabulary(
            tokens,
            max_size=vocab_size,
            min_freq=vocab_min_freq,
            unk_token=self.unk_token,
            pad_token=self.pad_token,
            bos_token=self.bos_token,
            eos_token=self.eos_token,
            specials=specials,
            specials_first=self.specials_first,
            # TODO: missing vectors, etc.
            vectors=None,
            rare_with_vectors=keep_rare_words_with_embeddings,
            add_vectors_vocab=add_embeddings_vocab,
        )
Code Example #29
    def __init__(self, channel_multiplier=1.0, channel_divisor=8, channel_min=None,
                 output_stride=32, pad_type='', act_layer=None, se_kwargs=None,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0., feature_location='',
                 verbose=False, **kwargs):
        self.channel_multiplier = channel_multiplier
        self.channel_divisor = channel_divisor
        self.channel_min = channel_min
        self.output_stride = output_stride
        self.pad_type = pad_type
        self.act_layer = act_layer
        self.se_kwargs = se_kwargs
        self.norm_layer = norm_layer
        self.norm_kwargs = norm_kwargs
        self.drop_path_rate = drop_path_rate
        self.feature_location = feature_location
        self.verbose = verbose
        assert feature_location in ('bottleneck', 'depthwise', 'expansion', '')
        self.logger = kwargs.get('logger', None)

        # state updated during build, consumed by model
        self.in_chs = None
        self.features = OrderedDict()
Code Example #30
class ExternalLearningUnitBaseForm(LearningUnitBaseForm):
    forms = OrderedDict()
    academic_year = None
    subtype = learning_unit_year_subtypes.FULL

    entity_version = None

    form_cls = form_cls_to_validate = [
        LearningUnitModelForm,
        LearningUnitYearForExternalModelForm,
        LearningContainerModelForm,
        LearningContainerYearExternalModelForm,
        ExternalLearningUnitModelForm
    ]

    def __init__(self, person, academic_year, learning_unit_instance=None, data=None, start_year=None, *args, **kwargs):
        self.academic_year = academic_year
        self.person = person
        self.learning_unit_instance = learning_unit_instance
        instances_data = self._build_instance_data(data)

        super().__init__(instances_data, *args, **kwargs)
        self.learning_unit_year_form.fields['acronym'] = ExternalAcronymField()
        self.start_year = self.instance.learning_unit.start_year if self.instance else start_year

    @property
    def learning_unit_external_form(self):
        return self.forms[ExternalLearningUnitModelForm]

    @property
    def learning_container_year_form(self):
        return self.forms[LearningContainerYearExternalModelForm]

    @property
    def learning_unit_year_form(self):
        return self.forms[LearningUnitYearForExternalModelForm]

    def _build_instance_data(self, data):
        return {
            LearningUnitModelForm: {
                'data': merge_data(data, {'periodicity': 'ANNUAL'}),
                'instance': self.instance.learning_unit if self.instance else None,
            },
            LearningContainerModelForm: {
                'data': data,
                'instance': self.instance.learning_container_year.learning_container if self.instance else None,
            },
            LearningUnitYearForExternalModelForm: self._build_instance_data_learning_unit_year(data),
            LearningContainerYearExternalModelForm: self._build_instance_data_learning_container_year(data),
            ExternalLearningUnitModelForm: self._build_instance_data_external_learning_unit(data)
        }

    def _build_instance_data_external_learning_unit(self, data):
        return {
            'data': data,
            'instance': self.instance and self.instance.externallearningunityear,
            'person': self.person
        }

    def _build_instance_data_learning_unit_year(self, data):
        return {
            'data': data,
            'instance': self.instance,
            'initial': {
                'status': True,
                'academic_year': self.academic_year
            } if not self.instance else {},
            'person': self.person,
            'subtype': self.subtype
        }

    def _build_instance_data_learning_container_year(self, data):
        return {
            'data': data,
            'instance': self.instance and self.instance.learning_container_year,
            'initial': {
                # Default language French
                'language': language.find_by_code('FR'),
            },
            'person': self.person
        }

    def get_context(self):
        return {
            'learning_unit_year': self.instance,
            'subtype': self.subtype,
            'learning_unit_form': self.learning_unit_form,
            'learning_unit_year_form': self.learning_unit_year_form,
            'learning_container_year_form': self.learning_container_year_form,
            'learning_unit_external_form': self.learning_unit_external_form
        }

    @transaction.atomic()
    def save(self, commit=True):
        academic_year = self.academic_year

        learning_container = self.learning_container_form.save(commit)
        learning_unit = self.learning_unit_form.save(
            start_year=self.start_year,
            learning_container=learning_container,
            commit=commit
        )
        container_year = self.learning_container_year_form.save(
            academic_year=academic_year,
            learning_container=learning_container,
            acronym=self.learning_unit_year_form.instance.acronym,
            commit=commit
        )

        # Save learning unit year (learning_unit_component +  learning_component_year + entity_component_year)
        learning_unit_year = self.learning_unit_year_form.save(
            learning_container_year=container_year,
            learning_unit=learning_unit,
            commit=commit
        )

        self.learning_unit_external_form.instance.learning_unit_year = learning_unit_year
        self.learning_unit_external_form.save(commit)

        return learning_unit_year

    def is_valid(self):
        return super().is_valid() and self.learning_unit_external_form.post_clean(self.academic_year.start_date)