Example #1
    def __exit__(self, exc_type, exc_value, exc_traceback):
        if isinstance(exc_value, ModuleCacheValid) or \
            exc_type is ModuleCacheValid or \
            exc_value is ModuleCacheValid:
            # The cache is still valid: restore the cached attributes into the
            # caller's globals and suppress the ModuleCacheValid signal.
            inspect.stack()[1][0].f_globals.update(self.moduledata)
            return True
        elif exc_value is None:
            # Block finished without an exception: snapshot the caller's
            # globals, dropping module objects and any suppressed names.
            new_moduledata = valfilter(
                complement(flip(isinstance)(ModuleType)),
                dissoc(inspect.stack()[1][0].f_globals, *self.suppress))

            # Check that all objects can be cached
            for _ in starmap(self._check_cachability, new_moduledata.items()):
                pass

            new_metadata = self.invalidator.new_metadata(new_moduledata)
            self._put_in_cache(new_metadata, new_moduledata)
            return True
        else:
            return False
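The filtering step is the part that uses complement: valfilter(complement(flip(isinstance)(ModuleType)), ...) keeps every global whose value is not a module, after dissoc has removed the explicitly suppressed names. A minimal, self-contained sketch of that idiom (fake_globals is invented for illustration):

from types import ModuleType
import math

from toolz import complement, dissoc, flip, valfilter

# Stand-in for a module's globals: a mix of module objects and plain values.
fake_globals = {'math': math, 'answer': 42, 'label': 'demo', 'tmp': [1, 2]}

# flip(isinstance)(ModuleType) builds a predicate value -> isinstance(value, ModuleType);
# complement(...) negates it, so valfilter keeps only the non-module entries.
not_a_module = complement(flip(isinstance)(ModuleType))
cacheable = valfilter(not_a_module, dissoc(fake_globals, 'tmp'))

assert cacheable == {'answer': 42, 'label': 'demo'}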
Example #2
    def generate(self, estimator, methods, trim, argument_names, **extra_args):
        functions = tuple(
            map(
                tupapply,
                zip(map(method_dispatcher.__getitem__, methods),
                    repeat(estimator))))
        if argument_names is not None:
            outer = Function(tuple(map(safe_symbol, argument_names)), tuple(),
                             tuple(map(safe_symbol, argument_names)))
            functions = tuple(map(lambda x: x.compose(outer), functions))
        sorted_functions = toposort(functions)
        names = dict(zip(functions, methods))
        unnamed = tuple(
            filter(complement(names.__contains__), sorted_functions))
        names = merge(
            names,
            dict(
                tupsmap(1,
                        curry(__mod__)('_f%d'),
                        map(compose(tuple, reversed), enumerate(unnamed)))))
        return self.template.render(functions=sorted_functions,
                                    printer=self.printer,
                                    namer=names.__getitem__,
                                    **extra_args)
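The complement(names.__contains__) filter picks out the composed functions that were never given an explicit method name, so they can be assigned generated '_f%d' names. A small sketch of that naming step, using plain strings to stand in for the Function objects (the names here are invented):

from toolz import complement, merge

named = {'fit': 'estimate', 'transform': 'predict'}
ordered = ['fit', 'transform', 'helper_a', 'helper_b']

# complement(named.__contains__) is true exactly for entries that still need a name.
unnamed = tuple(filter(complement(named.__contains__), ordered))
assert unnamed == ('helper_a', 'helper_b')

# Mirror the '_f%d' naming scheme used above for the leftovers.
names = merge(named, {item: '_f%d' % i for i, item in enumerate(unnamed)})
assert names['helper_a'] == '_f0' and names['helper_b'] == '_f1'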
Example #3
def test_complement():
    # No args:
    assert complement(lambda: False)()
    assert not complement(lambda: True)()

    # Single arity:
    assert complement(iseven)(1)
    assert not complement(iseven)(2)
    assert complement(complement(iseven))(2)
    assert not complement(complement(isodd))(2)

    # Multiple arities:
    both_even = lambda a, b: iseven(a) and iseven(b)
    assert complement(both_even)(1, 2)
    assert not complement(both_even)(2, 2)

    # Generic truthiness:
    assert complement(lambda: "")()
    assert complement(lambda: 0)()
    assert complement(lambda: None)()
    assert complement(lambda: [])()

    assert not complement(lambda: "x")()
    assert not complement(lambda: 1)()
    assert not complement(lambda: [1])()
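The tests pin down the contract: the callable returned by complement accepts the same arguments as the wrapped function and returns the boolean negation of its (possibly non-boolean) result. A rough plain-Python equivalent, shown only to illustrate the semantics rather than toolz's actual implementation:

from toolz import complement

def iseven(n):
    return n % 2 == 0

def complement_sketch(func):
    # Negate the truthiness of func's return value.
    def negated(*args, **kwargs):
        return not func(*args, **kwargs)
    return negated

assert complement(iseven)(3) == complement_sketch(iseven)(3) == True
assert complement(iseven)(4) == complement_sketch(iseven)(4) == False
assert complement_sketch(lambda: "")()  # falsy result -> True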
Example #4
    def from_csv(cls,
                 path_or_file,
                 vocabs,
                 vocab_level='vocab',
                 name=None,
                 header=True,
                 ignore=[],
                 **csv_kwargs):
        '''
        Construct CodeCollection(s) from a CSV file.

        Parameters
        ----------
        path_or_file : string or file-like
            Either the path to a CSV file or a file-like object (such as a
            file or StringIO).

        vocabs : dict
            A dict in which the keys are either strings (if a header is
            available) or column numbers and the values are Vocabulary
            objects.  All cells in the specified fields will be parsed by the
            specified vocabularies.  The number of code collections returned
            will be equal to the number of key-value pairs in vocabs.

        vocab_level : string
            Name used for the index level that identifies the vocabulary.

        name : string or None
            Name given to the constructed collection.

        header : bool or list
            If True, use the first row of the csv as a header.  If False, no
            header will be used.  If a list, the elements of the list will be
            used as a header and must match the number of columns in the CSV
            file.

        ignore : list
            A list of strings (if a header is available) or column numbers
            specifying fields in the CSV that should be ignored.

        csv_kwargs : dict
            Arguments to pass to csv.reader.

        Returns
        -------
        dict
            The keys are the same as those of vocabs and the values are
            CodeCollection objects.
        '''
        # Open file if path provided
        if isinstance(path_or_file, string_types):
            infile = open(path_or_file, 'r')
        else:
            infile = path_or_file

        # Figure out header
        reader = csv.reader(infile, **csv_kwargs)
        if header is True:
            header = next(reader)
        header_map = None
        first = True

        # Parse the file
        results = list()
        for i, row in enumerate(reader):
            if header is False:
                header = list(range(len(row)))
            if header_map is None:
                header_map = dict(zip(header, list(range(len(row)))))
                header_map = merge(
                    dict(zip(list(range(len(row))), list(range(len(row))))),
                    header_map)
            if first:
                if len(header) != len(row):
                    raise ValueError(
                        'Header length does not match row length!')
                ignore_cols = sorted(map(header_map.__getitem__, ignore))
                vocab_keys = list(vocabs.keys())
                vocab_map = dict(
                    zip(vocab_keys, map(header_map.__getitem__, vocab_keys)))
                level_cols = tuple(
                    filter(
                        complement((set(vocab_map.values())
                                    | set(ignore_cols)).__contains__),
                        range(len(header))))
                levels = tuple(map(header.__getitem__,
                                   level_cols)) + (vocab_level, )
                first = False
            keys = tuple(map(row.__getitem__, level_cols))
            for vocab_key, vocab in vocabs.items():
                to_parse = row[vocab_map[vocab_key]]
                if not to_parse.strip():
                    results.append((keys + (vocab_key, ), set()))
                else:
                    try:
                        results.append(
                            (keys + (vocab_key, ), vocab.parse(to_parse)))
                    except ParseException:
                        raise ValueError(
                            'Unable to parse field %s of row %d: "%s"' %
                            (str(vocab_key), i, str(to_parse)))

        # Instantiate the CodeCollection objects
        return cls(*results, levels=levels, name=name)
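The complement call in the middle of the parser is what decides which CSV columns become index levels: a column is a level column exactly when it is neither mapped to a vocabulary nor listed in ignore. The same selection in isolation, with a made-up header (column names and positions are hypothetical):

from toolz import complement

header = ['state', 'year', 'codes', 'notes']
vocab_cols = {2}    # columns parsed by a Vocabulary
ignore_cols = {3}   # columns listed in `ignore`

# Keep the column indices that are neither vocab columns nor ignored; their
# header names become the index levels of the resulting collection.
level_cols = tuple(
    filter(complement((vocab_cols | ignore_cols).__contains__),
           range(len(header))))

assert level_cols == (0, 1)
assert tuple(header[i] for i in level_cols) == ('state', 'year')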