Example #1
def upload_conf(src_file, dst_file):
    files.upload_template(src_file,
                          dst_file,
                          context=select_keys(str.isupper, globals()),
                          use_sudo=True,
                          backup=False,
                          use_jinja=True)
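A minimal sketch of the resulting template context, assuming a fabric-style settings module where configuration lives in uppercase module-level constants (the names below are hypothetical):

from funcy import select_keys

DB_HOST = 'db.example.com'   # uppercase globals are kept as template context
DB_PORT = 5432

def helper():                # lowercase names (and dunders) are filtered out
    pass

context = select_keys(str.isupper, globals())
# -> {'DB_HOST': 'db.example.com', 'DB_PORT': 5432}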
Example #2
def buildDict(prefix, urlx):
    rd = jutils.getHttpCsv(urlx)
    rdict = {}
    wlist = []
    for row in rd:
        cols = row.split(',')
        if len(cols) > 5:
            wlist.append(cols[5])
    w2list = [i for i in wlist if CHAR_MIN <= len(i) <= CHAR_MAX]
    pr = funcy.group_by(lambda x: x[0], w2list)
    c = Counter([i[0] for i in w2list]).most_common(MOST_LIMIT)
    fmck = [i[0] for i in c]  # the most common first characters
    fmcv = [i[1] for i in c]  # their occurrence counts
    # cap each group at 4 words, e.g. 512 keys * 4 words = 2048 entries
    wordlist = funcy.flatten([pr[x][:4] for x in fmck])
    rdict['data'] = funcy.select_keys(lambda x: x in fmck, pr)
    rdict['meta'] = {
        'source': urlx,
        'wordlist': wordlist,
        'firstMostCommonKey': fmck,
        'firstMostCommonCount': fmcv,
        'host': 'http://data.gov.tw',
        'build': 'http://console.tw',
        'script':
        'https://github.com/y12studio/console-tw/tree/master/projects/datachart/',
        'prefix': prefix,
        'time': datetime.datetime.utcnow().isoformat()
    }
    return rdict
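The heart of buildDict is a group-by-first-character step followed by a most-common filter. A self-contained sketch of those two steps with toy data (a MOST_LIMIT of 2 is assumed):

from collections import Counter
import funcy

words = ['apple', 'ant', 'ash', 'bee', 'cat']
groups = funcy.group_by(lambda w: w[0], words)    # {'a': ['apple', 'ant', 'ash'], 'b': ['bee'], 'c': ['cat']}
top = [ch for ch, _ in Counter(w[0] for w in words).most_common(2)]
funcy.select_keys(lambda ch: ch in top, groups)   # -> keeps the 'a' and 'b' groups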
Example #3
    def get_raw_counts(self, refs: Sequence[Commit]) -> List[Count]:
        items = []
        hits, misses = 0, 0
        for commit in refs:  # type: Commit
            commit_file = self.get_commit_file(commit)
            if commit_file.is_file():
                self.logv(f"Loading {commit_file}")
                raw_count = json.loads(commit_file.read_text())
                hits += 1
            else:
                self.checkout(commit.ref)
                raw_count = self.get_raw_count()
                commit_file.write_text(json.dumps(raw_count))
                self.logv(f"Saving {commit_file}")
                misses += 1
            counts = select_keys(self.languages, raw_count)
            count = count_schema.load({
                "commit": commit_schema.dump(commit),
                "counts": counts
            })
            items.append(count)
        hit_str = ntext(hits, "commit", "commits")
        miss_str = ntext(misses, "commit", "commits")
        self.log(f"{hits} {hit_str} cached", fg="green")
        self.log(f"{misses} {miss_str} missed", fg="yellow")
        return items
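The call select_keys(self.languages, raw_count) leans on funcy's extended predicate semantics: a set (or other collection) acts as a membership test. A sketch, assuming self.languages is a set of language names:

from funcy import select_keys

languages = {'Python', 'Go'}
raw_count = {'Python': 120, 'Go': 40, 'Markdown': 300}
select_keys(languages, raw_count)   # -> {'Python': 120, 'Go': 40}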
Example #4
def create_repo(session, repo_info_dict):
    keys = ('full_name', 'github_repo_id', 'owner')
    r_dict = funcy.select_keys(lambda x: x in keys, repo_info_dict)
    c, exists = get_one_or_create(session, Repo, github_repo_id=r_dict['github_repo_id'])
    if exists:
        print('Repo already exists', r_dict['github_repo_id'])
    else:
        c, created = get_one_or_create(session, Repo, **r_dict)
    return c
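The whitelisting lambda used here (and in the similar create_* helpers below) is equivalent to funcy.project, which states the intent directly. A sketch with hypothetical data:

import funcy

info = {'full_name': 'octocat/hello', 'github_repo_id': 1, 'owner': 'octocat', 'stars': 7}
keys = ('full_name', 'github_repo_id', 'owner')
assert (funcy.select_keys(lambda x: x in keys, info) ==
        funcy.project(info, keys))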
Example #5
def create_diff(session, d_dict):
    keys = ('filename_old', 'filename_new', 'filetype', 'is_rename', 'is_new',
            'is_deletion', 'raw_diff', 'additions', 'deletions')
    r_dict = funcy.select_keys(lambda x: x in keys, d_dict)
    c, exists = get_one_or_create(session, Diff, **r_dict)
    if exists:
        print('Diff already exists', r_dict)
    return c
Example #6
def create_developer(session, develop_dict):
    keys = ('name', 'email', 'username', 'affiliation')
    r_dict = funcy.select_keys(lambda x: x in keys, develop_dict)
    c, exists = get_one_or_create(session, Developer, email=develop_dict['email'])
    if exists:
        print('Developer already exists ', develop_dict['email'])
    else:
        c, created = get_one_or_create(session, Developer, **develop_dict)
    return c
Example #7
def public_attrs(obj):
    """ Return "public" attributes of the object.

    This function omits object's methods and attributes which name starts with
    an underscore.

    Returns:
        dict: Mapping of attributes to their values.
    """
    return select_keys(lambda k: not k.startswith('_'), obj.__dict__)
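A quick usage sketch; note that instance methods live on the class rather than in obj.__dict__, so the predicate only has to drop underscore-prefixed names:

class Point:
    def __init__(self):
        self.x, self.y = 1, 2
        self._cache = None

public_attrs(Point())   # -> {'x': 1, 'y': 2}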
Example #8
    def _post_save(self, sender, instance, **kwargs):
        # Invoke invalidations for both old and new versions of saved object
        old = _old_objs.__dict__.pop((sender, instance.pk), None)
        if old:
            invalidate_obj(old)
        invalidate_obj(instance)

        # Get all concrete parent and child classes, and mark those for invalidation too
        related_types = (get_related_classes(sender, parent_classes=True) +
                         get_related_classes(sender, parent_classes=False))
        for related_type in related_types:
            related_old = _old_objs.__dict__.pop((related_type, instance.pk), None)
            if related_old:
                invalidate_obj(related_old)
            try:
                related_instance = related_type.objects.get(pk=instance.pk)
                invalidate_obj(related_instance)
            except related_type.DoesNotExist:
                pass

        if in_transaction() or not settings.CACHEOPS_ENABLED:
            return

        # NOTE: it's possible for this to be a subclass, e.g. proxy, without cacheprofile,
        #       but its base having one. Or vice versa.
        #       We still need to invalidate in this case, but cache on save better be skipped.
        cacheprofile = model_profile(instance.__class__)
        if not cacheprofile:
            return

        # Enabled cache_on_save makes us write saved object to cache.
        # Later it can be retrieved with .get(<cache_on_save_field>=<value>)
        # <cache_on_save_field> is pk unless specified.
        # This sweet trick saves a db request and helps with slave lag.
        cache_on_save = cacheprofile.get('cache_on_save')
        if cache_on_save:
            # HACK: We get this object "from field" so it can contain
            #       some undesirable attributes or other objects attached.
            #       RelatedField accessors do that, for example.
            #
            #       So we strip down any _*_cache attrs before saving
            #       and later reassign them
            unwanted_dict = select_keys(r'^_.*_cache$', instance.__dict__)
            for k in unwanted_dict:
                del instance.__dict__[k]

            key = 'pk' if cache_on_save is True else cache_on_save
            cond = {key: getattr(instance, key)}
            qs = sender.objects.inplace().filter(**cond).order_by()
            if MAX_GET_RESULTS:
                qs = qs[:MAX_GET_RESULTS + 1]
            qs._cache_results(qs._cache_key(), [instance])

            # Reverting stripped attributes
            instance.__dict__.update(unwanted_dict)
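The select_keys call above uses another funcy predicate form: a string is treated as a regular expression and matched with re.search. A standalone sketch of the _*_cache stripping:

from funcy import select_keys

state = {'_author_cache': '<Author>', 'title': 'post', '_state': '<ModelState>'}
select_keys(r'^_.*_cache$', state)   # -> {'_author_cache': '<Author>'}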
Example #9
def create_change(session, c_dict):
    keys = ('function_changed', 'location_changed', 'raw_changes', 'additions', 'deletions')
    r_dict = funcy.select_keys(lambda x: x in keys, c_dict)
    c, exists = get_one_or_create(session, Change, raw_changes=r_dict['raw_changes'])
    if exists:
        print('Change already exists', r_dict)
    else:
        # calling get_one_or_create with the full dict errors on the JSON field,
        # so construct the object directly
        c = Change(**r_dict)
    return c
Example #10
def create_commit(session, commit_info_dict):
    keys = ('sha1', 'git_hash', 'subject', 'timestamp', 'date_time', 'commit_body', 'raw_text')
    comm_dict = funcy.select_keys(lambda x: x in keys, commit_info_dict)
    comm_dict['date_time'] = datetime.strptime(comm_dict['date_time'], '%Y-%m-%d %H:%M:%S %z')
    comm_dict['timestamp'] = datetime.fromtimestamp(int(comm_dict['timestamp']))
    # check if a commit already exists with same hash
    c, exists = get_one_or_create(session, Commit, git_hash=comm_dict['git_hash'])
    if exists:
        print('Commit obj already exists', comm_dict['git_hash'])
    else:
        c, created = get_one_or_create(session, Commit, **comm_dict)
    return c
Example #11
def on_demand_result(request, serie_validation_id):
    serie_validation = get_object_or_404(SerieValidation,
                                         id=serie_validation_id)
    if serie_validation.created_by_id != request.user.id:
        raise Http404

    if 'json' in request.GET:
        data = select_keys(r'kappa', serie_validation.__dict__)  # keep attrs matching /kappa/
        return JsonResponse(data)

    return render(request, 'tags/on_demand_result.j2',
                  {'serie_validation': serie_validation})
Example #12
    def __getitem__(self, key):
        if isinstance(key, slice):
            assert key.step is None
            start = self.start if key.start is None else key.start
            end = self.end if key.stop is None else key.stop

            return self.evolve(
                data=fn.select_keys(lambda t: start <= t < end, self.data),
                start=start,
                end=end,
            )
        return self.data[key]
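Here the predicate trims a time-indexed mapping to the half-open window [start, end). A minimal sketch without the surrounding class (fn is assumed to alias funcy):

import funcy as fn

data = {0: 'a', 1: 'b', 2: 'c', 3: 'd'}
start, end = 1, 3
fn.select_keys(lambda t: start <= t < end, data)   # -> {1: 'b', 2: 'c'}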
Example #13
    def _post_save(self, sender, instance, using, **kwargs):
        if not settings.CACHEOPS_ENABLED or no_invalidation.active:
            return

        # Invoke invalidations for both old and new versions of saved object
        old = _old_objs.__dict__.pop((sender, instance.pk), None)
        if old:
            invalidate_obj(old, using=using)
        invalidate_obj(instance, using=using)

        invalidate_o2o(sender, old, instance, using=using)

        # We run invalidations but skip caching if we are dirty
        if transaction_states[using].is_dirty():
            return

        # NOTE: it's possible for this to be a subclass, e.g. proxy, without cacheprofile,
        #       but its base having one. Or vice versa.
        #       We still need to invalidate in this case, but cache on save better be skipped.
        cacheprofile = model_profile(instance.__class__)
        if not cacheprofile:
            return

        # Enabled cache_on_save makes us write saved object to cache.
        # Later it can be retrieved with .get(<cache_on_save_field>=<value>)
        # <cache_on_save_field> is pk unless specified.
        # This sweet trick saves a db request and helps with slave lag.
        cache_on_save = cacheprofile.get('cache_on_save')
        if cache_on_save:
            # HACK: We get this object "from field" so it can contain
            #       some undesirable attributes or other objects attached.
            #       RelatedField accessors do that, for example.
            #
            #       So we strip down any _*_cache attrs before saving
            #       and later reassign them
            unwanted_dict = select_keys(r'^_.*_cache$', instance.__dict__)
            for k in unwanted_dict:
                del instance.__dict__[k]

            key = 'pk' if cache_on_save is True else cache_on_save
            cond = {key: getattr(instance, key)}
            qs = sender.objects.inplace().using(using).filter(
                **cond).order_by()
            # Mimic Django 3.0 .get() logic
            if MAX_GET_RESULTS and (not qs.query.select_for_update
                                    or connections[qs.db].features.
                                    supports_select_for_update_with_limit):
                qs.query.set_limits(high=MAX_GET_RESULTS)
            qs._cache_results(qs._cache_key(), [instance])

            # Reverting stripped attributes
            instance.__dict__.update(unwanted_dict)
Example #14
    def add(self, config, strip_app_name=False, filter_by_app_name=False,
            key_normalisation_func=default_key_normalisation_func):
        """
        Add a dict of config data. Values from later dicts will take precedence
        over those added earlier, so the order data is added matters.

        Note: Double underscores can be used to indicate dict key name
        boundaries. i.e. if we have a dict like:

        {
            'logging': {
                'level': INFO
                ...
            }
        }

        we could pass an environment variable LOGGING__LEVEL=DEBUG to override
        the log level.

        Note: Key names will be normalised by recursively applying the
        key_normalisation_func function. By default this will:

            1) Convert keys to lowercase
            2) Replace hyphens with underscores
            3) Strip leading underscores

        This allows key names from different sources (e.g. CLI args, env vars,
        etc.) to be able to override each other.

        :param config dict: config data
        :param strip_app_name boolean: If True, the configured app_name will be
        stripped from the start of top-level input keys if present.
        :param filter_by_app_name boolean: If True, keys that don't begin with
        the app name will be discarded.
        :return:
        """
        config = walk_recursive(key_normalisation_func, OrderedDict(config))

        if filter_by_app_name:
            config = funcy.compact(funcy.select_keys(
                lambda k: k.startswith(self._app_name), config))

        if strip_app_name:
            strip_app_name_regex = re.compile("^%s" % self._app_name)
            config = funcy.walk_keys(
                lambda k: re.sub(strip_app_name_regex, '', k), config)

        self._sources.append(config)

        return self             # enables a fluent interface
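A sketch of the filter-then-strip pipeline in isolation (app name and keys are hypothetical). The leading underscore left behind by the strip is later removed by the default key_normalisation_func:

import re
import funcy

app_name = 'myapp'
config = {'myapp_log_level': 'DEBUG', 'editor': 'vim'}
config = funcy.select_keys(lambda k: k.startswith(app_name), config)
config = funcy.walk_keys(lambda k: re.sub('^%s' % app_name, '', k), config)
# -> {'_log_level': 'DEBUG'}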
Example #15
    def _post_save(self, sender, instance, using, **kwargs):
        if not settings.CACHEOPS_ENABLED:
            return

        # Invoke invalidations for both old and new versions of saved object
        old = _old_objs.__dict__.pop((sender, instance.pk), None)
        if old:
            invalidate_obj(old, using=using)
        invalidate_obj(instance, using=using)

        # We run invalidations but skip caching if we are dirty
        if transaction_states[using].is_dirty():
            return

        # NOTE: it's possible for this to be a subclass, e.g. proxy, without cacheprofile,
        #       but its base having one. Or vice versa.
        #       We still need to invalidate in this case, but cache on save better be skipped.
        cacheprofile = model_profile(instance.__class__)
        if not cacheprofile:
            return

        # Enabled cache_on_save makes us write saved object to cache.
        # Later it can be retrieved with .get(<cache_on_save_field>=<value>)
        # <cache_on_save_field> is pk unless specified.
        # This sweet trick saves a db request and helps with slave lag.
        cache_on_save = cacheprofile.get('cache_on_save')
        if cache_on_save:
            # HACK: We get this object "from field" so it can contain
            #       some undesirable attributes or other objects attached.
            #       RelatedField accessors do that, for example.
            #
            #       So we strip down any _*_cache attrs before saving
            #       and later reassign them
            unwanted_dict = select_keys(r'^_.*_cache$', instance.__dict__)
            for k in unwanted_dict:
                del instance.__dict__[k]

            key = 'pk' if cache_on_save is True else cache_on_save
            cond = {key: getattr(instance, key)}
            qs = sender.objects.inplace().using(using).filter(**cond).order_by()
            qs._cache_results(qs._cache_key(), [instance])

            # Reverting stripped attributes
            instance.__dict__.update(unwanted_dict)
Example #16
    def _post_save(self, sender, instance, **kwargs):
        # Invoke invalidations for both old and new versions of saved object
        old = _old_objs.pop((get_thread_id(), sender, instance.pk), None)
        if old:
            invalidate_obj(old)
        invalidate_obj(instance)

        # NOTE: it's possible for this to be a subclass, e.g. proxy, without cacheprofile,
        #       but its base having one. Or vice versa.
        #       We still need to invalidate in this case, but cache on save better be skipped.
        if not instance._cacheprofile:
            return

        # Enabled cache_on_save makes us write saved object to cache.
        # Later it can be retrieved with .get(<cache_on_save_field>=<value>)
        # <cache_on_save_field> is pk unless specified.
        # This sweet trick saves a db request and helps with slave lag.
        cache_on_save = instance._cacheprofile.get('cache_on_save')
        if cache_on_save:
            # HACK: We get this object "from field" so it can contain
            #       some undesirable attributes or other objects attached.
            #       RelatedField accessors do that, for example.
            #
            #       So we strip down any _*_cache attrs before saving
            #       and later reassign them
            unwanted_dict = select_keys(r'^_.*_cache$', instance.__dict__)
            for k in unwanted_dict:
                del instance.__dict__[k]

            key = 'pk' if cache_on_save is True else cache_on_save
            # Django doesn't allow filters like related_id = 1337.
            # So we just hacky strip _id from end of a key
            # TODO: make it right, _meta.get_field() should help
            filter_key = key[:-3] if key.endswith('_id') else key

            cond = {filter_key: getattr(instance, key)}
            qs = sender.objects.inplace().filter(**cond).order_by()
            if MAX_GET_RESULTS:
                qs = qs[:MAX_GET_RESULTS + 1]
            qs._cache_results(qs._cache_key(), [instance])

            # Reverting stripped attributes
            instance.__dict__.update(unwanted_dict)
Example #17
    def _post_save(self, sender, instance, **kwargs):
        # Invoke invalidations for both old and new versions of saved object
        old = _old_objs.__dict__.pop((sender, instance.pk), None)
        if old:
            invalidate_obj(old)
        invalidate_obj(instance)

        if in_transaction() or not settings.CACHEOPS_ENABLED:
            return

        # NOTE: it's possible for this to be a subclass, e.g. proxy, without cacheprofile,
        #       but its base having one. Or vice versa.
        #       We still need to invalidate in this case, but cache on save better be skipped.
        cacheprofile = model_profile(instance.__class__)
        if not cacheprofile:
            return

        # Enabled cache_on_save makes us write saved object to cache.
        # Later it can be retrieved with .get(<cache_on_save_field>=<value>)
        # <cache_on_save_field> is pk unless specified.
        # This sweet trick saves a db request and helps with slave lag.
        cache_on_save = cacheprofile.get('cache_on_save')
        if cache_on_save:
            # HACK: We get this object "from field" so it can contain
            #       some undesirable attributes or other objects attached.
            #       RelatedField accessors do that, for example.
            #
            #       So we strip down any _*_cache attrs before saving
            #       and later reassign them
            unwanted_dict = select_keys(r'^_.*_cache$', instance.__dict__)
            for k in unwanted_dict:
                del instance.__dict__[k]

            key = 'pk' if cache_on_save is True else cache_on_save
            cond = {key: getattr(instance, key)}
            qs = sender.objects.inplace().filter(**cond).order_by()
            if MAX_GET_RESULTS:
                qs = qs[:MAX_GET_RESULTS + 1]
            qs._cache_results(qs._cache_key(), [instance])

            # Reverting stripped attributes
            instance.__dict__.update(unwanted_dict)
Example #18
def find_copies(path_to_dir, delete):
    dir_path = Path(path_to_dir)

    dir_iter = recursion_finder(dir_path)
    # group all files by size and drop the 0-byte group
    file_sizes = group_by(get_file_size, dir_iter)
    file_sizes = select_keys(None, file_sizes)
    # keep only groups that contain more than one file
    files_groups = filter(not_alone_item, file_sizes.values())

    # view copies grouped by sha1
    for copies in sha1_copies_from_groups(files_groups):

        show_list_files(copies, TEXTS['identical_files'])

        if not delete:
            continue

        # wait for user input
        nums = get_nums_for_delete(copies)

        # 0 means the user chose not to delete anything
        if 0 in nums:
            continue

        files_to_delete = [copies[num - 1] for num in nums]

        # show files to delete
        show_list_files(files_to_delete, TEXTS['delete_list'])

        if click.confirm(TEXTS['confirm']):
            delete_files(files_to_delete)
            click.echo(TEXTS['delete_success'])
        else:
            click.echo(TEXTS['delete_aborted'])

        click.echo('=' * 20)
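select_keys(None, file_sizes) exploits funcy's None-as-predicate shorthand: each key is tested for truthiness, so the 0-byte group (and any None-sized entries) is dropped. A sketch:

from funcy import group_by, select_keys

sizes = group_by(len, ['', 'ab', 'cd', ''])   # {0: ['', ''], 2: ['ab', 'cd']}
select_keys(None, sizes)                      # -> {2: ['ab', 'cd']}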
Example #19
    def char_info(self):
        return frozendict(select_keys({'left_char', 'right_char', 'char'}, asdict(self)))
Example #20
def without(call, *keys):
    """Return dictionary without the given keys"""
    return select_keys(lambda k: k not in keys, call())
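Usage sketch, assuming call is any zero-argument callable that produces the dict (the payload below is hypothetical):

payload = lambda: {'id': 1, 'token': 'secret', 'name': 'demo'}
without(payload, 'token')   # -> {'id': 1, 'name': 'demo'}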
Example #21
def dump_travis_env_vars():
    travis_env_vars = funcy.select_keys(lambda key: key.startswith('TRAVIS'),
                                        os.environ)
    logger.debug(travis_env_vars)