Example No. 1
def effort_user_timeline_chart_data(queryset: QuerySet) -> Dict:
    queryset_values = queryset.order_by("date").values(
        "date", "user__acronym").annotate(total_effort=Sum("effort"))
    items_by_user = pydash.group_by(queryset_values, "user__acronym")
    return {
        user__acronym: items_by_user[user__acronym]
        for user__acronym in items_by_user.keys()
    }
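
A minimal, self-contained sketch of what pydash.group_by produces here, using made-up rows in place of the queryset values (the trailing dict comprehension only copies the mapping group_by already returns):

import pydash

# Hypothetical rows shaped like the .values() output above
rows = [
    {"date": "2024-01-02", "user__acronym": "AB", "total_effort": 5},
    {"date": "2024-01-03", "user__acronym": "AB", "total_effort": 2},
    {"date": "2024-01-02", "user__acronym": "CD", "total_effort": 3},
]

grouped = pydash.group_by(rows, "user__acronym")
# {'AB': [<two AB rows>], 'CD': [<one CD row>]}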
Example No. 2
def monthly_chart_data(queryset: QuerySet) -> Dict:
    series_queryset = EventType.objects.filter(
        events__in=queryset).distinct().order_by("name").values(
            "name", "colour")
    series_dict = {
        item["name"]: {
            "name": item["name"],
            "color": item["colour"],
            "data": []
        }
        for item in series_queryset
    }

    chart_data: Dict[str, Any] = {"categories": [], "series": []}
    data_queryset = (queryset.annotate(
        datetime=TruncMonth("start_datetime")).order_by(
            "datetime", "type__name").values(
                "datetime",
                "type__name").distinct().annotate(count=Count("id")))
    data_dict = [{
        **item, "month": item["datetime"].strftime("%Y-%m")
    } for item in data_queryset]

    structured_data_dict = pydash.group_by(data_dict, "month")
    for month, data in structured_data_dict.items():
        structured_data_dict[month] = pydash.group_by(data, "type__name")

    chart_data["categories"] = list(structured_data_dict.keys())
    for category in chart_data["categories"]:
        event_types = structured_data_dict[category].keys()
        for event_type, serie in series_dict.items():
            if event_type in event_types:
                series_dict[event_type]["data"].append(
                    structured_data_dict[category][event_type][0]["count"])
            else:
                series_dict[event_type]["data"].append(None)
    chart_data["series"] = list(series_dict.values())

    average_data = []
    for index in range(len(chart_data["categories"])):
        truly_values = pydash.compact(
            [serie["data"][index] for serie in chart_data["series"]])
        average_data.append(sum(truly_values))
    chart_data["average"] = mean(average_data) if len(average_data) else None

    return chart_data
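
Illustratively, the returned structure is shaped for a categories/series chart; with invented event types and counts (not from any real queryset) it would look roughly like this:

# Hypothetical result shape
chart_data = {
    "categories": ["2023-01", "2023-02"],
    "series": [
        {"name": "Meeting", "color": "#ff0000", "data": [3, None]},
        {"name": "Training", "color": "#00ff00", "data": [1, 2]},
    ],
    # mean of the per-month totals: (3 + 1) and (2) -> mean([4, 2]) == 3.0
    "average": 3.0,
}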
Example No. 3
def adjust_signal_strength(
    signals: List[Signal],
    trials: int,
    aggregate_fn: Any = np.mean,
) -> List[Signal]:
    """
    Re-evaluate signal strength.

    Re-evaluation is based on frequency of occurrence and strength of each occurrence,
    normalized by trials.

    Here trials are total attempts made to generate signals.
    - Worst case: each attempt yields a unique signal; each signal's strength is dampened.
    - Best case: every attempt yields the same signal; that signal's strength is boosted.

    :param signals: A signal is a tuple of name and strength.
    :type signals: List[Signal]
    :param trials: Total attempts made to generate signals.
    :type trials: int
    :param aggregate_fn: A function to normalize a list of floating point values, defaults to np.mean
    :type aggregate_fn: Any, optional
    :raises TypeError: if :code:`aggregate_fn` is not a :code:`callable`.
    :return: A list of normalized signals.
    :rtype: List[Signal]
    """
    # Group intents by name.
    if not callable(aggregate_fn):
        raise TypeError(
            "Expected aggregate_fn to be a callable that"
            f" operates on a list of floats. Found {type(aggregate_fn)} instead."
        )
    signal_groups = py_.group_by(signals, lambda signal: signal[const.SIGNAL.NAME])
    logger.debug("signal groups:")
    logger.debug(signal_groups)

    signals_ = sorted(
        [
            (
                signal_name,
                float(
                    # Averaging (or some other aggregate fn) signal strength values.
                    aggregate_fn([signal[const.SIGNAL.STRENGTH] for signal in signals])
                ),
                float(len(signals) / trials),
            )
            for signal_name, signals in signal_groups.items()
        ],
        key=lambda signal: signal[const.SIGNAL.REPRESENTATION],
        reverse=True,
    )

    logger.debug("sorted and ranked signals:")
    logger.debug(signals_)
    return signals_
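
A self-contained sketch of the core grouping-and-aggregation step, assuming each signal is a plain (name, strength) tuple (the names and numbers below are invented):

import numpy as np
import pydash as py_

signals = [("greet", 0.9), ("greet", 0.7), ("bye", 0.4)]
trials = 4

groups = py_.group_by(signals, lambda s: s[0])   # {'greet': [...], 'bye': [...]}
adjusted = [
    (name, float(np.mean([s[1] for s in grp])), len(grp) / trials)
    for name, grp in groups.items()
]
# roughly [('greet', 0.8, 0.5), ('bye', 0.4, 0.25)]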
Example No. 4
 def convert_to_pivot(start_date, end_date, results):
   """ Конвертация в сводную форму """
   g = group_by(results, 'employee.name')
   names = sorted_uniq(map_(results, 'employee.name'))
   list_result = list(
       map(lambda name: {'employee_name': name, 'results': g[name]}, names))
   print(list_result)
   list_result = ResultCtrl.fill_empty_day_in_result(
       start_date, end_date, list_result)
   return list_result
Example No. 5
 def _get_extant_lines(
     self, manuscript_id: int
 ) -> Mapping[ManuscriptLineLabel, ExtantLine]:
     return pydash.group_by(
         (
             ExtantLine.of(line, manuscript_id)
             for line in self.lines
             if manuscript_id in line.manuscript_ids
             and line.get_manuscript_text_line(manuscript_id) is not None
         ),
         lambda extant_line: extant_line.label,
     )
Example No. 6
 def get_themes_by_ids(cls, ids, soft=True):
     res = db.session.query(cls.id, Theme).filter(
         cls.id.in_(ids),
         cls.id == ThemeProduct.product_id,
         ThemeProduct.theme_id == Theme.id,
         ThemeProduct.delete_time == None,
         Theme.delete_time == None,
     ).filter_by(soft=soft).order_by(cls.id.desc()).all()
     res = group_by(res, 'id')
     for k, v in res.items():
         res[k] = [i[1].name for i in v]
     return res
Example No. 7
def is_aeb_compact(aeb_list):
    '''
    Check if the aeb space (aeb_list) is compact: the unique count must equal the shape along each of the a, e axes. For b, within each unique (a, e) group, the unique count must equal the shape.
    '''
    aeb_shape = util.get_aeb_shape(aeb_list)
    aeb_uniq = [len(np.unique(col)) for col in np.transpose(aeb_list)]
    ae_compact = np.array_equal(aeb_shape, aeb_uniq)
    b_compact = True
    for ae, ae_b_list in ps.group_by(aeb_list, lambda aeb: f'{aeb[0]}{aeb[1]}').items():
        b_shape = util.get_aeb_shape(ae_b_list)[2]
        b_uniq = [len(np.unique(col)) for col in np.transpose(ae_b_list)][2]
        b_compact = b_compact and np.array_equal(b_shape, b_uniq)
    aeb_compact = ae_compact and b_compact
    return aeb_compact
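
The grouping key above is just the (a, e) pair rendered as a string; a tiny sketch with made-up aeb coordinates:

import pydash as ps

aeb_list = [(0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0)]
groups = ps.group_by(aeb_list, lambda aeb: f'{aeb[0]}{aeb[1]}')
# {'00': [(0, 0, 0), (0, 0, 1)], '01': [(0, 1, 0)], '10': [(1, 0, 0)]}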
Example No. 8
def is_aeb_compact(aeb_list):
    '''
    Check if the aeb space (aeb_list) is compact: the unique count must equal the shape along each of the a, e axes. For b, within each unique (a, e) group, the unique count must equal the shape.
    '''
    aeb_shape = util.get_aeb_shape(aeb_list)
    aeb_uniq = [len(np.unique(col)) for col in np.transpose(aeb_list)]
    ae_compact = np.array_equal(aeb_shape, aeb_uniq)
    b_compact = True
    for ae, ae_b_list in _.group_by(aeb_list, lambda aeb: f'{aeb[0]}{aeb[1]}').items():
        b_shape = util.get_aeb_shape(ae_b_list)[2]
        b_uniq = [len(np.unique(col)) for col in np.transpose(ae_b_list)][2]
        b_compact = b_compact and np.array_equal(b_shape, b_uniq)
    aeb_compact = ae_compact and b_compact
    return aeb_compact
Example No. 9
def _print_category(events, cat="Uncategorized", n=10):
    print(f"Showing top {n} from category: {cat}")
    events = [
        e for e in sorted(events, key=lambda e: -e.duration)
        if cat in e.data["$tags"]
    ]
    print(f"Total time: {sum((e.duration for e in events), timedelta(0))}")
    groups = {
        k: (v[0].data, sum((e.duration for e in v), timedelta(0)))
        for k, v in pydash.group_by(
            events, lambda e: e.data.get("title", "unknown")).items()
    }
    for _, (v, duration) in list(sorted(groups.items(),
                                        key=lambda g: -g[1][1]))[:n]:
        print(str(duration).split(".")[0], f"{v['title'][:60]} [{v['app']}]")
Example No. 10
  def bulk_import_gs_as_biographies_activities(self, url=None, owner=None, sheet=None, **options):
    """
    usage:
    python -W ignore manage.py task bulk_import_gs_as_biographies_activities --url=<your url> --sheet=activities
    """
    rows, headers = utils.bulk_import_gs(url=url, sheet=sheet, use_cache=options['cache'], required_headers=['person_slug'])
    
    # group rows by person_slug
    people = pyd.group_by(rows, 'person_slug')
    #print people
    data_paths =  utils.data_paths(headers=headers) 
    #print data_paths
    # basic data structure based on headers column
    data_structure = {}

    for i, path, is_list in data_paths:
      utils.nested_set(data_structure, path, {})
    #print data_structure

    def mapper(d):
      #print d
      _d = {
        'sorting': pyd.get(d, u'data__activity__sorting', ''),
        'type': pyd.get(d, u'data__activity__type', ''),
        'description': {
          'en_US': pyd.get(d, u'data__activity__description__en_US', ''),
          'fr_FR': pyd.get(d, u'data__activity__description__fr_FR', '')
        },
        'date': {
          'en_US': pyd.get(d, u'data__activity__date__en_US', ''),
          'fr_FR': pyd.get(d, u'data__activity__date__fr_FR', '')
        },
        'start_date':  pyd.get(d, u'data__activity__start_date'),
        'end_date':  pyd.get(d, u'data__activity__end_date')
      }
      return _d



    for slug, activities in people.items():
      logger.debug('adding %s activities to document {slug:%s}' % (len(activities), slug))
      
      doc = Document.objects.get(slug=slug,type=Document.ENTITY)

      doc.data['activities'] = list(map(mapper, activities))
      doc.save()
Example No. 11
 def test_pydash(self):
     results = Session.query(Result).all()
     g = group_by(results, 'employee.name')
     # print(g)
     names = sorted_uniq(map_(results, 'employee.name'))
     # print(names)
     l = list(
         map(lambda name: {
             'employee_name': name,
             'results': g[name]
         }, names))
     # print(l)
     start_date = date.today().strftime('%Y-%m-%d')
     end_date = (date.today() + timedelta(days=2)).strftime('%Y-%m-%d')
     ret = ResultCtrl.fill_empty_day_in_result(start_date, end_date, l)
     # print('---------------------')
     # print(ret[0]['results'])
     self.assertEqual(len(ret[0]['results']), 3)
     self.assertEqual(len(ret[1]['results']), 3)
Example No. 12
def adjust_signal_strength(
    signals: List[Signal],
    trials: int,
    aggregate_fn: Any = np.mean,
) -> List[Signal]:
    """
    Re-evaluate signal strength.

    Re-evaluation is based on frequency of occurrence and strength of each occurrence,
    normalized by trials.

    Here trials are total attempts made to generate signals.
    - Worst case: each attempt yields a unique signal; each signal's strength is dampened.
    - Best case: every attempt yields the same signal; that signal's strength is boosted.

    Args:
        signals (List[Signal]): A signal is a tuple of name and strength.
        trials (int): Total attempts made to generate signals.

    Returns:
        List[Signal]: A list of strength adjusted signals. May not be as long as the input.
    """
    # Group intents by name.
    if not callable(aggregate_fn):
        raise TypeError(
            "Expected aggregate_fn to be a callable that"
            f" operates on a list of floats. Found {type(aggregate_fn)} instead."
        )
    signal_groups = py_.group_by(signals, lambda signal: signal[0])
    signals_ = [
        (
            signal_name,
            float(
                # Averaging (or some other aggregate fn) signal strength values.
                aggregate_fn(
                    [signal[const.SIGNAL.STRENGTH] for signal in signals])),
            float(len(signals) / trials),
        ) for signal_name, signals in signal_groups.items()
    ]
    return sorted(signals_,
                  key=lambda signal: signal[const.SIGNAL.STRENGTH],
                  reverse=True)
Example No. 13
def aggregate_scores(user, test, cycles, formulation, keys, count_values,
                     filters):
    scores_filter = {}
    if user and hasattr(user, "access_level") and hasattr(user, "access_area"):
        access_level = user.access_level
        access_area = user.access_area
        if access_level and access_area:
            scores_filter[access_level.lower()] = access_area
    if user.is_superuser:
        scores_filter = filters
    score_objects = Score.objects.filter(**scores_filter).values(
        "data", "cycle")
    grouped_objects = pydash.group_by(score_objects, lambda x: x["cycle"])

    def get_count_key(value):
        value_as_dict = value.get("data")
        return value_as_dict.get(test, {}).get(
            formulation, None) if type(value_as_dict) is dict else None

    def agg(value):
        values = grouped_objects.get(value, [])
        result = {"cycle": value}
        total = len(values)
        yes_count_value = count_values[YES]
        no_count_value = count_values[NO]
        not_reporting_count_value = count_values[NOT_REPORTING]
        if total > 0:
            counts = pydash.count_by(values, get_count_key)
            yes_count = counts.get(yes_count_value, 0)
            no_count = counts.get(no_count_value, 0)
            not_reporting_count = counts.get(not_reporting_count_value, 0)
            result[keys[YES]] = (yes_count * 100 / float(total))
            result[keys[NO]] = (no_count * 100 / float(total))
            result[keys[NOT_REPORTING]] = (not_reporting_count * 100 /
                                           float(total))
        else:
            result[keys[YES]] = 0
            result[keys[NO]] = 0
            result[keys[NOT_REPORTING]] = 0
        return result

    return pydash.map_(cycles, agg)
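
The per-cycle aggregation reduces to pydash.count_by over the grouped rows followed by a percentage; a small sketch with invented answers:

import pydash

answers = ["YES", "YES", "NO", None]                  # hypothetical per-cycle answers
counts = pydash.count_by(answers, lambda a: a)        # {'YES': 2, 'NO': 1, None: 1}
total = len(answers)
yes_pct = counts.get("YES", 0) * 100 / float(total)   # 50.0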
Example No. 14
def renderImage(data, out):

    data = list(sorted(data, key=itemgetter('Org_Disc_Price')))
    data_grouped = group_by(data,
                            lambda row: round(row['Org_Disc_Price'] / 10) * 10)

    groups_count = len(data_grouped.keys())
    font_size = 25
    line_height = font_size + 5
    padding_left = 50
    font = ImageFont.truetype("./OpenSans-Regular.ttf", font_size)
    img = Image.new("RGB", (1024, line_height * (len(data) + groups_count)),
                    bg_color)
    draw = ImageDraw.Draw(img)
    current_y = 0
    draw.text((padding_left, current_y),
              ('>' * 20 + 'Generated {0}' + '<' * 20).format(
                  datetime.datetime.now()),
              font_color,
              font=font)

    current_y += line_height
    for group_name, group in data_grouped.items():
        draw.text((padding_left, current_y),
                  (' ' * 20 + '>' * 20 + '{0} - {1} SGD' + '<' * 20).format(
                      group_name, group_name + 10),
                  font_color,
                  font=font)
        current_y += line_height
        for row in group:
            draw.text((padding_left, current_y),
                      '{0}'.format(stripVolume(row['ProductGroupTitle'])),
                      font_color,
                      font=font)
            current_y += line_height

    draw = ImageDraw.Draw(img)
    img.save(out)
Example No. 15
def aggregate_scores(user, test, cycles, formulation, keys, count_values, filters):
    scores_filter = {}
    if user:
        access_level = user.access_level
        access_area = user.access_area
        if access_level and access_area:
            scores_filter[access_level.lower()] = access_area
    if user.is_superuser:
        scores_filter = filters
    score_objects = Score.objects.filter(**scores_filter).values(test, "cycle")
    grouped_objects = pydash.group_by(score_objects, lambda x: x["cycle"])

    def get_count_key(value):
        value_as_dict = json.loads(value[test])
        return value_as_dict.get(formulation, None) if type(value_as_dict) is dict else None

    def agg(value):
        values = grouped_objects.get(value, [])
        result = {'cycle': value}
        total = len(values)
        yes_count_value = count_values[YES]
        no_count_value = count_values[NO]
        not_reporting_count_value = count_values[NOT_REPORTING]
        if total > 0:
            counts = pydash.count_by(values, get_count_key)
            yes_count = counts.get(yes_count_value, 0)
            no_count = counts.get(no_count_value, 0)
            not_reporting_count = counts.get(not_reporting_count_value, 0)
            result[keys[YES]] = (yes_count * 100 / float(total))
            result[keys[NO]] = (no_count * 100 / float(total))
            result[keys[NOT_REPORTING]] = (not_reporting_count * 100 / float(total))
        else:
            result[keys[YES]] = 0
            result[keys[NO]] = 0
            result[keys[NOT_REPORTING]] = 0
        return result

    return pydash.collect(cycles, agg)
Example No. 16
    def define_products(self, data):
        """
        处理商品数据
        1 按商品ID分组;
        2 分组后的数据合并数值成为一个字典, 数量与金额相加, 其它不变;
        3 将处理完的数据(一个字典)添加进新的数组并返回
        """
        def ass(obj_v, src_v, k, obj, src):
            if k == 'id' or k == 'name' or k == 'image':
                return obj_v
            elif k == 'count':
                return obj_v + src_v
            else:
                return Decimal(obj_v) + Decimal(src_v)

        group = group_by(data, 'id')
        res = []
        for item in group.values():
            if len(item) > 1:
                obj = assign_with(*item, ass)
            else:
                obj = item[0]
            res.append(obj)
        return res
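
A minimal sketch of the same group-and-merge idea on made-up cart rows; the merge is written as a plain loop here instead of pydash.assign_with so the sketch stays self-contained:

from decimal import Decimal
import pydash

rows = [
    {"id": 1, "name": "Tea", "image": "t.png", "count": 2, "price": "3.50"},
    {"id": 1, "name": "Tea", "image": "t.png", "count": 1, "price": "1.75"},
    {"id": 2, "name": "Coffee", "image": "c.png", "count": 1, "price": "4.00"},
]

merged = []
for items in pydash.group_by(rows, "id").values():
    obj = dict(items[0])
    for extra in items[1:]:
        obj["count"] += extra["count"]                                   # quantities summed
        obj["price"] = Decimal(obj["price"]) + Decimal(extra["price"])   # amounts summed
    merged.append(obj)
# merged[0] -> {'id': 1, 'name': 'Tea', 'image': 't.png', 'count': 3, 'price': Decimal('5.25')}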
Example No. 17
def file_upload_batch():
    try:
        fileHandler = FileHandler()
        # {
        # files:[{binary1}, {binary2}]
        # files_info: {sub_dir: '', file_key: '', curr_chunk: 1, total_chunks: 10, md5: 'abcdefghijklmn' }
        # | [
        #   {file_id: 1,sub_dir: '', file_key: '', curr_chunk: 1, total_chunks: 10, md5: 'abcdefghijklmn'},
        #   {file_id: 2,sub_dir: '', file_key: '', curr_chunk: 2, total_chunks: 10, md5: 'abcdefghijklmn'}
        # ]
        # }
        # List of files or chunks; may contain a mix of small complete files and partial chunks
        files = []
        for f in request.files:
            files.append(request.files[f])

        # File metadata
        orignal_files_info = request.form.get('files_info')
        orignal_files_info = json.loads(orignal_files_info) if isinstance(
            orignal_files_info, str) else orignal_files_info
        files_info = orignal_files_info
        if not files:
            return standard_expection('文件列表为空.')
        # Supports uploading either a batch of files or a single file
        if not isinstance(files_info, (list, dict)):
            return standard_expection('文件信息无法解析.')
        if isinstance(files_info, list) and len(files_info) != len(files):
            return standard_expection('文件信息与文件对象不匹配.')
        if isinstance(files_info, dict) and len(files) != 1:
            return standard_expection('文件信息与文件对象不匹配.')

        tenant_id = g.tenant_id if hasattr(g, 'tenant_id') else 0
        user_id = g.user_id if hasattr(g, 'user_id') else 0
        cache_expired_time = current_app.config['REDIS_CACHE_EXPIRE_FILE']

        files_info = [files_info] if isinstance(files_info,
                                                dict) else files_info

        if _.some(
                files_info, lambda x: x.get('total_chunks') is None or
            (x.get('curr_chunk') is not None and not _.is_integer(
                x.get('curr_chunk')))):
            return standard_expection('文件信息格式错误 files_info.')

        file_groups = _.group_by(files_info, lambda x: x.get('file_key'))
        files_set = {}
        for f in files:
            files_set[f.name] = f

        msg_list = []
        md5_error_list = []
        from app import aios_redis
        index = 0
        for file_key, file_partations_info in file_groups.items():
            # file_key: unique identifier for each file; if chunked, every chunk shares the same file_key
            # file_partations_info: a group of chunks (possibly only some chunks of a large file) or the info for a single complete file
            for file_partation in file_partations_info:
                id = file_partation.get('id')
                # Original file name
                file_name = file_partation.get('file_name')
                # Default directory convention: <module>/<tenant>/<custom dir>/original file name
                tenant_id = tenant_id
                sub_dir = file_partation.get(
                    'sub_dir', os.path.join('cs', str(tenant_id), file_key))
                # curr_chunk starts at 1
                curr_chunk = file_partation.get('curr_chunk')
                # Total number of chunks for this file
                total_chunks = file_partation.get('total_chunks')
                # md5 value; if present, used for checksum verification
                md5 = file_partation.get('md5')
                # File object
                file = files_set[id]
                # file = files[index]
                index += 1

                absolute_dir_path = fileHandler.get_standard_sub_dir(sub_dir)
                absolute_file_path = os.path.join(absolute_dir_path,
                                                  f'{file_key}.{curr_chunk}')

                # Guard against the same chunk being re-uploaded multiple times
                # is_valid = True
                # for f in os.listdir(absolute_dir_path):
                #     # The file name may contain "-"
                #     if '-' in f.split('.')[1]:
                #         [(start, end)] = re.findall('.*\.(\d+)-(\d+)$', f)
                #         if int(start) <= int(curr_chunk) <= int(end):
                #             is_valid = False
                #             print('=====分片已存在', f, f'{file_key}.{curr_chunk}')
                #             if os.path.exists(absolute_file_path):
                #                 os.rename(absolute_file_path, absolute_file_path + '.deleted')
                #             continue
                # if is_valid is False:
                #     continue
                if os.path.exists(absolute_file_path):
                    os.remove(absolute_file_path)
                file.save(absolute_file_path)

                # File md5 verification
                # if md5:
                #     is_valid, msg = fileHandler.valid_md5(absolute_file_path, md5)
                #     if not is_valid:
                #         md5_error_list.append({'file_key': file_key, 'curr_chunk': curr_chunk})
                #         continue

                aios_redis.set(f'plus_uploader:{file_key}:{curr_chunk}',
                               'done', cache_expired_time)

                fileHandler.log_print(file_key, curr_chunk,
                                      f'{curr_chunk}/{total_chunks}')

                # Publish a message so the background thread starts trying to merge the file
                msg = {
                    'file_key': file_key,
                    'dir_path': absolute_dir_path,
                    'curr_chunk': curr_chunk,
                    'total_chunks': total_chunks,
                    'file_name': file_name,
                    'tenant_id': tenant_id,
                    'user_id': user_id,
                    'cache_expired_time': cache_expired_time
                }
                msg_list.append(msg)

        if len(md5_error_list):
            print('文件MD5校验异常')
            return standard_expection(json.dumps(md5_error_list))

        succ_list, err_list = fileHandler.multi_process_handler(msg_list)
        if len(err_list):
            print('文件合并异常')
            return standard_expection(json.dumps(err_list))

        partations_info = []
        # Directory inside the container
        container_dir = os.path.join(os.getenv('FLASK_CONFIG'),
                                     sub_dir.strip(os.path.sep)).replace(
                                         os.path.sep, '/')
        # Get completion status and storage directory for the current chunks
        for succ in succ_list:
            partations_info.append({
                'file_key': succ['file_key'],
                'curr_chunk': succ['curr_chunk'],
                'status': True,
                'host': container_dir,
                'msg': 'ok'
            })
        print('<成功>', orignal_files_info)
        return standard_response(partations_info, 200)
    except Exception as err:
        import traceback
        traceback.print_exc()
        print('<失败>')
        return standard_expection(str(err))
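
For reference, a hypothetical files_info payload covering the fields this handler actually reads (all values invented):

files_info = [
    # 'id' must match the multipart form field name of the corresponding file part
    {"id": "f1", "file_name": "report.pdf", "file_key": "abc123",
     "curr_chunk": 1, "total_chunks": 2, "md5": "d41d8cd98f00b204e9800998ecf8427e"},
    {"id": "f2", "file_name": "report.pdf", "file_key": "abc123",
     "curr_chunk": 2, "total_chunks": 2, "md5": "d41d8cd98f00b204e9800998ecf8427e"},
]
# _.group_by(files_info, lambda x: x.get('file_key')) puts both chunks under 'abc123'.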
Example No. 18
 def get_context_data(self, **kwargs):
     strains = Strain.objects.order_by('name').values('name', 'webId')
     strains = pydash.group_by(strains, lambda strain: strain['name'][0].lower() if strain['name'][0].isalpha() else '#')
     kwargs['strains'] = sorted(strains.items())
     return super(StrainListView, self).get_context_data(**kwargs)
Example No. 19
 def stack_by(self, iteratee=None):
     """Group items returned by :meth:`all` using `iteratee`."""
     return pyd.group_by(self.all(), iteratee)
Example No. 20
def test_group_by(case, expected):
    assert _.group_by(*case) == expected
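
A representative case/expected pair for such a parametrized test; the grouping shown is standard pydash.group_by behavior:

import pydash as _

case = ([4.2, 6.1, 6.4], lambda num: int(num))
expected = {4: [4.2], 6: [6.1, 6.4]}
assert _.group_by(*case) == expected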
Example No. 21
def extract_locations_from_records(records):
    grouped_by_location = pydash.group_by(records, lambda item: item.location)
    locations = list(grouped_by_location.keys())
    return dict((loc, loc) for loc in locations)
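
Here group_by is used only to deduplicate the location values; a tiny sketch with a hypothetical record type:

import pydash
from collections import namedtuple

Record = namedtuple("Record", ["location"])     # stand-in for the real record class
records = [Record("Kampala"), Record("Gulu"), Record("Kampala")]

grouped = pydash.group_by(records, lambda item: item.location)
list(grouped.keys())   # ['Kampala', 'Gulu'] -> returned as {'Kampala': 'Kampala', 'Gulu': 'Gulu'}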
Example No. 22
def group_by_url_hostname(events: List[Event]) -> Dict[str, List[Event]]:
    return pydash.group_by(events, lambda e: _hostname(e.data["url"]))
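
Event and _hostname come from the surrounding project; a self-contained sketch of the same grouping using plain dicts and urllib.parse instead:

import pydash
from urllib.parse import urlparse

events = [                                       # hypothetical events
    {"data": {"url": "https://github.com/pydash/pydash"}},
    {"data": {"url": "https://github.com/ActivityWatch/aw-core"}},
    {"data": {"url": "https://docs.python.org/3/"}},
]

by_host = pydash.group_by(events, lambda e: urlparse(e["data"]["url"]).hostname)
# {'github.com': [<2 events>], 'docs.python.org': [<1 event>]}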
Example No. 23
 def get_context_data(self, **kwargs):
     historys = History.objects.order_by('standing')
     kwargs['historys'] = pydash.group_by(historys, lambda history: history.get_period_display())
     return super(HistoryListView, self).get_context_data(**kwargs)
Example No. 24
 def stack_by(self, callback=None):
     """Group items returned by :meth:`all` using `callback`."""
     return pyd.group_by(self.all(), callback)
Example No. 25
def effort_role_timeline_chart_data(queryset: QuerySet) -> Dict:
    queryset_values = queryset.order_by("date").values(
        "date", "role").annotate(total_effort=Sum("effort"))
    items_by_role = pydash.group_by(queryset_values, "role")
    return {role: items_by_role[role] for role in items_by_role.keys()}
Example No. 26
    def update_parser_for_functions(self, modul_name, parsers, class_ref):

        parent_path_parser = parsers[modul_name]

        for function_name, function_ref in inspect.getmembers(class_ref):
            if inspect.isfunction(function_ref):
                groups = {}
                if not "callables" in parent_path_parser:
                    parent_path_parser['callables'] = {}
                if not "subparsers" in parent_path_parser:
                    parent_path_parser['subparsers'] = parent_path_parser[
                        'parser'].add_subparsers(
                            help=self.get_config_value(modul_name, 'sub_help'))
                if hasattr(function_ref, '_action_param_action'):
                    if not 'action_nargs' in parent_path_parser:
                        parent_path_parser['action_nargs'] = {}

                    parent_path_parser['action_nargs'][
                        function_ref.
                        __name__] = function_ref._action_param_nargs
                    parent_path_parser['parser'].add_argument(
                        f'--{function_name}',
                        action=function_ref._action_param_action(
                            *function_ref._action_param_args,
                            **function_ref._action_param_kwargs),
                        nargs=function_ref._action_param_nargs)
                    continue
                if "subparsers" in parent_path_parser and hasattr(
                        parent_path_parser['subparsers'],
                        'choices') and function_name in parent_path_parser[
                            'subparsers'].choices:
                    parser = parent_path_parser['subparsers'].choices[
                        function_name]
                else:
                    parser = parent_path_parser['subparsers'].add_parser(
                        function_name,
                        description=self.get_config_value(
                            f'{modul_name}.{function_name}', 'description'))
                if hasattr(function_ref, '_arg_groups'):
                    groups = map_values(
                        group_by(function_ref._arg_groups, 'name'),
                        lambda groupArray: self.add_group_to_parrser(
                            parser, groupArray[-1]))
                if function_name in parent_path_parser['callables']:
                    sys.stderr.write(
                        f'{function_name} in {parent_path_parser["callables"]["class_ref"].__name__} is being overwritten by {function_name} in {class_ref.__name__}'
                    )

                parent_path_parser['callables'][function_name] = {
                    "parser": parser,
                    "class_ref": class_ref,
                    "function_name": function_name,
                    "groups": groups
                }

                for param in inspect.signature(
                        function_ref).parameters.values():

                    parser = parent_path_parser['callables'][function_name][
                        'parser']
                    if param.annotation.__class__ == ParserArgType:

                        args = tuple([f'--{param.name}']) if len(
                            param.annotation.args
                        ) == 0 else param.annotation.args
                        if len(param.annotation.args
                               ) > 0 and 'dest' not in param.annotation.kwargs:
                            param.annotation.kwargs['dest'] = param.name
                        if hasattr(param.annotation, 'group'):
                            group = get(groups, param.annotation.group)
                            if group:
                                group.add_argument(*args,
                                                   **param.annotation.kwargs)
                            else:
                                raise Exception(
                                    f'it appears that the group "{param.annotation.group}" is referenced by an argument but not found when building the parser; existing groups are {json.dumps(list(groups.keys()))}'
                                )
                        else:
                            parser.add_argument(*args,
                                                **param.annotation.kwargs)
                    if param.annotation == int:
                        parser.add_argument(f'--{param.name}',
                                            type=param.annotation)
                    if param.annotation == str:
                        parser.add_argument(f'--{param.name}',
                                            type=param.annotation)
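
The groups mapping above is built by grouping the declared argument groups by name and keeping only the last definition per name (which the original then turns into an argparse group via add_group_to_parrser); the selection step itself looks like this on made-up group definitions:

import pydash

arg_groups = [
    {"name": "io", "title": "Input/Output"},
    {"name": "io", "title": "Input/Output (extended)"},
    {"name": "net", "title": "Network"},
]

last_per_name = pydash.map_values(
    pydash.group_by(arg_groups, "name"), lambda grp: grp[-1])
# {'io': {'name': 'io', 'title': 'Input/Output (extended)'}, 'net': {'name': 'net', 'title': 'Network'}}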
Example No. 27
def get_functions(members):
    return [(v['name'], v) for v in py_.group_by(
        members.values(), lambda v: v['kind'])['FUNCTION_DECL']
            if v['result_type'] and not v['name'].startswith('operator ')
            and not any([a['kind'] == 'POINTER' for a in v['arguments']])
            and all([a['name'] for a in v['arguments']])]
Example No. 28
class OutputGridSizeSolver:
    cache = {}
    dtype: Tuple[int, int].__origin__
    functions = [
        OutputGridSizeTransforms.identity,
        OutputGridSizeTransforms.fixed_size,
        OutputGridSizeTransforms.ratio,
    ]
    arguments = py.group_by(
        [Query.grid_size_ratio_task, Query.count_nonzero, Query.unique_colors],
        lambda f: inspect.signature(f).return_annotation)

    def __init__(self):
        pass

    @classmethod
    def equals(cls, input: Tuple[int, int], output: Tuple[int, int]):
        return input == output

    def fit_predict(self, task: Task):
        rule = self.fit(task['train'])
        predict = self.predict(task['test'])
        return predict

    def fit(self, problemset: Union[ProblemSet, Task]):
        if isinstance(problemset, Task): return self.fit(problemset['train'])

        rule = None
        ratios = list({(
            problem['input'].shape[0] / problem['output'].shape[0],
            problem['input'].shape[1] / problem['output'].shape[1],
        )
                       for problem in problemset})
        if len(ratios) == 1:
            ratio = ratios[0]
            if ratio == (1, 1):
                rule = Rule(lambda input: input,
                            input_transform=self.transform,
                            name='GridSizeSame')

            elif (float(ratio[0]).is_integer()
                  and float(ratio[1]).is_integer()):
                rule = Rule(lambda input, ratio:
                            (input[0] * ratio[0], input[1] * ratio[1]),
                            args=[ratio],
                            input_transform=self.transform,
                            name='GridSizeIntegerMultiple')

            elif float(1 / ratio[0]).is_integer() and float(
                    1 / ratio[1]).is_integer():
                rule = Rule(lambda input, ratio:
                            (input[0] * ratio[0], input[1] * ratio[1]),
                            args=[ratio],
                            input_transform=self.transform,
                            name='GridSizeIntegerDivisor')
            else:
                rule = Rule(lambda input, ratio:
                            (input[0] * ratio[0], input[1] * ratio[1]),
                            args=[ratio],
                            input_transform=self.transform,
                            name='GridSizeInputRatio')
        else:
            output_shapes = list(
                {problem['output'].shape
                 for problem in problemset})
            if len(output_shapes) == 1:
                rule = Rule(lambda input, output_shape: output_shape,
                            args=[output_shapes[0]],
                            input_transform=self.transform,
                            name='GridSizeFixedSizeOutput')

        self.cache[problemset.filename] = rule
        return rule

    def transform(self, input: np.ndarray):
        return input.shape

    def test(self, problemset: ProblemSet, rule: Rule = None):
        if isinstance(problemset, Task): return self.fit(problemset['train'])
        if not rule: rule = self.cache[problemset.filename]
        for problem in problemset:
            input = self.transform(problem['input'])
            output = self.transform(problem['output'])
            guess = rule(input)
            if not np.equal(guess, output):
                return False
        return True

    def predict(self,
                problemset: Union[ProblemSet, Task],
                rule: Rule = None,
                *args,
                task: Task = None,
                **kwargs) -> Union[None, List[np.ndarray]]:
        task = task or (problemset
                        if isinstance(problemset, Task) else problemset.task)
        problemset = (problemset['test']
                      if isinstance(problemset, Task) else problemset)
        if task.filename not in self.cache: self.fit(task)
        if self.cache[task.filename] is None: return None  # Unsolvable mapping

        if not rule: rule = self.cache[problemset.filename]
        if not callable(rule): return None
        return [rule(problem['input']) for problem in problemset]
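
The class-level arguments mapping groups query functions by their return annotation; a minimal sketch with hypothetical stand-ins for the Query helpers:

import inspect
import pydash as py

def count_nonzero(grid) -> int:        # stand-in with the same kind of return annotation
    return 0

def grid_size_ratio(a, b) -> float:    # stand-in
    return 1.0

by_return = py.group_by([count_nonzero, grid_size_ratio],
                        lambda f: inspect.signature(f).return_annotation)
# {<class 'int'>: [count_nonzero], <class 'float'>: [grid_size_ratio]}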
Example No. 29
def company(request, kind=""):
    filter_kwargs={kind: True}
    companys = Company.objects.filter(**filter_kwargs)
    companys = pydash.group_by(companys, lambda company: company.get_continent_display())
    return render(request, 'canna_cross/company_list.html', {'companys': companys})