Code Example #1
File: api.py Project: ljb-2000/SnDB
 def get(self):
     result = []
     ips = request.args.get('ip', '').strip()
     try:
         if ips:
             for ip in ips.split(','):
                 host_dict = {}
                 private_ip = PrivateIp.query.filter_by(ip=ip).first()
                 if private_ip:
                     host_dict['env'] = private_ip.host.env.name
                     host_dict['services'] = ','.join(
                         list(
                             flatten(
                                 private_ip.host.services.values(
                                     Service.name))))
                     host_dict['ip'] = ip
                     result.append(host_dict)
                     continue
                 publish_ip = PublishIp.query.filter_by(ip=ip).first()
                 if publish_ip:
                     host_dict['env'] = publish_ip.host.env.name
                     host_dict['services'] = ','.join(
                         list(
                             flatten(
                                 publish_ip.host.services.values(
                                     Service.name))))
                     host_dict['ip'] = ip
                     result.append(host_dict)
                     continue
         return {'data': result, 'status': 1}
     except Exception:  # a bare except would also swallow SystemExit and KeyboardInterrupt
         return {'data': [], 'status': 0}
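Many of the snippets on this page appear to take flatten from the funcy library. As a minimal sketch of its behaviour (a lazy iterator that unrolls arbitrary nesting but leaves strings intact):

from funcy import flatten, lflatten

nested = [1, [2, [3, 4]], ('ab', 5)]
print(list(flatten(nested)))  # [1, 2, 3, 4, 'ab', 5] -- strings are not split
print(lflatten(nested))       # the eager, list-returning variant

Because flatten is lazy, results are usually wrapped in list(), as Example #1 does with ','.join(list(flatten(...))).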
Code Example #2
import numpy as np
from funcy import flatten  # flatten is assumed to come from funcy, as in the other examples here


def CalcEffectSize(sc_dict,
                   sp_dict,
                   logr_indices,
                   save_fx_size=False,
                   save_name='effect_sizes.txt',
                   printTop=5):
    '''Calculates effect size for all significant genes.'''

    print('...calculating effect sizes...')
    fx_dict = {}
    mean_dict = {}  # NOTE: never populated below; returned empty, as in the original
    for gene in sc_dict:
        if gene in sp_dict:
            if len(sc_dict[gene]) > 0 and len(sp_dict[gene]) > 0:
                all_sp_obs = list(flatten(sp_dict[gene]))
                all_sc_obs = list(flatten(sc_dict[gene]))
                sp_mean = np.nanmean(all_sp_obs)
                sc_mean = np.nanmean(all_sc_obs)
                fx_dict[gene] = []

                # fx size is the difference between the sp mean and each
                # individual br's logratio mean
                for i in range(len(logr_indices)):
                    # NOTE: the original compared `== np.nan`, which is always
                    # False; an isnan check is assumed to be the intent
                    if np.all(np.isnan(sc_dict[gene][i])):
                        fx_dict[gene].append(np.nan)
                    else:
                        fx_dict[gene].append(sp_mean - np.nanmean(sc_dict[gene][i]))
                fx_dict[gene].append(
                    sp_mean - sc_mean
                )  #also add mean for context https://github.com/mwaskom/seaborn/issues/375

    print('...done calculating effect sizes!')

    if 'YLR397C' in sc_dict:  # leftover debug print for one hardcoded gene; guarded to avoid a KeyError
        print(sc_dict['YLR397C'])

    if printTop > 0:  # print the top N effect sizes
        print('top ' + str(printTop) + ' effect sizes:')
        for gene in sorted(fx_dict, key=fx_dict.get, reverse=True)[:printTop]:
            print(gene, fx_dict[gene])

    if save_fx_size:
        print('...saving outfile:')
        with open(save_name, 'w') as wf:
            header = 'gene'
            for i in range(len(logr_indices)):
                # NOTE: the original referenced an undefined name `temp` here;
                # a generic 'br' column label is assumed
                header += '\t' + 'br_' + str(i + 1)
            wf.write(header + '\tmean\n')

            for gene in sorted(fx_dict, key=fx_dict.get, reverse=True):
                gene_str = gene
                for i in range(len(fx_dict[gene])):
                    gene_str += '\t' + str(fx_dict[gene][i])
                wf.write(gene_str + '\n')
    return fx_dict, mean_dict
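As an illustration of the flatten-then-nanmean pattern above, a minimal sketch with toy replicate data (funcy assumed):

import numpy as np
from funcy import flatten

obs = [[1.0, np.nan], [2.0, 3.0]]      # nested per-replicate observations
print(np.nanmean(list(flatten(obs))))  # 2.0 -- NaN-aware mean over the flattened values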
Code Example #3
File: api.py Project: ljb-2000/SnDB
def _service_host_info(host_id):
    _host = Host.query.get(host_id)
    if _host:
        _host_dict = {}
        _host_dict['host_id'] = _host.id
        _host_dict['env'] = _host.env.name
        _host_dict['private_ip'] = ','.join(
            list(flatten(_host.private_ips.values(PrivateIp.ip))))
        _host_dict['publish_ip'] = ','.join(
            list(flatten(_host.publish_ips.values(PublishIp.ip))))
        return _host_dict
    else:
        return ''
Code Example #4
def apply_model_stream(docs):
    global model

    ids = _.pluck('id', docs)
    msgs = map(lambda x: _.flatten([x])[0], _.pluck('msg', docs))

    z = model['vect'].transform(msgs)
    z = model['ch2'].transform(z)
    z = model['tfidf'].transform(z)
    pred = model['clf'].predict_proba(z)

    for i in range(0, len(ids)):
        yield {
            "_id": ids[i],
            "_type": config['elasticsearch']['_type'],
            "_index": config['elasticsearch']['_to_index'],
            "_op_type": "update",
            "doc": {
                '__meta__': {
                    'tri_pred': {
                        'neg': float(pred[i][0]),
                        'neut': float(pred[i][1]),
                        'pos': float(pred[i][2])
                    }
                }
            }
        }
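The `_.flatten([x])[0]` idiom above normalises a msg field that may be either a scalar or a list down to a single value. A minimal sketch, assuming `_` is an older pydash release that still ships pluck:

import pydash as _

docs = [{'id': 1, 'msg': 'hello'}, {'id': 2, 'msg': ['hi', 'later']}]
print([_.flatten([m])[0] for m in _.pluck('msg', docs)])  # ['hello', 'hi']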
Code Example #5
        def common_neighbour_set(char):
            perm_neighbours = lambda cperm: set(flatten(
                [cc.left_char, cc.right_char] for cc in cperm if cc.char == char
            ))

            # note: on Python 3, reduce must be imported from functools
            common_neighbours = reduce(operator.__and__, map(perm_neighbours, context_perms))
            return common_neighbours - {None}
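The reduce over operator.__and__ intersects one neighbour set per context permutation; subtracting {None} then drops what is presumably the padding value at sequence edges. A standalone sketch:

import operator
from functools import reduce

neighbour_sets = [{'a', 'b', None}, {'b', 'c', None}, {'b', None}]
print(reduce(operator.and_, neighbour_sets) - {None})  # {'b'}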
Code Example #7
File: lang.py Project: jacob414/micropy
def unfold_gen(x: Generator[Any, None, None],
               cast: type = tuple) -> Iterable[Any]:
    """Quick recursive unroll of possibly nested (uses funcy library under
    the hood)

    """
    return cast(funcy.flatten(x, isgen))
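funcy's flatten accepts a follow predicate that decides which items it recurses into; passing isgen makes it unroll nested generators only. A minimal sketch, with isgen approximated by inspect.isgenerator:

import inspect

import funcy

isgen = inspect.isgenerator  # stand-in for the project's isgen helper

def nested():
    yield 1
    yield (i for i in (2, 3))  # a nested generator gets unrolled

print(tuple(funcy.flatten(nested(), isgen)))  # (1, 2, 3)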
Code Example #8
File: monitor.py Project: wttfire/redash
def rq_job_ids():
    queues = Queue.all(connection=redis_connection)

    started_jobs = [StartedJobRegistry(queue=q).get_job_ids() for q in queues]
    queued_jobs = [q.job_ids for q in queues]

    return flatten(started_jobs + queued_jobs)
Code Example #9
 def get_values(observation):
     return flatten(concat(
         observation['my_car'].values(),
         mapcat(methodcaller('values'), sorted(observation['other_cars'], key=itemgetter('position_length'))),  # sorted nearest-first by distance; might be better to also split front/back?
         mapcat(methodcaller('values'), sorted(observation['obstacles' ], key=itemgetter('position_length'))),  # noqa: E202
         mapcat(methodcaller('values'), sorted(observation['stars'     ], key=itemgetter('position_length')))   # noqa: E202
     ))
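A minimal sketch of the sort/mapcat/flatten pipeline above, with made-up car dicts (funcy assumed):

from operator import itemgetter, methodcaller

from funcy import concat, flatten, mapcat

cars = [{'position_length': 2.0, 'speed': 5}, {'position_length': 1.0, 'speed': 7}]
print(list(flatten(concat(
    mapcat(methodcaller('values'), sorted(cars, key=itemgetter('position_length'))),
))))  # [1.0, 7, 2.0, 5] -- the nearest car's values come first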
Code Example #10
File: config_parser.py Project: tate11/etl_bonobo
 def __init__(self, etl_config_dict):
     self.dict = etl_config_dict
     if 'DATA_SOURCES' not in self.dict:
         raise ETLConfigException('DATA_SOURCES missing in the etl config')
     self.data_source_domains = {d['domain'] for d in self.data_sources}
     self.pipeline_domains = set(
         funcy.flatten([d['input_domains'] for d in (self.pipeline or [])]))
     self.etl_data_sources = [
         {'input_domains': d['domain'], 'load': d.get('load', True)}
         for d in self.data_sources
         if d['domain'] in self.data_source_domains - self.pipeline_domains
     ]
     self.etls = [
         ETLParams(**ds)
         for ds in self.etl_data_sources + (self.pipeline or [])
     ]
     self.output_domains = [
         domain for etl in self.etls
         for domain in (etl.output_domains or [])
     ]
     self.input_domains = [
         domain for etl in self.etls for domain in etl.input_domains
     ]
     self.functions = [
         etl.function_name for etl in self.etls
         if etl.function_name is not None
     ]
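A minimal sketch of the domain bookkeeping above with a toy config (funcy assumed): pipeline_domains collects every domain some pipeline consumes, and the set difference keeps only data sources that no pipeline feeds on.

import funcy

pipeline = [{'input_domains': ['a', 'b']}, {'input_domains': ['b', 'c']}]
pipeline_domains = set(funcy.flatten([d['input_domains'] for d in pipeline]))
print(pipeline_domains)               # {'a', 'b', 'c'}
print({'a', 'd'} - pipeline_domains)  # {'d'} -- a source no pipeline consumes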
Code Example #11
def buildDict(prefix, urlx):
    rd = jutils.getHttpCsv(urlx)
    rdict = {}
    wlist = []
    for row in rd:
        cols = row.split(',')
        if len(cols) > 5:
            wlist.append(cols[5])
    w2list = [i for i in wlist if len(i) >= CHAR_MIN and len(i) <= CHAR_MAX]
    #print jj(w2list[:50])
    #pr = partListToDict(w2list,keyFuncFirstChar)
    pr = funcy.group_by(lambda x: x[0], w2list)
    c = Counter([i[0] for i in w2list]).most_common(MOST_LIMIT)
    # print jj(c[:24])
    # print jj(pr[c[0][0]])
    fmck = [i[0] for i in c]
    fmcv = [i[1] for i in c]
    # 256*8=2048
    # 512*4=2048
    wordlist = funcy.flatten([pr[x][:4] for x in fmck])
    rdict['data'] = funcy.select_keys(lambda x: x in fmck, pr)
    rdict['meta'] = {
        'source': urlx,
        'wordlist': wordlist,
        'firstMostCommonKey': fmck,
        'firstMostCommonCount': fmcv,
        'host': 'http://data.gov.tw',
        'build': 'http://console.tw',
        'script':
        'https://github.com/y12studio/console-tw/tree/master/projects/datachart/',
        'prefix': prefix,
        'time': datetime.datetime.utcnow().isoformat()
    }
    return rdict
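The group_by/select_keys pair above buckets words by their first character and then keeps only the most common buckets. A minimal sketch:

from collections import Counter

import funcy

words = ['apple', 'ant', 'bee', 'bear', 'cat']
groups = funcy.group_by(lambda w: w[0], words)        # {'a': ['apple', 'ant'], 'b': ['bee', 'bear'], 'c': ['cat']}
top = [ch for ch, _ in Counter(w[0] for w in words).most_common(2)]
print(funcy.select_keys(lambda k: k in top, groups))  # only the 'a' and 'b' buckets remain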
Code Example #12
File: service.py Project: dpays/dpay-notifications
async def _main_task(database_url=None,
                     loop=None,
                     dpayd_url=None,
                     start_block=None):
    logger.debug('main task starting')
    loop = loop or asyncio.get_event_loop()
    dpayd = dpay.dpayd.DPayd(nodes=[dpayd_url])
    blockchain = Blockchain(dpayd_instance=dpayd)
    pool = await create_asyncpg_pool(database_url=database_url, loop=loop)

    last_block_num_handled = None

    loop_elapsed = 0
    async for op in ops_iter(blockchain, start_block):
        loop_start = time.perf_counter()
        logger.debug('main task', loop_elapsed=loop_elapsed, op=op)
        block_num = op['block']
        unstored_notifications = list(flatten(gather_notifications(op)))
        logger.debug('main_task',
                     block_num=block_num,
                     unstored_count=len(unstored_notifications))
        resp = await store_notifications(unstored_notifications, pool)
        if resp:
            last_block_num_handled = block_num
        loop_elapsed = time.perf_counter() - loop_start
Code Example #13
 def _expand_verses(raw):
     """Converts "3,4,5" or "3-5" to [3, 4, 5]"""
     if ',' in raw:
         return list(flatten(map(_expand_verses, raw.split(','))))
     elif '-' in raw:
         components = list_map(int, raw.split('-'))
         return list(range(components[0], components[-1] + 1))
     else:
         return [int(raw)]
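For reference, the expected behaviour (list_map is assumed to be a list-returning map helper):

# _expand_verses("3,4,5")  -> [3, 4, 5]
# _expand_verses("3-5")    -> [3, 4, 5]
# _expand_verses("1,3-5")  -> [1, 3, 4, 5]  (the comma branch recurses into the range branch)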
Code Example #14
File: user.py Project: MrSwiss/charla
    def motd(self, sock, source):
        if not self.server.motd:
            return ERR_NOMOTD()

        return flatten((
            RPL_MOTDSTART(self.server.host),
            map(RPL_MOTD, self.server.motd),
            RPL_ENDOFMOTD()
        ))
Code Example #15
File: lang.py Project: jacob414/kingston
def unfold_gen(x: Generator[Any, None, None],
               cast: type = tuple) -> Iterable[Any]:
    """Quick recursive unroll of possibly nested (uses funcy library under
    the hood)

    """
    res = tuple(fy.flatten(x, isgen))
    if TYPE_CHECKING:
        res = cast(Iterable, res)  # pragma: nocov
    return res
Code Example #16
File: transform.py Project: despawnerer/theatrics
def transform_event(kudago_event, parent_id, children_count):
    tags = kudago_event['tags']
    categories = kudago_event['categories']
    place = kudago_event['place']

    kind = find_first(('festival', 'exhibition', 'theater'), categories)
    dates = filter(is_date_finite, kudago_event['dates'])
    if kind not in ('festival', 'exhibition'):
        dates = flatten(map(split_date, dates))
    dates = list(sorted(map(transform_date, dates), key=itemgetter('start')))

    participants = [
        {'role': p['role']['slug'], 'agent': p['agent']['id']}
        for p in kudago_event['participants']
    ]

    return {
        '_id': kudago_event['id'],
        '_type': 'event',

        'kind': kind,
        'is_for_kids': 'kids' in categories,
        'is_premiere': 'премьера' in tags,

        'name': kudago_event['short_title'],
        'full_name': kudago_event['title'],
        'tagline': kudago_event['tagline'],
        'lead': strip_links(kudago_event['description']),
        'description': strip_links(kudago_event['body_text']),

        'location': kudago_event['location']['slug'],
        'place': place['id'] if place else None,
        'parent': parent_id,
        'participants': participants,

        'age_restriction': kudago_event['age_restriction'],
        'price': transform_price(kudago_event['price'], kudago_event['is_free']),

        'dates_count': len(dates),
        'children_count': children_count,
        'favorites_count': kudago_event['favorites_count'],
        'comments_count': kudago_event['comments_count'],

        'start': dates[0]['start'] if dates else None,
        'end': dates[-1]['end'] if dates else None,

        'first_image': first(kudago_event['images']),
        'images': kudago_event['images'],
        'dates': dates,

        'source': {
            'name': 'kudago.com',
            'url': kudago_event['site_url'],
        }
    }
Code Example #17
File: utils.py Project: shigabeev/FalsifyNN
def generatePicture(Lib, params, pic_path, road_type = 0, car_type = 0):
    road = Lib.getElement("roads", road_type)
    old_road = copy.deepcopy(road)
    car = Lib.getElement("cars", car_type)
    params.append(list(np.ones(6 - len(params))))
    # fn.flatten (funcy assumed) returns a lazy iterator; materialize it so the
    # positional indexing below works
    params = list(fn.flatten(params))
    (new_coords, loc, new_carimage) = shift_xz(old_road, car, params[0], params[1])
    new_image = generateImage(old_road.data, new_carimage, loc)
    ModifiedImage = modifyImageLook(new_image, params[2], params[3], params[4], params[5])
    ModifiedImage.save(pic_path)
    return new_coords
Code Example #18
File: lietuva.py Project: sirex/databot-bots
def run(bot):
    path = pathlib.Path('data/osm')
    source = 'http://download.gisgraphy.com/openstreetmap/pbf/LT.tar.bz2'
    output = path / 'LT.tar.gz2'

    bot.output.info('Downloading %s' % source)
    http_code = subprocess.check_output(funcy.flatten([
        'curl', source,
        ['--time-cond', str(output)] if output.exists() else [],
        '--output', str(output),
        '--location',
        '--silent',
        '--write-out', '%{http_code}',
    ]))

    http_code = http_code.decode()

    if http_code == '200':
        bot.output.info('Extracting %s' % output)
        subprocess.check_call(['tar', '--directory', str(output.parent), '-xjf', str(output)])

        # https://github.com/openstreetmap/osm2pgsql#usage
        subprocess.check_call([
            'osm2pgsql',
            '--create',
            '--database', 'lietuva',
            '--style', str(path / 'lietuva.style'),
            '--input-reader', 'pbf',
            'data/osm/LT',
        ])

        bot.output.info('Query places')
        bot.pipe('places').clean().append(query_places(), progress='places')

        csv_output_path = path / 'places.csv'
        bot.output.info('Export places to %s' % csv_output_path)
        bot.pipe('places').export(str(csv_output_path), include=[
            'osm_id',
            'type',
            'place',
            'population',
            'wikipedia_title',
            'wikipedia_lang',
            'lon',
            'lat',
            'admin_level_6_osm_id',
            'admin_level_6',
            'admin_level_5_osm_id',
            'admin_level_5',
            'admin_level_4_osm_id',
            'admin_level_4',
        ])

        bot.compact()
Code Example #19
File: cpsFsmFsa.py Project: wuyou33/cps_multi_agent
def customFSAInefficientIntersection(fsaA, fsaB, simplifyActions=True):
    '''Intersection of two fsa'''

    states = [
        tuple(funcy.flatten(i))
        for i in itertools.product(fsaA.states, fsaB.states)
    ]
    alphabet = set.union(fsaA.alphabet, fsaB.alphabet)
    initStates = [
        tuple(funcy.flatten(i))
        for i in itertools.product(fsaA.initStates, fsaB.initStates)
    ]
    finalStates = [
        tuple(funcy.flatten(i))
        for i in itertools.product(fsaA.finalStates, fsaB.finalStates)
    ]

    transitions = []
    for ti in fsaA.transitions:
        for tj in fsaB.transitions:
            if ti[2] == tj[2]:

                if isinstance(ti[0], tuple):  # TupleType in the Py2 original is just tuple
                    ti_start = ti[0]
                    ti_end = ti[1]
                else:
                    ti_start = (ti[0], )
                    ti_end = (ti[1], )

                if isinstance(tj[0], tuple):  # TupleType in the Py2 original is just tuple
                    tj_start = tj[0]
                    tj_end = tj[1]
                else:
                    tj_start = (tj[0], )
                    tj_end = (tj[1], )

                transitions.append(
                    (ti_start + tj_start, ti_end + tj_end, ti[2]))

    return CustomFSA(states, alphabet, transitions, initStates, finalStates,
                     simplifyActions)
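A minimal sketch of the tuple-flattening that builds the product states: itertools.product pairs states from both automata, and funcy.flatten collapses each pair into one flat tuple (strings are treated as atoms):

import itertools

import funcy

statesA = [('q0',), ('q1',)]
statesB = ['p0', 'p1']
print([tuple(funcy.flatten(i)) for i in itertools.product(statesA, statesB)])
# [('q0', 'p0'), ('q0', 'p1'), ('q1', 'p0'), ('q1', 'p1')]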
Code Example #20
File: utils.py Project: shigabeev/FalsifyNN
def generateGenImage(Lib, pic_path, road_type, obj_list, other_params):
    road = Lib.getElement("roads", road_type)
    new_image = copy.deepcopy(road)
    for obj in obj_list:
        element = Lib.getElement(obj.type, obj.id)
        (loc, new_obj_image) = shift_xz(new_image, element, obj.coord.x, obj.coord.y)
        new_image = generateImage(new_image.data, new_obj_image, loc)
    other_params.append(list(np.ones(4 - len(other_params))))
    # fn.flatten (funcy assumed) is lazy; materialize it for the indexing below
    other_params = list(fn.flatten(other_params))
    ModifiedImage = modifyImageLook(new_image, other_params[0], other_params[1], other_params[2], other_params[3])
    ModifiedImage.save(pic_path)
    return ModifiedImage
Code Example #21
File: api_access.py Project: Lrraymond13/drug_api
def compile_ids_output(request_res, filename=None, directory=None):
    # collect id values and output to csv
    ids = []
    for process_res in request_res:
        for (code, offset, res) in process_res:
            if code == requests.codes.ok:  # the original had `requests.code.ok`, which raises AttributeError
                ids.append(res)
    # funcy.flatten is lazy; materialize it before handing it to numpy
    ser = pd.Series(np.array(list(funcy.flatten(ids))), name='TrialId')
    path = filename or 'TrialIds.csv'  # the original `'TrialIds.csv' or filename` always picked the default
    if directory:
        path = os.path.join(directory, path)
    ser.to_csv(path, index=False)
Code Example #22
def upgrade():
    tags_regex = re.compile(r'^([\w\s]+):|#([\w-]+)', re.I | re.U)  # raw string avoids invalid-escape warnings
    connection = op.get_bind()

    dashboards = connection.execute("SELECT id, name FROM dashboards")

    update_query = text("UPDATE dashboards SET tags = :tags WHERE id = :id")
    
    for dashboard in dashboards:
        tags = compact(flatten(tags_regex.findall(dashboard[1])))
        if tags:
            connection.execute(update_query, tags=tags, id=dashboard[0])
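The regex yields one (prefix, hashtag) tuple per match, so each tuple carries an empty slot; flatten merges the tuples and compact drops the empty strings. A minimal sketch (funcy assumed):

import re

from funcy import compact, flatten

tags_regex = re.compile(r'^([\w\s]+):|#([\w-]+)', re.I | re.U)
matches = tags_regex.findall('Marketing: Q3 #growth #eu-west')
print(matches)                          # [('Marketing', ''), ('', 'growth'), ('', 'eu-west')]
print(list(compact(flatten(matches))))  # ['Marketing', 'growth', 'eu-west']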
Code Example #24
File: permissions.py Project: Xangis/redash
def has_access(object_groups, user, need_view_only):
    if 'admin' in user.permissions:
        return True

    matching_groups = set(object_groups.keys()).intersection(user.groups)

    if not matching_groups:
        return False

    required_level = 1 if need_view_only else 2
    group_level = 1 if any(flatten([object_groups[group] for group in matching_groups])) else 2

    return required_level <= group_level
Code Example #26
def main(args):
    config = parse_cfg_file(args.config_file, section=args.data_type)
    analysis_col_patterns = config.get('analysis_col_patterns')
    target_metadata_cols = funcy.flatten(config.get('target_metadata_cols'))
    data_type_label = config.get('data_type_mapping').get(args.data_type)

    output_pcl = add_metadata_to_tsv(args.input_file,
                                     args.metadata_file,
                                     data_type_label,
                                     args.id_col,
                                     analysis_col_patterns,
                                     args.drop_missing_cols,
                                     target_metadata_cols,
                                     supplement=args.supplement)
Code Example #28
def semanticTransformations(xml):
    # look for all the variables in the definitions block
    variables_defined = xml.xpath(
        '/node/definitions/element/variable[1]/text()')

    # from the previous list, find tags with these variable names
    variables_used = funcy.flatten(
        map(lambda x: xml.xpath('//' + x), variables_defined))

    # rename the variable tag to 'function' and add the name to the attribute 'name'
    for variable in variables_used:
        name = variable.tag
        variable.tag = 'function'
        variable.attrib['name'] = name

    return xml
Code Example #29
File: analyze_subs.py Project: sshleifer/kaggle-rsna
def get_1_or_2(x, cut1, cut2):
    """Unused"""
    if len(x) == 0:
        return np.nan
    new_dets = []
    x = lmap(float, x)
    chunks = list(funcy.chunks(5, x))
    for i, c in enumerate(chunks):
        if c[0] > cut1:
            new_dets.append(c)
        elif (i == 1) and c[0] > cut2:
            assert len(new_dets) == 1
            new_dets.append(c)
        else:
            break
    return ' '.join(str(v) for v in funcy.flatten(new_dets))  # values are floats, so str() is needed for join
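funcy.chunks splits a flat sequence into fixed-size pieces (here, 5-value detection records). A minimal sketch:

import funcy

print(list(funcy.chunks(5, list(range(12)))))
# [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11]]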
Code Example #30
File: production.py Project: Ivan1931/rcfg-generator
 def perform(self, string):
     """
     Performs the production on an arbitrary string.
     Returns:
         * List of symbols (string) where the production has been performed
             if the context allows this production to happen (can_perform is true)
         * The origional string if the context does not allow productions
     """
     if self.can_perform(string):
         possible = []
         for idx in range(0, len(string)):
             if string[idx] == self.trigger_variable:
                 # funcy.flatten is lazy; materialize each expansion as a list
                 possible.append(list(funcy.flatten(
                     [self.transform_to if jdx == idx else val
                      for (jdx, val) in enumerate(string)])))
         return possible
     else:
         return []
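A minimal sketch of the expand-at-one-position idiom with a toy rule X -> y z: the nested replacement list is spliced flat into the surrounding symbols.

import funcy

string = ['a', 'X', 'b']
transform_to = ['y', 'z']
print(list(funcy.flatten(
    [transform_to if jdx == 1 else val for jdx, val in enumerate(string)])))
# ['a', 'y', 'z', 'b']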
Code Example #31
File: post.py Project: pmartynov/golodranets
    def get_all_replies(root_post=None, comments=list(), all_comments=list()):
        """ Recursively fetch all the child comments, and return them as a list.

        Usage: all_comments = Post.get_all_replies(Post('foo/bar'))
        """
        # see if our root post has any comments
        if root_post:
            return Post.get_all_replies(comments=list(root_post.get_replies()))
        if not comments:
            return all_comments

        # recursively scrape children one depth layer at a time
        children = list(flatten([list(x.get_replies()) for x in comments]))
        if not children:
            return all_comments or comments
        return Post.get_all_replies(comments=children, all_comments=comments + children)
Code Example #32
def has_access_to_groups(obj, user, need_view_only):
    groups = obj.groups if hasattr(obj, 'groups') else obj

    if 'admin' in user.permissions:
        return True

    matching_groups = set(groups.keys()).intersection(user.group_ids)

    if not matching_groups:
        return False

    required_level = 1 if need_view_only else 2

    group_level = 1 if all(flatten([groups[group] for group in matching_groups])) else 2

    return required_level <= group_level
Code Example #33
File: api.py Project: ljb-2000/SnDB
def service_info_stats():
    company_list = []
    company_id_list = flatten(
        Business.query.filter_by(pid=1).values(Business.id))
    if company_id_list:
        _results = db.session.execute(CHOICE_HOST_TYPE_DATA).fetchall()
        _host_physics_set = {str(_result[0]) for _result in _results}
        for company_id in company_id_list:
            business_id_list = [
                mess[0] for mess in Business.query.filter(
                    Business.type > 1).values(Business.id, Business.product)
                if str(company_id) in mess[1].split(',')
            ]
            if not business_id_list:
                continue
            company_service_total = 0
            company_host_physics, company_host_vms = set(), set()
            _company_dict, item_list = {}, []
            for business_id in business_id_list:
                _sql = BUSINESS_SERVICE_HOST_DATA.format(business_id)
                _rets = db.session.execute(_sql).fetchall()
                for _ret in _rets:
                    _business_dict = {}
                    _business_dict['business_name'] = _ret[0]
                    _business_dict['business_service'] = {'total': _ret[1]}
                    _host_set = set(_ret[-1].split(','))
                    _physics_set = _host_set.intersection(_host_physics_set)
                    _vms_set = _host_set.difference(_host_physics_set)
                    _business_dict['business_host'] = {
                        'total': len(_host_set),
                        'physics': len(_physics_set),
                        'vms': len(_vms_set)
                    }
                    item_list.append(_business_dict)
                    company_host_physics.update(_physics_set)
                    company_host_vms.update(_vms_set)
                    company_service_total += _ret[1]
            _company_dict['company_name'] = Business.query.get(company_id).name
            _company_dict['company_host'] = {
                'total': len(company_host_physics) + len(company_host_vms),
                'physics': len(company_host_physics),
                'vms': len(company_host_vms)
            }
            _company_dict['company_service'] = {'total': company_service_total}
            _company_dict['items'] = item_list
            company_list.append(_company_dict)
    return company_list
Code Example #34
File: test_signing.py Project: stjordanis/special_k
    def test_checked_in_keys(self):
        # test that there is a one to one map between checked in keys and fingerprints
        keyname_to_fingerprint = get_keyname_to_fingerprint()
        self.assertEqual(set(get_trusted_pub_keys()), set(keyname_to_fingerprint.keys()))
        self.assertIn(_UNSAFE_KEY_FOR_TESTING_FINGERPRINT, keyname_to_fingerprint.values())

        # Make sure people don't mess with the trusted_keys directory
        cur_path = os.path.dirname(os.path.abspath(__file__))
        trusted_keys_dir = os.path.join(cur_path, "./fake_keys")
        trustdb_path = os.path.join(trusted_keys_dir, "trustdb.txt")

        # enumerate all the possible files that might have accidentally ended up in trusted_keys
        # If someone has good reason to add a .py file (other than __init__), then can delete
        # that extension from here
        file_patterns_to_check = ("*.py", "*.txt", "*.key", "*.pem", "*.pub*", "*.asc")
        all_files_in_trusted_keys_dir = funcy.flatten(
            glob.glob(os.path.join(trusted_keys_dir, pattern)) for pattern in file_patterns_to_check
        )

        all_file_names = {  # take basename and find uniques
            os.path.basename(filepath) for filepath in all_files_in_trusted_keys_dir
        }

        expected_filenames = get_trusted_pub_keys().union(
            {"trustdb.txt", "__init__.py", "my.txt.asc", "testing.secret.asc"}
        )
        # expected_filenames is a frozenset, need to cast to set for nice debugging
        self.assertEqual(all_file_names, set(expected_filenames))

        # test that only the ultimately trusted key is in the trustdb
        fingerprints_in_trust_db = _get_fingerprints_in_trust_db(trustdb_path)
        self.assertEqual(
            len(fingerprints_in_trust_db),
            1,
            "Found {} items in trustdb. Expected 1. Someone has added keys to the "
            "trustdb but only the ultimately trusted key should be "
            "there".format(len(fingerprints_in_trust_db)),
        )
        expected_entry = "{}:6:".format(_UNSAFE_KEY_FOR_TESTING_FINGERPRINT)
        self.assertEqual(
            fingerprints_in_trust_db[0],
            expected_entry,
            "Found a single entry, `{}` in the trustdb but it does not match the "
            "ultimately trusted key. Only the ultimately trusted key should live inside the "
            "trust db.".format(fingerprints_in_trust_db[0]),
        )
Code Example #35
def fetch_comments_flat(root_post=None, comments=list(), all_comments=list()):
    """
    Recursively fetch all the child comments, and return them as a list.

    Usage: all_comments = fetch_comments_flat(Post('@foo/bar'))
    """
    # see if our root post has any comments
    if root_post:
        return fetch_comments_flat(comments=root_post.get_comments())
    if not comments:
        return all_comments

    # recursively scrape children one depth layer at a time
    children = list(flatten([x.get_comments() for x in comments]))
    if not children:
        return all_comments
    return fetch_comments_flat(all_comments=comments + children, comments=children)
Code Example #36
File: group_sum.py Project: NAKlama/HormoneLevels
 def __get_common_factors(
     factors_list: Iterable[WithFactors]
 ) -> Tuple[List[WithFactors], List[int]]:
     fl_a, fl_b = tee(factors_list, 2)
     residual_factors = lmap(lambda fl: (fl[0], []), fl_a)
     common_factors = []
     grouped_factors = lmap(lambda wf: Counter(wf[1]), fl_b)
     factors = set(flatten(map(lambda gf: list(gf.keys()),
                               grouped_factors)))
     for f in factors:
         cnt = lmap(lambda gf: gf[f], grouped_factors)
         min_cnt = min(cnt)
         common_factors += [f] * min_cnt
         for rf, rf_cnt_old in zip(residual_factors, cnt):
             rf_cnt = rf_cnt_old - min_cnt
             if rf_cnt > 0:
                 # extend (not append) is assumed here so the residual factor
                 # list stays flat, matching the declared List[int] element type
                 rf[1].extend([f] * rf_cnt)
     return residual_factors, common_factors
Code Example #37
File: audioApp.py Project: janastu/papad-web
def admin():
    if 'google_token' in session:
        auth_tok = session['google_token']
        me = session['message']
        flash("Welcome" + " " + me.get('name') + "!")
    else:
        session['message'] = {'email': ''}
        auth_tok = {'access_token': '', 'refresh_token': ''}
    superAdmins = flatten(pluck("adminUsers",
                                app.config.get('STATIONS')))
    print repr(superAdmins)
    users = []
    for user in mongo.db.users.find():
            users.append(user)
    return render_template('admin.html', access_token=auth_tok,
                           refresh_token=auth_tok, session=session['message'],
                           config=current_app.config, users=users,
                           superAdmins=superAdmins)
Code Example #38
    def computeGrammarProduct(self, initState, currState, stateNonGrammarInd=1, stateGrammarInd=0, performStateTrace=False): 
        '''Get the product automaton of grammar and transition graph'''
        
        # Get fsa parameters
        _ , _, self.fsaTransitions, _ = self.getFSA()
        gameTransitions = set([tuple(i) for i in self.fsaTransitions])
        
        # Get adversary ID
        for i in xrange(self.numAgents):
            if self.agents[i].agentType == 'UNKNOWN':
                advID = i
                break
            
        # Get adversary parameters
        advGrammarObj = self.agents[advID].grammarObj
        advAlphabet = self.agentAlphabet[advID] 
        advName = self.agents[advID].agentName
        advPrevGrammar = self.agents[advID].prevGrammar
        
        # Get the move sequence list
        moveSeqList = []
        for i in xrange(self.numAgents):
            for j in xrange(self.numAgents):
                
                # Check if agent name matches with the move order before extracting the corresponding moves
                if self.moveOrder[i] == self.agents[j].agentName:
                    moveSeqList.append(self.agents[j].agentWord)
                    
        # Convert list of move sequences of each agent into a contiguous sequence
        moveSeq = list(itertools.izip_longest(*moveSeqList))
        moveSeq = funcy.flatten(moveSeq)
        moveSeq = [i for i in moveSeq if i is not None]
        
        # Check if adversary's grammar has changed 
        if advGrammarObj.grammar == advPrevGrammar:
                    
            # No need to recalculate the product automaton
            prodStates = self.prodAutomaton.outStates
            prodTransitions = self.prodAutomaton.outTransitions
            transitionsUpdated = False
        else:   
                               
            # Update product automaton and get product transitions
            prodStates, prodTransitions = self.prodAutomaton.computeFsaProductTransitions(self.gameStates, gameTransitions, 
                                                                                          advGrammarObj, advAlphabet, 
                                                                                          advName, self.numAgents) 
            transitionsUpdated = True
            
        # Update the previous grammar on record
        self.agents[advID].prevGrammar = deepcopy(advGrammarObj.grammar)                

        # Sanity check to see if transitions are lost
        # if self.prevProdTransitions.difference(prodTransitions):
            # t1 = self.prevProdTransitions.difference(prodTransitions)
            # t2 = prodTransitions.difference(self.prevProdTransitions)
            # pass
        # assert not self.prevProdTransitions.difference(prodTransitions), "Some transitions are lost!"
        
        # Get the new transitions
        newTransitions = prodTransitions.difference(self.prevProdTransitions)
        
        if performStateTrace:        
            # Get init product state
            initProdState = (list(self.prodAutomaton.grammarFsa.initStates)[0], initState)
            # for state in self.prodAutomaton.productFsa.initStates:
            #    if state[stateNonGrammarInd] == initState:
            #        initProdState = state
            
            # Sanity check to see if initial state is found in product
            assert initProdState in prodStates, "Product initial state cannot be identified, too bad!"
            
            # Get the lookup table for traversing transitions
            stateActionToTargetLookup, _ = generateLookupDictsForTransitions(prodTransitions)
            
            # Traverse fsa to get current state
            currProdState = traverseTransitions(initProdState, moveSeq, stateActionToTargetLookup)
            # currProdState = traverseTransitions(initProdState, moveSeq, prodTransitions) 

        else:
            currProdState = None
            
        self.prevProdTransitions = prodTransitions
        return prodStates, prodTransitions, newTransitions, currProdState, transitionsUpdated
Code Example #39
File: channel.py Project: MrSwiss/charla
 def join(self, sock, source, names, keys=None):
     return filter(
         None,
         flatten(self._join(sock, source, name) for name in names.split(u(",")) if name)
     )
Code Example #40
File: importer.py Project: tsouvarev/theatrics
 async def import_pages(self, pages, transform, doc_type):
     futures = []
     async for page in pages:
         coro = self.import_list(page, transform, doc_type)
         futures.append(asyncio.ensure_future(coro))
     return flatten(await asyncio.gather(*futures))
Code Example #41
File: keymap.py Project: jirutka/sublimedsl
 def _apply_default_match_all(self, bindings):
     for context in flatten(pluck_attr('context', bindings)):
         if context.match_all is None:
             context.match_all = self._default_match_all
     return bindings
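A minimal sketch of pluck_attr feeding flatten, using throwaway namespace objects in place of real bindings:

from types import SimpleNamespace

from funcy import flatten, pluck_attr

b1 = SimpleNamespace(context=[SimpleNamespace(match_all=None)])
b2 = SimpleNamespace(context=[SimpleNamespace(match_all=True)])
print([c.match_all for c in flatten(pluck_attr('context', [b1, b2]))])
# [None, True]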
Code Example #42
File: js_ast.py Project: mozilla/spiderflunky
    def visit_interface(self, _, (_0, _1, name, _2, maybe_inherit, _3, _4,
                                  _5, maybe_attrs, _6, _7)):
        """Parse the name, inheritance, and attributes for an interface."""
        inherit = maybe_inherit[0] if maybe_inherit else []
        attrs = maybe_attrs[0] if maybe_attrs else []
        return (name, inherit, attrs)

    def visit_inherit(self, _, (_0, _1, parents)):
        """Get all of the interfaces this interface inherits from."""
        return parents

    def visit_parents(self, _, (name, _0, more_parents)):
        """Parse inheritance list."""
        return [name] + flatten(more_parents)

    def visit_attrs(self, _, (attr, _0, _1, _2, attrs)):
        """Return list of all attributes of an interface."""
        return [attr] + (attrs[0] if attrs else [])

    def visit_attr(self, _, children):
        """Parse attribute."""
        # task: throw away attr if it's static, like type
        return children[0]

    def visit_id(self, node, _):
        """Grab the identifier that was match."""
        return node.match.group()

Code Example #43
File: channel.py Project: JasonWoof/charla
 def join(self, sock, source, names):
     return flatten(self._join(sock, source, name) for name in names.split(u","))