Example #1
def process(filter_id):
    f = get_filter(filter_id)
    print(f)
    if not f:
        logger.error("Can't find filter with id=%s", filter_id)
        sys.exit(1)
    run_filter(filter_id, f)
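A minimal sketch of the helpers this example relies on, assuming get_filter looks an id up in a registry and returns None when it is missing; every name and registry entry below is hypothetical:

# Hypothetical stand-ins for the helpers used above, for illustration only.
import logging

logger = logging.getLogger(__name__)

_FILTERS = {
    1: {"name": "blur", "radius": 2},      # made-up registry entries
    2: {"name": "sharpen", "amount": 3},
}

def get_filter(filter_id):
    # Return the filter config for the given id, or None if it is unknown.
    return _FILTERS.get(filter_id)

def run_filter(filter_id, f):
    # Placeholder: a real implementation would apply the filter here.
    logger.info("running filter %s: %r", filter_id, f)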
Example #2
def get_filter(bot: Bot):
    return (
        utils.get_filter(utils.Config.watchers)
        & Filters.user(user_id=reduce(
            frozenset.union,
            map(frozenset,
                map(partial(utils.get_admin, bot), utils.Config.watchers)))))
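The reduce/map chain in Example #2 collapses the per-chat admin lists into one set of user ids before wrapping it in Filters.user. A standalone sketch of the same pattern, with a hypothetical get_admins helper standing in for utils.get_admin:

from functools import partial, reduce

def get_admins(bot, chat_id):
    # Hypothetical helper: user ids of the administrators of one chat.
    return [member.user.id for member in bot.get_chat_administrators(chat_id)]

def admin_ids(bot, chat_ids):
    # Union of admin ids over all watched chats, as a single frozenset.
    return reduce(frozenset.union,
                  map(frozenset, map(partial(get_admins, bot), chat_ids)))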
Example #4
def register(updater: Updater):
    for module in submodules.values():
        module.register(updater)

    dp = updater.dispatcher
    dp.add_handler(CommandHandler(__name__, run, filters=get_filter(Config.watchers)))
    # dp.add_handler(CommandHandler(__name__, run, filters=Filters.all)) # DEBUG

    # * Unavailable until all commands are implemented (or at least their
    #   describe methods return a string with len > 3)
    updater.bot.set_my_commands(commands_list)
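Examples #4–#6 pass a list of watched chats to get_filter. Assuming Config.watchers holds chat ids, the helper could be as simple as a Filters.chat wrapper; this is a sketch under that assumption, not the project's actual implementation:

from telegram.ext import Filters

def get_filter(chat_ids):
    # Restrict handlers to updates coming from the watched chats.
    return Filters.chat(chat_id=[int(chat_id) for chat_id in chat_ids])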
Example #5
def register(updater: Updater):
    dp = updater.dispatcher
    from_chat_list = []
    for from_chat in Config.forward.keys():
        # Keys may carry a ":" suffix; keep only the part before the colon.
        parts = from_chat.split(":")
        from_chat_list.append(parts[0] if len(parts) == 2 else from_chat)
    dp.add_handler(MessageHandler(get_filter(from_chat_list), auto_forward))
Example #6
def register(updater: Updater):
    filter_user = (
        get_filter(Config.watchers)
        & ~ Filters.user([updater.bot.id])
    )
    filter_command = Filters.command
    filter_reply = Filters.reply

    dp = updater.dispatcher
    dp.add_handler(MessageHandler(filter_user & filter_command, no))
    dp.add_handler(MessageHandler(filter_user & ~ filter_command & ~ filter_reply, add_keyboard))
    #dp.add_handler(MessageHandler(Filters.all, add_keyboard))
    dp.add_handler(CallbackQueryHandler(update_tag, pattern=regex.tag))
    dp.add_handler(CallbackQueryHandler(update_target, pattern=regex.target))
    dp.add_handler(CallbackQueryHandler(update_return, pattern=regex.ret))
    dp.add_handler(CallbackQueryHandler(update_message, pattern=regex.select))
    dp.add_handler(CallbackQueryHandler(push_single, pattern=regex.push))
Example #7
    def __init__(self, params, seed=0):

        np.random.seed(seed)

        self.layers = params['layers']
        self.hidden = {}
        self.bias = {}

        self.observation_filter = get_filter(params['ob_filter'],
                                             shape=(params['ob_dim'], ))
        self.update_filter = True

        self.hidden['h1'] = np.random.randn(
            params['h_dim'], params['ob_dim']) / np.sqrt(
                params['h_dim'] * params['ob_dim'])
        self.bias['b1'] = np.random.randn(params['h_dim']) / np.sqrt(
            params['h_dim'])

        if params['layers'] > 1:
            for i in range(2, params['layers'] + 1):
                self.hidden['h%s' % str(i)] = np.random.randn(
                    params['h_dim'], params['h_dim']) / np.sqrt(
                        params['h_dim'] * params['h_dim'])
                self.bias['b%s' % str(i)] = np.random.randn(
                    params['h_dim']) / np.sqrt(params['h_dim'])

        self.hidden['h999'] = np.random.randn(
            params['ac_dim'], params['h_dim']) / np.sqrt(
                params['ac_dim'] * params['h_dim'])

        self.w_hidden = np.concatenate([
            self.hidden[x].reshape(self.hidden[x].size, )
            for x in self.hidden.keys()
        ])
        self.w_bias = np.concatenate([
            self.bias[x].reshape(self.bias[x].size, )
            for x in self.bias.keys()
        ])

        self.params = np.concatenate((self.w_hidden, self.w_bias))
        self.used = 1

        self.N = self.params.size
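The policy above flattens every weight matrix and bias into the single vector self.params, so self.N follows directly from the layer sizes. A quick check of that arithmetic with made-up dimensions:

# Expected size of self.params for hypothetical dimensions.
ob_dim, h_dim, ac_dim, layers = 8, 32, 4, 3

n_weights = h_dim * ob_dim                 # h1
n_weights += (layers - 1) * h_dim * h_dim  # h2 .. h<layers>
n_weights += ac_dim * h_dim                # h999, the output layer
n_biases = layers * h_dim                  # b1 .. b<layers>

print(n_weights + n_biases)                # should equal the policy's N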
Example #8
output_dir = args.output_dir
outFNm = args.full_crossnet_file
if outFNm is None:
  outFNm = os.path.join(args.output_dir, utils.get_full_cross_file_name(mode_name1, mode_name2))
outFNm2 = args.db_edge_file
if outFNm2 is None:
  outFNm2 = os.path.join(args.output_dir, utils.get_cross_file_name(mode_name1, mode_name2, db_id, dataset))


src_mapping = utils.read_mode_file(srcFile)
if os.path.samefile(srcFile, dstFile):
  dst_mapping = src_mapping
else:
  dst_mapping = utils.read_mode_file(dstFile)

src_filter = utils.get_filter(args.src_mode_filter)
dst_filter = utils.get_filter(args.dst_mode_filter)

add_schema = True
counter = args.snap_id_counter_start
if counter == -1:
  counter = utils.get_max_id(outFNm)
print('Starting at snap id: %d' % counter)
with open(inFNm, 'r') as inF:
  with open(outFNm, 'a') as fullF:
    with open(outFNm2, 'w') as dbF:
      # Add schema/metadata
      if counter == 0:
        fullF.write('# Full crossnet file for %s to %s\n' % (mode_name1, mode_name2))
        fullF.write('# File generated on: %s\n' % utils.get_current_date())
        fullF.write('# snap_eid\tdataset_id\tsrc_snap_nid\tdst_snap_nid\n')
Example #9
outFNm = args.full_crossnet_file
if outFNm is None:
    outFNm = os.path.join(
        args.output_dir,
        utils.get_full_cross_file_name(mode_name1, mode_name2))
outFNm2 = args.db_edge_file
if outFNm2 is None:
    outFNm2 = os.path.join(
        args.output_dir,
        utils.get_cross_file_name(mode_name1, mode_name2, db_id, dataset))

src_mapping = utils.read_mode_file(srcFile)
if os.path.samefile(srcFile, dstFile):
    dst_mapping = src_mapping
else:
    dst_mapping = utils.read_mode_file(dstFile)

src_filter = utils.get_filter(args.src_mode_filter)
dst_filter = utils.get_filter(args.dst_mode_filter)

add_schema = True
counter = args.snap_id_counter_start
if counter == -1:
    counter = utils.get_max_id(outFNm)
print('Starting at snap id: %d' % counter)
with open(inFNm, 'r') as inF:
    with open(outFNm, 'a') as fullF:
        with open(outFNm2, 'w') as dbF:
            # Add schema/metadata
            if counter == 0:
                fullF.write('# Full crossnet file for %s to %s\n' %
                            (mode_name1, mode_name2))
                fullF.write('# File generated on: %s\n' %
                            utils.get_current_date())
Example #10
def create_mambo_crossnet_table(input_file,
                                src_file,
                                dst_file,
                                dataset_name,
                                db_id,
                                src_node_index,
                                dst_node_index,
                                mode_name1,
                                mode_name2,
                                output_dir,
                                full_crossnet_file,
                                db_edge_file,
                                src_mode_filter,
                                dst_mode_filter,
                                mambo_id_counter_start,
                                skip_missing_ids,
                                verbose=False,
                                delimiter=DELIMITER):
    inFNm = input_file
    srcFile = src_file
    dstFile = dst_file
    dataset = dataset_name
    db_id = db_id

    srcIdx = src_node_index
    dstIdx = dst_node_index

    src_db_id = utils.parse_dataset_id_from_name(os.path.basename(srcFile))
    dst_db_id = utils.parse_dataset_id_from_name(os.path.basename(dstFile))

    mode_name1 = utils.parse_mode_name_from_name(
        os.path.basename(srcFile)) if mode_name1 is None else mode_name1
    mode_name2 = utils.parse_mode_name_from_name(
        os.path.basename(dstFile)) if mode_name2 is None else mode_name2

    outFNm = full_crossnet_file
    if outFNm is None:
        outFNm = os.path.join(
            output_dir, utils.get_full_cross_file_name(mode_name1, mode_name2))
    outFNm2 = db_edge_file
    if outFNm2 is None:
        outFNm2 = os.path.join(
            output_dir,
            utils.get_cross_file_name(mode_name1, mode_name2, db_id, dataset))

    src_mapping = utils.read_mode_file(srcFile)
    if os.path.samefile(srcFile, dstFile):
        dst_mapping = src_mapping
    else:
        dst_mapping = utils.read_mode_file(dstFile)

    src_filter = utils.get_filter(src_mode_filter)
    dst_filter = utils.get_filter(dst_mode_filter)

    add_schema = True
    counter = mambo_id_counter_start
    if counter == -1:
        counter = utils.get_max_id(outFNm)
    if verbose:
        print('Starting at mambo id: %d' % counter)
    with open(inFNm, 'r') as inF, open(outFNm,
                                       'a') as fullF, open(outFNm2,
                                                           'w') as dbF:
        # Add schema/metadata
        if counter == 0:
            fullF.write('# Full crossnet file for %s to %s\n' %
                        (mode_name1, mode_name2))
            fullF.write('# File generated on: %s\n' % utils.get_current_date())
            fullF.write(
                '# mambo_eid%sdataset_id%ssrc_mambo_nid%sdst_mambo_nid\n' %
                (delimiter, delimiter, delimiter))
        dbF.write('# Crossnet table for dataset: %s\n' % dataset)
        dbF.write('# File generated on: %s\n' % utils.get_current_date())
        # Process file
        for line in inF:
            if line[0] in COMMENT:
                continue
            vals = utils.split_then_strip(line, delimiter)
            if add_schema:
                attrs_schema = '# mambo_eid%ssrc_dataset_id%sdst_dataset_id' % (
                    delimiter, delimiter)
                for i in range(len(vals)):
                    if i != srcIdx and i != dstIdx:
                        attrs_schema += '%sC%d' % (delimiter, i)
                dbF.write('%s\n' % attrs_schema)
                add_schema = False
            id1 = vals[srcIdx]
            id2 = vals[dstIdx]
            if src_filter:
                id1 = src_filter(id1)
            if dst_filter:
                id2 = dst_filter(id2)
            if id1 == '' or id2 == '':
                continue
            if skip_missing_ids and (id1 not in src_mapping
                                     or id2 not in dst_mapping):
                #print id1, id2
                continue
            attr_strs = ''
            for i in range(len(vals)):
                if i != srcIdx and i != dstIdx:
                    attr_strs += delimiter + vals[i]
            fullF.write('%d%s%d%s%d%s%d\n' %
                        (counter, delimiter, db_id, delimiter,
                         src_mapping[id1], delimiter, dst_mapping[id2]))
            dbF.write('%d%s%d%s%d%s\n' % (counter, delimiter, src_db_id,
                                          delimiter, dst_db_id, attr_strs))
            counter += 1
    if verbose:
        print('Ending at mambo id: %d' % counter)
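For reference, a call to create_mambo_crossnet_table could look like the following; every file name, dataset name, and id here is a made-up placeholder:

# Hypothetical invocation; all file names and values are placeholders.
create_mambo_crossnet_table(
    input_file='edges.tsv',
    src_file='src_mode.tsv',
    dst_file='dst_mode.tsv',
    dataset_name='example_dataset',
    db_id=0,
    src_node_index=0,
    dst_node_index=1,
    mode_name1=None,           # parsed from src_file when None
    mode_name2=None,           # parsed from dst_file when None
    output_dir='output/',
    full_crossnet_file=None,   # default name generated in output_dir
    db_edge_file=None,
    src_mode_filter=None,
    dst_mode_filter=None,
    mambo_id_counter_start=0,
    skip_missing_ids=True,
    verbose=True,
)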
Example #11
def search_post(s, p):
    """ Searching on group n times parallel and serial"""
    f = __search_with_post_on_group__
    return measuring.exec_function(
        f, s, p, measuring.default_generate_data,
        utils.get_filter('post'))