Example #1
    def UpdateNetList(self,
                      state=None,
                      info=None,
                      force_check=False,
                      firstrun=False):

        if self._Daemon is None:
            return

        if not state:
            state, trash = self._Daemon.GetConnectionStatus()
            print("State: ")
            pp(state)
            print("Trash: ")
            pp(trash)

        if force_check or self._PrevWicdState != state:
            self.GenNetworkList()  ## refresh the network list

        if info is not None:
            if len(info) > 3:
                _id = int(info[3])
                if _id < len(self._WirelessList):
                    self._WirelessList[_id].UpdateStrenLabel(str(info[2]))

        self._PrevWicdState = state
Example #2
    def print_dup(self):
        """
        Print information about duplicate images.
        :return:
        """
        beeprint.pp(self.__dup_name_dict)
        return self
Example #3
def feature_outcome_statistics(how_many_documents=-1, db='log_feature'):
    db = Mongo.getInstance()[db]
    names = [
        collection for collection in db.list_collection_names()
        if not collection.startswith('system.')
    ]
    stats_dict = {}

    for collection_name in names:
        c = db[collection_name]
        start_date = None
        end_date = None
        count_true = 0
        # for idx, d in enumerate(c.find().sort([("timestamp", pymongo.DESCENDING)])):
        for idx, d in enumerate(c.find().sort([("_id", pymongo.DESCENDING)])):
            if idx == 0:
                start_date = d['timestamp']

            count_true += 1 if d['outcome'] is True else 0

            if idx == how_many_documents - 1:
                break

        if start_date is None:
            # empty collection: nothing was scanned
            continue

        stats_dict[collection_name] = {
            'name': collection_name,
            'number positive outcomes': count_true,
            # idx is zero-based, so idx + 1 documents were scanned
            'perc positive outcomes': count_true / (idx + 1),
            'start date': start_date.strftime("%Y-%m-%d %H:%M"),
            'end date': d['timestamp'].strftime("%Y-%m-%d %H:%M"),
        }

    for stats in stats_dict.values():
        pp(stats)
Example #4
    def print_info(self):
        """
        Print image information.
        :return:
        """
        beeprint.pp(self.__info_dict)
        return self
Example #5
    def create(self, request):
        cp = request.data.copy()
        cp['year'] = request.data['year'] + '-01-01'
        volume = int(cp['volume'])

        # generate questions
        # questions = serializers.serialize("json", QuestionTr.objects.order_by('?')[:volume])
        questions = QuestionTr.objects.order_by('?')[:volume].values()
        question_ids = list(questions.values_list('id', flat=True))
        cp['questions'] = str(question_ids)

        beeprint.pp(cp)
        serializer = self.get_serializer(data=cp)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        exam_id = serializer.data["id"]

        # Update ExamTr for questions field
        # s = ExamTr.objects.filter(id=exam_id).first()
        # s.questions = question_ids
        # s.save()

        # Serialize Response Data
        response_data = {
            "questions": list(questions),
            "exam": serializer.data,
        }
        # beeprint.pp(response_data)

        headers = self.get_success_headers(exam_id)
        return Response(response_data,
                        status=status.HTTP_201_CREATED,
                        headers=headers,
                        content_type='application/json')
Example #6
def test_complicate_data(output=True):
    config = Config()
    config.string_break_method = C._STRING_BREAK_BY_WIDTH
    config.text_autoclip_enable = False
    res = pp(values, output, config=config)
    res += pp(long_text_in_dict, output, config=config)
    res += pp(long_text_in_list, output, config=config)
    return res
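
A stand-alone sketch of the Config pattern these tests use, with hedged assumptions: the imports below and the sample data are illustrative, and assume beeprint exposes pp, Config, and its constants module as shown (the test files obtain them via imports not included in the snippets).

# Minimal sketch of the pattern above; imports and data are assumptions, not from the snippets.
from beeprint import pp, Config
from beeprint import constants as C

data = {"text": "a fairly long string that will be broken by width", "nums": list(range(5))}

config = Config()
config.string_break_method = C._STRING_BREAK_BY_WIDTH  # break long strings at the configured width
config.text_autoclip_enable = False                    # do not clip long text blocks

rendered = pp(data, output=False, config=config)  # output=False returns the rendering as a string
print(rendered)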
Example #7
def send(ctx, phone_number, country_code='86', mode='async', send_mode='sms'):
    pp(call(
        'verification/send',
        country_code=country_code,
        phone_number=phone_number,
        mode=mode,
        send_mode=send_mode
    ))
Example #8
def test_complicate_data(output=True):
    config = Config()
    config.string_break_method = C._STRING_BREAK_BY_WIDTH
    config.text_autoclip_enable = False
    res = pp(values, output, config=config)
    res += pp(long_text_in_dict, output, config=config)
    res += pp(long_text_in_list, output, config=config)
    return res
Example #9
def changeArrows():
    pp(plot.layout)
    arrowRenderers = [
        renderer for renderer in plot.renderers if
        (isinstance(renderer.name, str) and renderer.name.startswith("arrow"))
    ]
    for arrowRenderer in arrowRenderers:
        arrowRenderer.end.line_color = "green"
Example #10
    def file_rm(cls, url):
        path = "intelligence/{0}".format(url)
        if not os.path.isdir(path):
            return
        shell = "cd {} && rm -rf *.txt && rm -rf tee*.csv && rm -rf *.xml && rm -rf *.html".format(path)
        subprocess.run(shell, shell=True)
        pp('Temporary files cleaned up')
Example #11
def pp(var):
    """Pretty print.

    Wrapper around beeprint

    Args:
        var: some variable
    """
    beeprint.pp(var)
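
A usage sketch for a thin wrapper like this; the record below is made-up illustrative data, not from the original code.

# Hypothetical call site for the pp wrapper above.
record = {"id": 42, "tags": ["a", "b"], "nested": {"ok": True}}
pp(record)  # delegates to beeprint.pp and pretty-prints the dict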
Example #12
def alternative_pretty_print(self, myobject, prefix_line=None):
    """
    Make object(s) and structure(s) clearer to debug
    """

    if prefix_line is not None:
        print("PRETTY PRINT [%s]" % prefix_line)
    from prettyprinter import pprint as pp
    pp(myobject)
    return self
Example #13
def pretty_print(myobject, prefix_line=None):
    """
    Make object(s) and structure(s) clearer to debug
    """

    if prefix_line is not None:
        print("PRETTY PRINT [%s]" % prefix_line)
    from beeprint import pp
    pp(myobject)
    return
Example #14
def make(only_xls=False):

    ###################
    q = query.get_table_query(RDB_TABLE1)
    cursor = q.run()
    # print("SOME", cursor)

    # HTML STRIPPER
    if es.indices.exists(index=EL_INDEX0):
        es.indices.delete(index=EL_INDEX0)
    es.indices.create(index=EL_INDEX0, body=HTML_ANALYZER)

    # MULTI INDEX FILTERING
    if es.indices.exists(index=EL_INDEX1):
        es.indices.delete(index=EL_INDEX1)
    es.indices.create(index=EL_INDEX1, body=INDEX_BODY1)
    logger.info("Created index %s" % EL_INDEX1)

    # SUGGESTIONS
    if es.indices.exists(index=EL_INDEX2):
        es.indices.delete(index=EL_INDEX2)
    es.indices.create(index=EL_INDEX2, body=INDEX_BODY2)
    logger.info("Created index %s" % EL_INDEX2)

    # LEXIQUE
    if es.indices.exists(index=EL_INDEX3):
        es.indices.delete(index=EL_INDEX3)
    es.indices.create(index=EL_INDEX3, body={})
    logger.info("Created index %s" % EL_INDEX3)

    # es.indices.put_mapping(
    #     index=EL_INDEX2, doc_type=EL_TYPE2, body=SUGGEST_MAPPINGS)
    # print(es.indices.stats(index=EL_INDEX2))
    # exit(1)

    # print(es.indices.stats(index=EL_INDEX1))
    # print(es.info())

    ##################
    # READ FROM XLS FILE
    # dictionary = read_xls(fix_suggest=(not only_xls))
    read_xls(fix_suggest=(not only_xls))

    ###################
    count = 0
    for doc in cursor:
        elobj = single_update(doc)
        if elobj is not None:
            count += 1
            logger.info("[Count %s]\t%s" % (count, elobj['extrait']))

    # print("TOTAL", es.search(index=EL_INDEX1))
    print("Completed. No images:")
    pp(noimages.keys())
Example #15
    def DbusDaemonStatusChangedSig(self, state=None, info=None):
        print("in DbusDaemonStatusChangedSig")
        # Example signal payload:
        #   state: dbus.UInt32(2L)
        #   info:  ['192.168.31.141', 'TP-LINK4G', '88', '0', '72.2 Mb/s']
        pp(info)
        self.UpdateNetList(state, info)
        if info is not None:
            self._Screen.Draw()
            self._Screen.SwapAndShow()
Example #16
def save_credentials(user_id, code):
    code = code[0]
    user_id = user_id[0]
    print(code)
    print(user_id)
    oauth.fetch_access_token(code, get_redirect_url())
    # current = yaml.load(open(CREDS_FILE))
    user = oauth.token
    user['id'] = user_id
    pp(user)
    save_token(user)
Example #17
def pair_print(title, v):
    print(title)
    print("----")
    print("**pprint:**\n```python")
    pprint(v)
    print("```\n")
    print("**beeprint:**\n```python")
    pp(v)
    print("```\n")
    print("**pprintpp:**\n```python")
    ppp(v)
    print("```\n")
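
The three printer aliases used above are not defined in the snippet; a plausible set of imports, assuming the standard-library pprint module plus the beeprint and pprintpp packages:

# Assumed imports for pair_print; the aliases mirror the names used in the function.
from pprint import pprint            # standard library
from beeprint import pp              # beeprint
from pprintpp import pprint as ppp   # pprint++ (pprintpp package)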
Example #18
def eval(y_true, y_hat):
    cm = confusion_matrix(y_true, y_hat)

    pp(cm)

    ps = precision_score(y_true, y_hat, average='micro')

    rs = recall_score(y_true, y_hat, average='micro')

    f1 = f1_score(y_true, y_hat, average='micro')

    print("Precision: {:.2f}, Recall: {:.2f} F1 Score: {:.2f}".format(
        ps, rs, f1))
Example #19
def main():
    # args = parse_args()

    parser = Parser(input_str)
    nodes = parser.parse()

    for n in nodes:
        beeprint.pp(n, max_depth=10, indent=4)

    checker = Checker(nodes)
    checker.check()

    gen = Generator(nodes, "./deep/nested/shit", True)
    gen.generate()
Example #20
def import_students(table, path):
    if len(list(table.run())) > 0:
        print("Table not empty")
        return

    with open(path) as fp:
        csvfile = UTF8Recoder(fp, 'utf-8')
        for r in csv.DictReader(csvfile):
            # r['name'] = unicode(r['name'].decode('utf-8')
            r['name'] = r['name'].strip()
            r['address'] = r['address'].strip()
            r['id'] = int(r['id'].strip())
            pp(r)
            table.insert(r).run()
Example #21
    def feature_importance(model, x_pipeline):
        i = model.feature_importances_

        extra_attrs = ["rooms_per_hhold", "pop_per_hhold", "bedrooms_per_room"]

        # encoder = get_pipeline_object(x_pipeline, ["union", "cat_pipeline", "label_binarizer"])

        encoder = x_pipeline.get_params()["union__cat_pipeline__label_binarizer"]

        one_hot_classes = list(np.array(encoder.categories_).ravel())

        attrs = num_attribs + extra_attrs + one_hot_classes

        pp(sorted(zip(i, attrs), reverse=True))
Example #22
def info(obj):
    print(type(obj))

    if isinstance(obj, np.ndarray):
        __np_info(obj)
        df = pd.DataFrame(obj)

        __df_info(df)
        return

    if isinstance(obj, pd.DataFrame):
        df = pd.DataFrame(obj)

        __df_info(df)
        return

    if isinstance(obj, BaseEstimator):
        if hasattr(obj, "coef_"):
            pp(obj.coef_)

        if hasattr(obj, "feature_importances_"):
            pp(obj.feature_importances_)

        pp(obj.__dict__)
        return

    if isinstance(obj, dict):
        pp(obj)
        return

    raise ValueError(type(obj))
Example #23
    def test_complicate_data(self):
        config = Config()
        config.string_break_method = C._STRING_BREAK_BY_WIDTH
        config.text_autoclip_enable = False

        ans = u""
        data_path = os.path.join(CUR_SCRIPT_PATH, 'data/all_in_one.txt')
        with codecs.open(data_path, encoding="utf8") as fp:
            ans = fp.read()

        res = pp(values, output=False, config=config)
        res += pp(df.long_text_in_dict, output=False, config=config)
        res += pp(df.long_text_in_list, output=False, config=config)
        self.assertEqual(res, ans)
Example #24
    def test_complicate_data(self):
        config = Config()
        config.string_break_method = C._STRING_BREAK_BY_WIDTH
        config.text_autoclip_enable = False

        ans = u""
        data_path = os.path.join(CUR_SCRIPT_PATH, 
                                 'data/all_in_one.txt')
        with codecs.open(data_path, encoding="utf8") as fp:
            ans = fp.read()

        res = pp(values, output=False, config=config)
        res += pp(df.long_text_in_dict, output=False, config=config)
        res += pp(df.long_text_in_list, output=False, config=config)
        self.assertEqual(res, ans)
Example #25
def test_autoclip_no_room(output=True):
    config = Config()
    # config.debug_level = 9
    config.max_depth = 2
    config.string_break_width = 1
    config.string_break_method = C._STRING_BREAK_BY_WIDTH
    return pp(autoclip_no_room, output, config=config)
Example #26
def test():
    """test all cases"""
    for name, subcases in CASES.items():
        for subname, f in subcases.items():
            value = f(make_data())
            try:
                ok = (value['user'] == DATA['user']
                      and value['tags'] == DATA['tags']
                      and value['style'] == DATA['style'])
            except Exception:
                ok = False
            if ok:
                print_item(name, subname, 'OK')
            else:
                print('{}:{}'.format(name, subname).center(60, '-'))
                pp(value)
Example #27
    def the_data(cls, url=''):
        # Process the HTML report generated by theHarvester
        path = "intelligence/{0}/{0}.html".format(url)

        try:
            with open(path, 'r', encoding='utf-8') as f:
                soup = BeautifulSoup(f.read(), 'lxml')

            with open("intelligence/{0}/the_{0}.txt".format(url), "a+") as r:
                for data in soup.find_all('li'):
                    r.writelines(data.get_text() + '\r\n')
            subprocess.run('rm %s' % path, shell=True)

        except Exception:
            pp('File not found: %s' % path)
Example #28
def test_3lines_clip(output=True):
    config = Config()
    config.text_autoclip_enable = True
    config.text_autoclip_method = C._TEXT_AUTOCLIP_BY_LINE
    config.string_break_enable = True
    config.string_break_method = C._STRING_BREAK_BY_WIDTH
    return pp(clip_by_3_lines, output, config=config)
Example #29
def test_autoclip_no_room(output=True):
    config = Config()
    # config.debug_level = 9
    config.max_depth = 2
    config.string_break_width = 1
    config.string_break_method = C._STRING_BREAK_BY_WIDTH
    return pp(autoclip_no_room, output, config=config)
Example #30
def test_3lines_clip(output=True):
    config = Config()
    config.text_autoclip_enable = True
    config.text_autoclip_method = C._TEXT_AUTOCLIP_BY_LINE
    config.string_break_enable = True
    config.string_break_method = C._STRING_BREAK_BY_WIDTH
    return pp(clip_by_3_lines, output, config=config)
Example #31
    def test_comma_ending(self):
        rel_path = 'data/dict/comma.txt'
        data_path = os.path.join(CUR_SCRIPT_PATH, rel_path)
        with codecs.open(data_path, encoding='utf8') as fp:
            ans = fp.read()

        res = pp({'key': 'aaa,,,'}, output=False)
        self.assertEqual(ans, res)
Example #32
    def GenList(self):
        if self._AList is None:
            return
        self._MyList = []
        self._PsIndex = 0
        start_x = 0
        start_y = 0

        for i, v in enumerate(self._AList):
            #print(i,v) # (0, dbus.String(u'AddressType'))

            li = InfoPageListItem()
            li._Parent = self
            li._PosX = start_x
            li._PosY = start_y + i * InfoPageListItem._Height
            li._Width = Width
            li._Fonts["normal"] = self._ListFontObj

            if v == "UUIDs":
                li._Fonts["small"] = self._ListSm2FontObj
            else:
                li._Fonts["small"] = self._ListSmFontObj

            li.Init(str(v))
            li._Flag = v

            if v == "UUIDs":
                if len(self._AList[v]) > 1:
                    pp(self._AList[v][0])
                    sm_text = str(self._AList[v][0])
                else:
                    sm_text = "<empty>"
            else:
                sm_text = str(self._AList[v])

            if sm_text == "0":
                sm_text = "No"
            elif sm_text == "1":
                sm_text = "Yes"

            sm_text = sm_text[:20]
            li.SetSmallText(sm_text)

            li._PosX = 2
            self._MyList.append(li)
Example #33
    def fast_get_all(self, keyword, size=5, index=EL_INDEX3, type=EL_TYPE1):

        args = {'index': index, 'doc_type': type}
        # args['sort'] = ["sort_string:asc", "sort_number:asc"]
        # args['from_'] = current - 1
        args['from_'] = 0
        args['size'] = size
        args['body'] = {
            'query': {"match": {"_all": {"query": keyword}}}
        }

        try:
            out = self._api.search(**args)
        except Exception as e:
            logger.error("Failed to execute fast get query\n%s" % e)
            return None, False
        pp(out)
        return out['hits']['hits'], out['hits']['total']
Example #34
def main():
    # config for training
    config = Config()

    # config for validation
    valid_config = Config()
    valid_config.keep_prob = 1.0
    valid_config.dec_keep_prob = 1.0
    valid_config.batch_size = 60

    # configuration for testing
    test_config = Config()
    test_config.keep_prob = 1.0
    test_config.dec_keep_prob = 1.0
    test_config.batch_size = 1

    pp(config)

    # get data set
Example #35
    def grid_search(model, train_X, train_y):
        from sklearn.model_selection import GridSearchCV

        param_grid = [
            {'n_estimators': [3, 10, 30, 50, 80], 'max_features': [2, 4, 6, 8, 10]},
            {'bootstrap': [False], 'n_estimators': [3, 10, 30, 50, 80], 'max_features': [2, 4, 6, 8, 10]},
        ]

        grid_search = GridSearchCV(model, param_grid, cv=5, n_jobs=os.cpu_count(), scoring="neg_mean_squared_error")

        grid_search.fit(train_X, train_y)

        pp(grid_search.best_params_)

        pp(grid_search.best_estimator_)

        cvres = grid_search.cv_results_

        for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
            print(np.sqrt(-mean_score), params)
Example #36
def inspect(df):
    print("\n\nHEAD")
    pp(df.head())

    print("\n\nINFO")
    pp(df.info())

    print("\n\nINCOME_CAT_DIST")
    pp(df["income_cat"].value_counts() / len(df))

    print("\n\nCORR median_house_value")
    corr_matrix = df.corr()
    pp(corr_matrix["median_house_value"].sort_values(ascending=False))
Example #37
def main_processing(args):
    bp = BuildProcess()
    bp.create_process()
    print("====================================")
    git_rev = bp.get_git_commit()
    print("Git Commit is %r" % git_rev)
    print("====================================")

    build_components(git_rev)
    pp(DeviceComponent.comp_dict)

    if args.clean:
        for key, value in DeviceComponent.comp_dict.items():
            print("Cleaning Device %s" % key)
            value.clean(bp)
    else:
        for key, value in DeviceComponent.comp_dict.items():
            print("Building Device %s" % key)
            value.build(bp)

    bp.terminate_process()
Example #38
def test_sort_of_string(output=True):
    config = Config()
    config.debug_delay = False
    config.str_display_not_prefix_u = False
    config.str_display_not_prefix_b = False
    for sstr in sort_of_string:
        ints = ''
        for e in sstr:
            try:
                ints += '%d ' % ord(e)
            except TypeError:
                ints += '%d ' % e
        print('%40s %s %s' % (ints, repr(sstr), len(sstr)))
    return pp(sort_of_string, output, config=config)
Example #39
    def test_out_of_range_in_dict(self):
        config = Config()
        config.max_depth = 1

        ans = ""
        rel_path = 'data/out_of_range_in_dict.txt'
        data_path = os.path.join(CUR_SCRIPT_PATH, rel_path)
        with codecs.open(data_path, encoding='utf8') as fp:
            ans = fp.read()
        # delete object id, such as
        # <definition.NormalClassOldStyle object at 0x7f2d9a61bac8>
        ans, _ = re.subn(r"at 0x[\d\w]+", "", ans)

        res = pp(df.out_of_range_in_dict, output=False, config=config)
        # delete object id, such as
        # <definition.NormalClassOldStyle object at 0x7f2d9a61bac8>
        res, _ = re.subn(r"at 0x[\d\w]+", "", res)
        
        self.assertEqual(res, ans)
Example #40
def test_recursion(output=True):
    d = {}
    d['d'] = d

    d2 = {'1':1}
    d2['r'] = {'d2':d2}

    l = []
    l.append(l)

    recursive_values = [
        l,
        l,  # this one should not be treated as recursion
        d,
        d2,
        inst_of_recur_normal,
        RecurTestRecurClass,
    ]
    config = Config()
    # config.debug_level = 9
    return pp(recursive_values, output, config=config)
Example #41
def test_boundary_break(output=True):
    config = Config()
    config.string_break_method = C._STRING_BREAK_BY_WIDTH
    return pp(v_line_break_boundary, output, config=config)
Example #42
def test_out_of_range_in_dict(output=True):
    config = Config()
    config.max_depth = 1
    return pp(out_of_range_in_dict, output, config=config)
Example #43
def test_dict_ordered_keys(output=True):
    config = Config()
    config.dict_ordered_key_enable = True
    return pp(dict_multi_keys, output, config=config)
Example #44
def test_inline_repr_out_of_range(output=True):
    config = Config()
    config.max_depth = 1
    config.string_break_method = C._STRING_BREAK_BY_WIDTH
    config.string_break_width = 40
    return pp(inline_repr, output, config=config)
Example #45
          timeit("json.loads(text)", number=100000,
                 globals={"json": json, "text": text}))
    print("validr: %.3f" %
          timeit("f(value)", number=100000,
                 globals={"f": f_validr, "value": value}))
    print("validr_simple: %.3f" %
          timeit("f(value)", number=100000,
                 globals={"f": f_validr_simple, "value": value}))
    print("schema: %.3f" %
          timeit("f(value)", number=100000,
                 globals={"f": f_schema, "value": value}))


def profile():
    run("for i in range(10000):\tf(value)")

if __name__ == "__main__":
    cmd = sys.argv[1] if len(sys.argv) > 1 else None
    if cmd == "-p":
        profile()
    elif cmd == "-t":
        try:
            from beeprint import pp
        except ImportError:
            from pprint import pprint as pp
        pp(f_validr(value))
        pp(f_validr_simple(value))
        pp(f_schema(value))
    else:
        benchmark()
Example #46
    def test_negative(self):
        self.assertEqual(pp(-1, output=False), '-1\n')
        self.assertEqual(pp(-1.1, output=False), '-1.1\n')
Example #47
    def test_string(self):
        # string literal
        config = Config()
        config.str_display_not_prefix_u = False
        config.str_display_not_prefix_b = False
        self.assertEqual(pp("plain string", output=False, config=config), "'plain string'\n")

        # unicode string
        s = u'unicode string'
        if pyv == 2:
            self.assertEqual(pp(s, output=False, config=config), u"u'unicode string'\n")
        else:
            self.assertEqual(pp(s, output=False, config=config), u"'unicode string'\n")

        # utf8 string
        s = u'utf8 string'.encode('utf-8')
        if pyv == 2:
            self.assertEqual(pp(s, output=False, config=config), u"'utf8 string'\n")
        else:
            self.assertEqual(pp(s, output=False, config=config), u"b'utf8 string'\n")

        # gb2312 string
        s = u'gb2312 string'.encode('gb2312')
        if pyv == 2:
            self.assertEqual(pp(s, output=False, config=config), u"'gb2312 string'\n")
        else:
            self.assertEqual(pp(s, output=False, config=config), u"b'gb2312 string'\n")

        # unicode special characters string
        s = u'\\'
        if pyv == 2:
            self.assertEqual(pp(s, output=False, config=config), u"u'\\\\'\n")
        else:
            self.assertEqual(pp(s, output=False, config=config), u"'\\\\'\n")

        # utf8 special characters string
        s = u'\\'.encode("utf8")
        if pyv == 2:
            self.assertEqual(pp(s, output=False, config=config), u"'\\\\'\n")
        else:
            self.assertEqual(pp(s, output=False, config=config), u"b'\\\\'\n")
Example #48
def test_class_last_el(output=True):
    config = Config()
    config.instance_repr_enable = False
    rm = ReprMethodClassNewStyle()
    nc = NormalClassNewStyle()
    return pp([rm, nc, rm], output, config=config)
Example #49
def test_class_inst_repr_enable(output=True):
    config = Config()
    inst_repr = []
    for c in class_repr:
        inst_repr.append(c())
    return pp(class_repr + inst_repr, output, config=config)
Example #50
def test_inner_class(output=True):
    config = Config()
    return pp(OuterClass, output, config=config)
Example #51
def test_class(output=True):
    config = Config()
    return pp(EmptyClassNewStyle, output, config=config)
Example #52
def test_tuple_nested(output=True):
    config = Config()
    return pp(tuple_nested, output, config=config)
Example #53
    def test_positive(self):
        self.assertEqual(pp(1, output=False), '1\n')
        self.assertEqual(pp(1.1, output=False), '1.1\n')
Example #54
def test_tuple(output=True):
    config = Config()
    return pp(tuple_testing, output, config=config)
Example #55
def test_class_all_repr_disable(output=True):
    config = Config()
    config.instance_repr_enable = False
    return pp(class_repr, output, config=config)
Example #56
# variables
host = 'rdb'
db = 'mydb'
table = 'mytable'

# connect and create
r.connect(host=host).repl()
if db not in r.db_list().run():
    r.db_create(db).run()
if table not in r.db(db).table_list().run():
    r.db(db).table_create(table).run()

# implement actions
query = r.db(db).table(table)
if len(sys.argv) > 1:
    command = sys.argv[1]
    if command == 'changefeed':
        try:
            for change in query.changes().run():
                print("Subscription update!")
                pp(change)
        except Exception as e:
            print("Table removed or connection aborted")
    elif command == 'insert':
        query.insert({'key1': 'value1', 'key2': 'value2'}).run()
    elif command == 'update':
        query.filter({'key1': 'value1'}).update({'key1': 'UPDATED!'}).run()
    elif command == 'clear':
        r.db_drop(db).run()