예제 #1
0
    def change_log(self, fields, pipe=None, _new=False):
        """
        Push a change-log record onto the Redis "change_log" list queue.

        Record layout (":_:"-separated):
            insert/update: oper, model type, id, json-encoded fields
            delete:        oper, model type, id, empty value

        param:
            fields: the changed fields; for insert/update a falsy value
                    skips logging entirely (delete ignores it)
            pipe:   redis pipe; falls back to self.db when None
            _new:   True -> insert, False -> update, any other value
                    (e.g. the string "delete") -> delete
        """
        # Skip entirely when data synchronisation is disabled.
        if not setting.DATA_SYNC:
            return

        # Prefer the caller's pipeline, otherwise the plain client.
        dp = pipe or self.db
        if _new == True:
            oper = "insert"
        elif _new == False:
            oper = "update"
        else:
            oper = "delete"

        # Build the change-log record string.
        if oper == "delete":
            val = "%(oper)s:_:%(model_type)s:_:%(id)s:_:%(value)s" % {
                "oper": oper,
                "model_type": self.__class__.__name__,
                "id": self.id,
                "value": "",
            }
        else:
            if not fields:
                return
            sfields = json.dumps(fields)
            val = "%(oper)s:_:%(model_type)s:_:%(id)s:_:%(value)s" % {
                "oper": oper,
                "model_type": self.__class__.__name__,
                "id": self.id,
                "value": sfields,
            }
        logger.info("sync: " + val)
        # Push onto the Redis list queue via dp so the self.db fallback
        # actually takes effect (the original called pipe.lpush, which
        # crashes when no pipeline is passed).
        dp.lpush("change_log", val)
예제 #2
0
    def delete(self):
        """
        Delete this object and all of its bookkeeping inside one Redis
        pipeline: secondary-index entries, the object's data, its entry
        in the class-level id list, plus a "delete" change-log record.

        Raises: re-raises any pipeline error after resetting the pipe.
        """
        n = datetime.now()
        # open a pipeline so the deletes commit together
        pipe = self.db.pipeline()
        try:
            # 1. drop the secondary-index entries
            self.drop_index(pipe)

            # 2. delete the object's stored data
            pipe.delete(self.dump_key())

            # 3. remove this id from the class-level id list
            pipe.lrem(self.__class__.list_key(),0,self.id)
            self.change_log(None,pipe,"delete")
            # commit everything to Redis
            pipe.execute()
            logger.info("delete type:%s, id:%s ,use: %s" % (self.__class__.__name__,self.id,datetime.now() - n))
        except Exception,e:
            # undo the queued commands before propagating the error
            pipe.reset()
            logger.error(str(e))
            raise e
예제 #3
0
 def change_log(self, oper, obj_id, baseobj, pipe=None, score=None):
     """
     Record a reference-relation change in the Redis "change_log" queue.

     Record layout (":_:"-separated):
         list|sortset:insert:user_posts:user_id:post_id
         list|sortset:delete:user_posts:user_id:post_id
     param:
         oper: the operation, a string ("sortset..." variants append score)
         obj_id: id of the related object, integer
         baseobj: the owning (base) object
         pipe: redis pipe, default None (falls back to the shared client)
         score: rank value, only written for sortset operations
     """
     # Nothing to record when data sync is switched off.
     if not setting.DATA_SYNC:
         return

     # Prefer the caller's pipeline, otherwise the shared Redis client.
     client = pipe or RedisClient.getInstance().redis
     # Record = oper : <base type>_<ref type>s : base id : related id
     base_name = str(baseobj.__class__.__name__).lower()
     ref_name = self.ref.lower()
     if base_name == ref_name:
         # self-referencing relation: disambiguate with the field name
         ref_name = self.name.lower()
     parts = {
         "oper": oper,
         "model_type": base_name,
         "relate_type": ref_name,
         "id": baseobj.id,
         "rid": obj_id,
         "score": score,
     }
     if oper.startswith("sortset"):
         val = "%(oper)s:_:%(model_type)s_%(relate_type)ss:_:%(id)s:_:%(rid)s:_:%(score)s" % parts
     else:
         val = "%(oper)s:_:%(model_type)s_%(relate_type)ss:_:%(id)s:_:%(rid)s" % parts
     logger.info("sync: " + val)
     # push onto the Redis list queue
     client.lpush("change_log", val)
예제 #4
0
 def lpush(self,key,value,**kwargs):
     """
     LPUSH key value -- append an element to the head of the List at key,
     recording a "list:insert" change-log entry in the same pipeline.
     param:
         key:string
         value:string
         **kwargs: a dict
             obj: the object being inserted (its id goes to the change log)
             baseobj: the object owning the relation
     return:
         True or False (False on any pipeline error; the pipe is reset)
     """
     #print "listfield lpush ",key,",",value
     try:
         # time the operation only in debug mode
         if setting.Debug:
             n = datetime.datetime.now()
         # pipeline so the push and its change-log entry commit together
         pipe = RedisClient.getInstance().redis.pipeline()
         pipe.lpush(key,value)
         self.change_log("list:insert",kwargs["obj"].id,kwargs["baseobj"],pipe)
         pipe.execute()
         if setting.Debug:
             logger.info(" lpush key: %s,use : %s" % (key,datetime.datetime.now() - n))
         return True
     except Exception,e:
         # undo the queued commands, report failure to the caller
         pipe.reset()
         logger.error(e)
         return False
예제 #5
0
 def on_message(self, message):
     """
     Handle an incoming websocket message.

     'heart' messages are heartbeat pings and are only logged; anything
     else is forwarded to the chatbot and the answer written back.
     """
     if message == 'heart':
         device_id = str(self.get_argument('device_id'))
         logger.info(device_id + ': 心跳连接正常')
     else:
         res = xiao_tian_answer(question=message)
         self.write_message({"message": res})
     # log as a dict (the original `{"message", message}` built a set
     # literal, almost certainly by mistake)
     logger.info({"message": message})
예제 #6
0
 def smembers(self, key, select=None, **kwargs):
     """
     SMEMBERS key -- fetch every member of the Set stored at key and
     resolve the ids to model objects.
     param:
         key: string
         select: passed through to the object filter, default None
         **kwargs: accepted for interface compatibility, unused here
     return:
         list of objects resolved from the member ids
     """
     started = datetime.datetime.now()
     member_ids = RedisClient.getInstance().redis.smembers(key)
     logger.info("smembers key: %s,select: %s,use: %s" % (key, select, datetime.datetime.now() - started))
     # resolve all member ids to objects in one filter call
     return self.ref_klass.objects.filter(id=tuple(member_ids), select=select)
예제 #7
0
 def lrange(self, key, start=0, end=10, select=None, **kwargs):
     """
     LRANGE key start end -- fetch a slice of the List stored at key and
     resolve the stored ids to model objects.
     param:
         key: string
         start: integer, default 0
         end: integer, default 10
         select: related-object selection, default None
         **kwargs: accepted for interface compatibility, unused here
     return:
         the objects resolved from the slice
     """
     began = datetime.datetime.now()
     ids = RedisClient.getInstance().redis.lrange(key, start, end)
     logger.info("lrange key: %s,start: %s, end: %s ,select:%s,use : %s" % (key, start, end, select, datetime.datetime.now() - began))
     # resolve the id slice to objects in one filter call
     return self.ref_klass.objects.filter(id=tuple(ids), select=select)
예제 #8
0
 def lrange(self, key, start=0, end=10, **kwargs):
     """
     LRANGE key start end -- fetch a slice of the List stored at key and
     return the related objects.
     param:
         key: string
         start: integer, default 0
         end: integer, default 10
         **kwargs: forwarded to find_include for related-object loading
     return:
         the related objects for the slice
     """
     if setting.Debug:
         began = datetime.datetime.now()
     ids = RedisClient.getInstance().redis.lrange(key, start, end)
     if setting.Debug:
         logger.info("lrange key: %s,start: %s, end: %s ,use : %s" % (key, start, end, datetime.datetime.now() - began))

     # resolve the ids into the related object collection
     return find_include(self.ref_klass, ids, kwargs)
예제 #9
0
 def save(self, pfields=None, new=False):
     """
     Save the instance to the Redis datastore.

     (The original placed this docstring after the first statement,
     making it a dead string expression rather than a docstring.)

     param:
         pfields: optional tuple of field names for a partial save,
                  e.g. pfields = ("username","gender"); default None
                  saves every field
         new: when True, force the record to be treated as brand new
     return:
         True on success, False when validation fails (details are
         appended to self._errors)
     """
     n = datetime.now()
     # reset the error list for this save attempt
     self._errors = []
     # validate the partial-field list, when one is given
     if pfields:
         if not isinstance(pfields, tuple):
             self._errors.append("params must tuple list!")
             return False
         for p in pfields:
             # `in` works on both py2 and py3 (has_key is py2-only)
             if p not in self.attributes:
                 self._errors.append("%s field not exists!" % p)
                 return False

     # validate every field
     if not self.is_valid():
         return False
     # decide whether this is an insert or an update
     if new:
         _new = True
     else:
         _new = self.is_new()

     # brand-new records without an id get one generated
     if _new and not new and not self.id:
         self._initialize_id()

     # could be wrapped for distributed locking:
     # with Mutex(self):
     self._write(_new, pfields)

     logger.info("save type:%s, id:%s ,use: %s" % (self.__class__.__name__, self.id, datetime.now() - n))
     return True
예제 #10
0
    def show(self,save= False,debug = False,send=True):
        """
        Read the MJPEG byte stream from self.stream, decode each JPEG
        frame, optionally display and/or save it, and periodically
        forward a frame to the server.

        param:
            save:  when True, write each decoded frame to ./imgs/
            debug: when True, show each frame in a cv2 window
            send:  when True, upload a frame to the server every ~3s
        NOTE(review): loops forever; only exits via the 'q' key, which
        tears down the KANA_CAM connection and calls exit().
        """
        bts=b''
        imgs = []  # NOTE(review): never appended to -- looks unused
        start_time = time.time()
        sample_time = time.time()
        _LENGTH = 15  # length of the random session key
        gkey = ''

        # build a random ascii-letter session key for uploads
        for i in range(_LENGTH):
            gkey += random.choice(string.ascii_letters)

        while True:
            bts+=self.stream.read(4096)
            # JPEG frames are delimited by the SOI (ffd8) / EOI (ffd9) markers
            jpghead=bts.find(b'\xff\xd8')
            jpgend=bts.find(b'\xff\xd9')
            # pending stdin input: apply the command, reset counters/buffer
            if self._isData():
                c = list(map(str,sys.stdin.readline().split()))
                self.change(c)
                self.tot_time = 0
                self.count = 0
                bts = b''
                continue

            if jpghead>-1 and jpgend>-1:
                # slice out one complete JPEG; keep the remainder buffered
                jpg=bts[jpghead:jpgend+2]
                bts=bts[jpgend+2:]

                try:
                    img=cv2.imdecode(np.frombuffer(jpg,dtype=np.uint8),cv2.IMREAD_UNCHANGED)
                except Exception as e:
                    logger.info(e)
                    continue

                # flipped variants; only p (both axes) is kept as the frame
                v=cv2.flip(img,0)
                h=cv2.flip(img,1)  # NOTE(review): h is clobbered two lines below
                p=cv2.flip(img,-1)
                frame=p
                h,w=frame.shape[:2]

                if self.count<5:
                    print(frame.shape)
                    print(w,h)

                img=cv2.resize(frame,(w,h))

                if debug:
                    cv2.imshow("a",img)

                end_time = time.time()
                a = end_time-start_time  # NOTE(review): unused
                self.tot_time += end_time-start_time
                if save:
                    if not os.path.isdir('./imgs'):
                        os.mkdir('./imgs')
                    cv2.imwrite(f'./imgs/img_{self.count}.png',img)

                self.count+=1


                # every ~3 seconds: maybe rotate the key, then upload
                if time.time() - start_time > 3:
                    if time.time()-sample_time > 20:
                        # rotate the session key every ~20 seconds
                        sample_time = time.time()
                        gkey = ''
                        for i in range(_LENGTH):
                            self.reset = False
                            gkey += random.choice(string.ascii_letters)

                    start_time = time.time()
                    if send:
                        self.send_server(img,mode='kana',gkey=gkey,reset=self.reset)

            # 'q' quits: report average frame time, drop the camera wifi, exit
            k=cv2.waitKey(1)
            if k & 0xFF==ord('q'):
                logger.info(self.tot_time/self.count)
                subprocess.call(['sudo','nmcli','con','down','KANA_CAM'])
                subprocess.call(['sudo','nmcli','con','delete' ,'KANA_CAM'])
                exit()
예제 #11
0
    args.view = None
else:
    args.model == 'GraphSage'

if args.model == 'HANSage':
    args.view = None

SEED = 2020
random.seed(SEED)
torch.manual_seed(SEED)
np.random.seed(SEED)
if args.cuda:
    torch.cuda.manual_seed(SEED)
    torch.cuda.set_device(setting.cuda_device_id)

logger.info(args)


def run(data_dir, model_path, embed_path, report_file):
    """
    Load the dataset and build the configured graph model handler.

    NOTE(review): this definition is truncated in this view; the
    model_path/embed_path/report_file parameters are used beyond the
    visible lines.
    """
    logger.info("loading dataset.")
    num_class = 2
    adj_lists, feat_data, labels = load_data(data_dir, args.view)
    data = dict()
    data['feat_data'] = feat_data
    data['labels'] = labels

    # GraphSage uses a single adjacency view; HANSage keeps them all
    if args.model == "GraphSage":
        data['adj_lists'] = adj_lists[0]
        model = GraphSageHandler(num_class, data, args)
    elif args.model == "HANSage":
        data['adj_lists'] = adj_lists
예제 #12
0
 def load_checkpoint(self, checkpoint_file):
     """
     Build the model, restore its weights from checkpoint_file, and put
     it into eval mode.
     """
     self.build_model()
     state = torch.load(checkpoint_file)
     self.model.load_state_dict(state['state_dict'])
     self.model.eval()
     logger.info("load pretrain model from {}".format(checkpoint_file))
예제 #13
0
 def on_pong(self, data):
     """Log the websocket pong payload."""
     # log as a dict (the original `{"on_pong", data}` built a set
     # literal, almost certainly by mistake)
     logger.info({"on_pong": data})
예제 #14
0
    def filters(self, **kwargs):
        """
        Find the fields  when Index = True

        Look up model instances by an indexed field or by primary key.
        kwargs holds exactly one field=value lookup pair, plus an
        optional select="f1,f2" to fetch only those fields.
        Returns a model instance, a list (for id=<list/tuple>), or None.
        """
        n = datetime.now()
        cls = self.model_class

        # split kwargs into the lookup field/value and the select clause
        select = ()
        field = value = None
        for k, v in kwargs.iteritems():
            if str(k).lower() != "select":
                field = str(k)
                value = v
            else:
                select = v
        #print "find " + "* " *20
        #print "field:",field,"value:",value

        ## resolve the lookup to a primary key
        if field.lower() != "id":
            # indexed-field lookup
            # index layout: user:username:index , yangqun , 1
            index_name = cls.index_name(field)
            index_field = value
            #print "index_name,index_field",index_name,index_field
            # fetch the primary key from the index hash
            pk = self.db.hget(index_name, index_field)
        else:
            if (isinstance(value, list) or isinstance(value, tuple)):
                # a batch of ids: resolve them all at once
                pks = value
                data = self.multi(pks, select)
                logger.info(
                    "type: %s,filter: %s, use: %s" %
                    (self.model_class.__name__, kwargs, datetime.now() - n))
                return data
            else:
                pk = value
                # check the record exists before fetching
                hlen = self.db.hlen(cls.dump_fields_key(pk))
                if (hlen == 0):
                    return None
        ## end of lookup resolution

        # fetch the record's fields
        if not select:
            # no select clause: behave like SELECT *
            #print "find key %s" % cls.dump_fields_key(pk)
            fields = self.db.hgetall(cls.dump_fields_key(pk))
        else:
            #print "find key %s" % cls.dump_fields_key(pk)
            selectlist = select.split(",")
            selectlist.append("id")
            fields = {}
            # select clause given: behave like SELECT field1,field2,...
            data = self.db.hmget(cls.dump_fields_key(pk), selectlist)
            for i in xrange(len(selectlist)):
                fields[selectlist[i]] = data[i]

        #print "field " + "* " * 20
        #print fields

        if fields:
            data = cls(**fields)
            logger.info(
                "type: %s,filter: %s, use: %s" %
                (self.model_class.__name__, kwargs, datetime.now() - n))
            return data
        else:
            return None
예제 #15
0
 def on_pong(self, data):
     """Log the websocket pong payload."""
     # log as a dict (the original `{"on_pong", data}` built a set
     # literal, almost certainly by mistake)
     logger.info({"on_pong": data})
예제 #16
0
from setting import logger
from utils.ap_check import wifi
# NOTE(review): subprocess, cv2 and video are used below but not
# imported in this view -- confirm they are imported elsewhere.


if __name__=="__main__":
    # instantiate the wifi helper (shadows the imported class name)
    wifi = wifi()


    url = "http://192.168.4.1/SVGA"

    # Poll for the camera AP; when present, connect and stream until the
    # connection drops, then tear down and retry.
    while True:
        if wifi.is_kana():

            print('KANA_CAM is in essid')
            wifi.connect_ap()

            url = "http://192.168.4.1/SVGA"
            try:
                v = video(url)
                v.show(debug=True, send=False)

            except Exception as e:
                logger.info("Disconnect")
                # str(e): the original concatenated the exception object
                # itself, which raises TypeError and masked the real error
                logger.info("reason : " + str(e))
                subprocess.call(['sudo', 'nmcli', 'con', 'down', 'KANA_CAM'])
                subprocess.call(['sudo', 'nmcli', 'con', 'delete', 'KANA_CAM'])
                cv2.destroyAllWindows()
                continue

        # else:
            # logger.info("No WiFi")
예제 #17
0
    def train_ddc(self, epoch=20, interval_val=100):
        """
        Jointly train with the MMD (domain-adaptation) loss and the
        classification loss, then evaluate month-by-month over the
        2015/2016 test periods.

        param:
            epoch: number of passes over the training loader
            interval_val: kept for interface compatibility (the periodic
                validation block below is commented out)
        return:
            (ret_tuple, df): the (labels, predicted-probabilities) tuple
            of the LAST test period, and a DataFrame of per-period
            metrics [period, f1, acc, precision, recall, auc].
        """
        self.build_model()
        self.hist_loss = []
        self.hist_evl_index = []

        # iterator over the (unlabelled) target-domain batches
        target_data_iter = iter(self.ddc_target_data_loader)

        iiter = 0
        for ep in range(epoch):
            for x, y in self.train_data_loader:
                # print(x, y)
                # if iter >3:
                #     break
                iiter += 1
                self.model.train()
                nodes = [i.item() for i in x.squeeze_()]
                labels = y.squeeze_()
                start_time = time.time()
                out = self.model.forward(nodes)
                #print(out, labels)
                loss_clf = self.loss_func(out, labels)

                # cycle the target loader when it runs out
                try:
                    x_target, _ = next(target_data_iter)
                except StopIteration:
                    target_data_iter = iter(self.ddc_target_data_loader)
                    x_target, _ = next(target_data_iter)

                # MMD between source- and target-domain embeddings
                source = self.model.get_embedding(nodes)
                target_nodes = [i.item() for i in x_target.squeeze_()]
                target = self.model.get_embedding(target_nodes)
                loss_mmd = compute_mmd(source, target)

                # total loss = classification + domain discrepancy
                loss = loss_clf + loss_mmd
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                self.hist_loss.append(loss.item())
                logger.info("epoch{}[iter{}]:{:.5f}s\tloss={:.5f}".format(
                    ep, iiter,
                    time.time() - start_time, loss.item()))

                # if iter % interval_val == 0:
                #     self.model.eval()
                #     logger.info("Eveluation on val dataset:")
                #     val_x, val_y = self.inputdata.get_val_data()
                #     val_output = self.model.forward(val_x)
                #     self.hist_evl_index.append(self.evl_index(val_y, val_output.cpu().data.numpy().argmax(axis=1), detail=False))

        logger.info("optimization finished! Eveluation on test dataset:")
        self.model.eval()
        rettttt = []  # per-period rows: [period, f1, acc, precision, recall, auc]
        with torch.no_grad():
            for year in [2015, 2016]:
                for month in range(1, 13):
                    perod = "{}-{}".format(year, month)
                    test_x, test_y = self.inputdata.get_test_data(perod)

                    # forward in batches of 128 to bound memory use
                    batchs_outputs = []
                    while len(test_x) > 0:
                        batch_x, test_x = test_x[:128], test_x[128:]
                        batchs_outputs.append(self.model.forward(batch_x))

                    test_output = reduce(lambda x, y: torch.cat([x, y]),
                                         batchs_outputs)

                    # probability of the positive class
                    pred_prob = F.softmax(test_output,
                                          dim=1).cpu().data.numpy()[:, 1]

                    ret_tuple = (test_y, pred_prob)

                    fpr, tpr, thresthods = metrics.roc_curve(test_y, pred_prob)
                    auc = metrics.auc(fpr, tpr)
                    print("AUC={:.4f}".format(auc))

                    f1, acc, precision, recall = self.evl_index(
                        test_y,
                        test_output.cpu().data.numpy().argmax(axis=1))

                    logger.info(
                        "{}: F1={:.4f}\tACC={:.4f}\tPrecision={:.4f}\tRecall={:.4f}\tauc={:.4f}, ."
                        .format(perod, f1, acc, precision, recall, auc))
                    rettttt.append([perod, f1, acc, precision, recall, auc])

                    self.hist_evl_index.append(
                        self.evl_index(
                            test_y,
                            test_output.cpu().data.numpy().argmax(axis=1),
                            detail=False))

        df = pd.DataFrame(
            rettttt,
            columns=["peroid", "f1", "acc", "precision", "recall", "auc"])
        # df.to_csv("/home/ypd-23-teacher-2/security/data_root/tesseract/rret/mamadroid")
        # return self.hist_evl_index[-1]
        return ret_tuple, df
예제 #18
0
def run(seed):
    """
    Run the configured adversarial attack against the pretrained model
    on the ids of split `seed`, then score the result codes.

    param:
        seed: integer split seed; selects setting.split_file_tmp
    raises:
        AssertionError when args.model / args.alg is unrecognised
        (the original built these exceptions without raising them,
        which left `model` / `attacker` undefined below).
    """
    # ========== load pretrained model ===============
    num_class = 2
    feat_data, labels, adj_lists, adj_matrix = load_hin(
        hin_path,
        args.view,
        args.num_neighs,
        args.num_neighs_tpl,
        args.num_neighs_permission,
        pre_max=setting.pre_max)
    num_nodes, feat_dim = feat_data.shape

    # explicit model file wins; otherwise glob for a matching checkpoint
    if args.m_file is not None:
        model_file = os.path.join(model_path, args.model, args.m_file)
    else:
        model_file = glob.glob(
            os.path.join(model_path, args.model,
                         "*{}*neigh{}*".format(args.view, args.num_neighs)))[0]

    logger.info("model_file: {}".format(model_file))
    if args.model == "HANSage":
        model = load_hansage(model_file,
                             num_class,
                             num_nodes,
                             feat_data,
                             feat_dim,
                             adj_lists,
                             adj_matrix,
                             cuda=args.cuda,
                             num_sample_tpl=args.num_sample_tpl,
                             num_sample_permission=args.num_sample_permission,
                             embed_dim=args.embed_dim,
                             num_layers=2)
    elif args.model == "GraphSage":
        model = load_model(model_file,
                           num_class,
                           num_nodes,
                           feat_data,
                           feat_dim,
                           adj_lists,
                           adj_matrix,
                           cuda=args.cuda,
                           num_sample=args.num_sample,
                           embed_dim=args.embed_dim,
                           num_layers=2,
                           as_view=False)
    else:
        raise AssertionError("the model param is wrong!")
    logger.info(model)

    # ============= load data ===================
    ids = load_ids(setting.split_file_tmp.format(seed), labels)

    # ============= adversarial attack ==================
    if args.alg == 'fgsm':
        attacker = FGSM(model, args.max_bit, args.cuda)
    elif args.alg == 'jsma':
        attacker = JSMA(model, args.max_bit, args.cuda)
    else:
        raise AssertionError("alg is wrong!")

    r_codes = []
    num_done = 0  # renamed from `iter`/`id`, which shadowed builtins
    for node_id in ids:
        num_done += 1
        init_x = feat_data[node_id]
        x = torch.Tensor([init_x])
        y = torch.LongTensor([1])
        r_code, _ = attacker.attack([node_id], x, y)
        logger.info("{}: id={}, r_code={}".format(num_done, node_id, r_code))
        r_codes.append(r_code)
    score(r_codes,
          args.alg,
          args.max_bit,
          args.model,
          seed,
          model_file,
          save=True)
예제 #19
0
# Optimizer and per-token loss for the seq2seq model.
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True,
                                                            reduction='none')

# Checkpointing for the encoder/decoder pair.
checkpoint_dir = CHECKPOINT_DIR
checkpoint_prefix = os.path.join(checkpoint_dir, PREFIX)
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
                                 encoder=encoder,
                                 decoder=decoder)

# Training loop: reset the encoder state each epoch, then step batches.
steps_per_epoch = len(en_seq_train) // BATCH_SIZE
for epoch in range(EPOCHS):
    start = time.time()

    enc_hidden = encoder.initialize_hidden_state()
    total_loss = 0

    for (batch, (inp, targ)) in enumerate(dataset.take(steps_per_epoch)):
        batch_loss = train_step(inp, targ, enc_hidden)
        total_loss += batch_loss
        logger.debug('{} {} loss: {}'.format(inp.shape, targ.shape,
                                             batch_loss))

    # saving (checkpoint) the model every 10 epochs
    if epoch % 10 == 0:
        checkpoint.save(file_prefix=checkpoint_prefix)

    logger.info('epoch {} loss {:.4f} -> {:.4f} taken'.format(
        epoch + 1, total_loss / steps_per_epoch,
        time.time() - start))
예제 #20
0
from aiohttp import web
from aiohttp_healthcheck import HealthCheck

from setting import logger
from view import handler_of_userinfo, health_check

# Application wiring: the /useradd route plus a /healthcheck endpoint.
app = web.Application()
# app = web.Application(loop=loop, middlewares=middlewares)
app.router.add_route('POST', '/useradd', handler_of_userinfo)
# app.router.add_route('GET', '/{name}', handler)
logger.info('Application started')

# Expose the health_check view through aiohttp-healthcheck.
health = HealthCheck()
app.router.add_get("/healthcheck", health)
health.add_check(health_check)

예제 #21
0
    def ssh_base(self, ip, user, password, port, prompt):
        """
        Open an SSH session to ip:port as `user` and authenticate.

        Handles the usual pexpect cases: a changed host key (removes the
        stale known_hosts entry with ssh-keygen and retries), the
        first-connection "continue connecting" prompt, password
        authentication, and passwordless (public-key) login.

        param:
            ip/user/password/port: connection parameters
            prompt: regex matching the remote shell prompt
        return:
            0 on successful login
        raises:
            ExceptionRunin on any failure (bad host key, bad password,
            timeout, EOF)
        """
        self.__fout = open(self.log_path, 'a')
        logger.info('Login with SSH')
        cmd = 'ssh -l %s %s -p %s' % (user, ip, port)
        logger.debug('pexpect spawn: %s', cmd)
        self.child = pexpect.spawn(cmd)
        self.child.logfile_read = self.__fout
        expect_string = "['(?i)host key verification failed',\
         '(?i)are you sure you want to continue connecting',\
         '(?i)password',\
         %s,\
         pexpect.TIMEOUT,\
         pexpect.EOF]" % prompt
        logger.debug('expect: %s', expect_string)
        i = self.child.expect([
            '(?i)host key verification failed',
            '(?i)are you sure you want to continue connecting', '(?i)password',
            prompt, pexpect.TIMEOUT, pexpect.EOF
        ])
        if i == 0:
            # Remote host identification has changed
            logger.debug('i == 0')
            try:
                info = self.child.before
                logger.debug('info:%s', info)
                # pull the known_hosts path out of ssh's hint message
                key1 = 'Add correct host key in '
                key2 = ' to get rid of this message'
                index1 = info.index(key1) + len(key1)
                index2 = info.index(key2, index1)
                key3 = info[index1:index2]
                cmd2 = 'ssh-keygen -f "%s" -R %s' % (key3, ip)
                child2 = pexpect.spawn(cmd2)  # remove the stale host key
                child2.logfile_read = self.__fout
                child2.expect(pexpect.EOF)
            except Exception as e:
                ex = str(e)
                raise ExceptionRunin(ex)
            # Do it again
            logger.debug('pexpect spawn: %s', cmd)
            self.child = pexpect.spawn(cmd)
            self.child.logfile_read = self.__fout
            logger.debug('expect: %s', expect_string)
            i = self.child.expect([
                '(?i)host key verification failed',
                '(?i)are you sure you want to continue connecting',
                '(?i)password', prompt, pexpect.TIMEOUT, pexpect.EOF
            ])
            if i == 0:
                # This can not happen again, so there must be some error.
                logger.debug('i == 0')
                ex = 'ERROR! host key verification failed.'
                raise ExceptionRunin(ex)

        if i == 1:  # In this case SSH does not have the public key cached.
            logger.debug('i == 1')
            logger.info('send yes')
            self.child.sendline('yes')
            logger.debug('expect: %s', expect_string)
            i = self.child.expect([
                '(?i)host key verification failed',
                '(?i)are you sure you want to continue connecting',
                '(?i)password', prompt, pexpect.TIMEOUT, pexpect.EOF
            ])
        if i == 0 or i == 1:
            logger.debug('i == 0 or i == 1')
            # This can not happen again, so there must be some error.
            ex = 'ERROR! could not login with SSH.'
            raise ExceptionRunin(ex)
        if i == 2:
            logger.debug('i == 2')
            try:
                logger.debug('send password: %s', password)
                self.child.sendline(password)
                logger.debug('expect: [%s, password:]', prompt)
                # NOTE(review): this expect and the j==0 branch below were
                # garbled in the original source and are reconstructed
                # from the surrounding indentation -- confirm.
                j = self.child.expect([prompt, 'password:'])
                if j == 0:
                    logger.debug('j == 0')
                    # Entered the bmc, ok
                    logger.info('Login in successfully.')
                    return 0
                elif j == 1:
                    logger.debug('j == 1')
                    # password incorrect
                    logger.debug('incorrect password: %s', password)
                    ex = 'ERROR! could not login with SSH.'
                    raise ExceptionRunin(ex)
            except Exception as e:
                ex = str(e)
                logger.critical(ex)
                ex = 'ERROR! could not login with SSH.'
                raise ExceptionRunin(ex)
        if i == 3:
            # This may happen if a public key was setup to automatically login.
            # But beware, the COMMAND_PROMPT at this point is very trivial and
            # could be fooled by some output in the MOTD or login message.
            logger.debug('i == 3')
            logger.info('Login successfully.')
            return 0
        if i == 4:  # Timeout
            logger.debug('i == 4')
            ex = 'ERROR! login with SSH TIMEOUT.'
            logger.critical(self.child.before)
            # fixed: the original raised a hard-coded 'Exception: EOF!'
            # here instead of the TIMEOUT message prepared in `ex`
            raise ExceptionRunin(ex)
        if i == 5:  # EOF
            logger.debug('i == 5')
            ex = 'ERROR! login with SSH EOF.'
            logger.critical(self.child.before)
            raise ExceptionRunin(ex)
예제 #22
0
    def filters(self,**kwargs):
        """
        Find the fields  when Index = True

        Look up model instances by an indexed field or by primary key.
        kwargs holds exactly one field=value lookup pair, plus an
        optional select="f1,f2" to fetch only those fields.
        Returns a model instance, a list (for id=<list/tuple>), or None.
        """
        n  = datetime.now()
        cls = self.model_class

        # split kwargs into the lookup field/value and the select clause
        select = ()
        field = value =  None
        for k,v in kwargs.iteritems():
            if str(k).lower() != "select":
                field = str(k)
                value = v
            else:
                select = v
        #print "find " + "* " *20
        #print "field:",field,"value:",value

        ## resolve the lookup to a primary key
        if field.lower() != "id":
            # indexed-field lookup
            # index layout: user:username:index , yangqun , 1
            index_name = cls.index_name(field)
            index_field = value
            #print "index_name,index_field",index_name,index_field
            # fetch the primary key from the index hash
            pk = self.db.hget(index_name,index_field)
        else:
            if(isinstance(value,list) or isinstance(value,tuple)):
                # a batch of ids: resolve them all at once
                pks = value
                data = self.multi(pks,select)
                logger.info("type: %s,filter: %s, use: %s" % (self.model_class.__name__,kwargs,datetime.now() - n))
                return data
            else:
                pk = value
                # check the record exists before fetching
                hlen = self.db.hlen(cls.dump_fields_key(pk))
                if (hlen==0):
                    return None
        ## end of lookup resolution


        # fetch the record's fields
        if not select:
            # no select clause: behave like SELECT *
            #print "find key %s" % cls.dump_fields_key(pk)
            fields = self.db.hgetall(cls.dump_fields_key(pk))
        else:
            #print "find key %s" % cls.dump_fields_key(pk)
            selectlist = select.split(",")
            selectlist.append("id")
            fields = {}
            # select clause given: behave like SELECT field1,field2,...
            data = self.db.hmget(cls.dump_fields_key(pk),selectlist)
            for i in xrange(len(selectlist)):
                fields[selectlist[i]] = data[i]

        #print "field " + "* " * 20
        #print fields

        if fields:
            data = cls(**fields)
            logger.info("type: %s,filter: %s, use: %s" % (self.model_class.__name__,kwargs,datetime.now() - n))
            return data
        else:
            return  None
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import warnings
# NOTE(review): pymysql is used on the next line but not imported in
# this view -- confirm it is imported earlier in the file.
pymysql.install_as_MySQLdb()
from setting import DATABASE, MYSQL_DB_CONFIG, logger
from ..models.model import DeclarativeBase

# Create the engine, the database (if missing) and all mapped tables,
# suppressing MySQL "database exists"-style warnings.
with warnings.catch_warnings():  # with warnings
    warnings.simplefilter('ignore')  # if warning just ignore that
    try:
        db_engine = create_engine(
            'mysql+pymysql://{user}:{password}@{host}:{port}/{db_name}'.format(
                **MYSQL_DB_CONFIG))
        db_engine.execute("CREATE DATABASE IF NOT EXISTS {} ".format(
            MYSQL_DB_CONFIG.get("db_name")))  # create the engine
        logger.info("mysql database connected: {}".format(db_engine))
        DeclarativeBase.metadata.create_all(db_engine)  # create all tables
    except:  # NOTE(review): bare except hides the real connection error
        raise Exception(
            "Connection error")  # if database not created or connection failed

def db_session():
    """ this method is used to create the session"""
    try:
        if db_engine:  # if db_engine
            Session = sessionmaker()  # create session using sessionmaker() lib
            Session.configure(bind=db_engine)  # configure engine
            session = Session()
            return session  # return session object
    except: