Code Example #1
File: manage_client.py  Project: bigbo/gearman_frame
def __init__(self, host_list=['0.0.0.0:5000']):
    """Initialize the server/client services."""
    self.logger = Logger(logname='log/log.txt', loglevel=3, callfile=__file__).get_logger()
    try:
        self.server = Admin(host_list)
        self.client = Client(host_list)
    except Exception:
        print "Gearman server host/port is wrong!"
        self.logger.error("Failed to connect to gearman server %r" % host_list)
        sys.exit()
Code Example #2
#                   It only sends tasks to the server; the returned result only shows whether the task reached the server, and does not track worker-side execution results.
#                 2. Send multiple tasks synchronously:
#                   Sends tasks to the server; the returned result includes worker-side execution feedback (long-lived connection). Tasks that failed can be captured again and handled accordingly.
#                 3. Requires reading the config file config.yaml in the program root directory.
# * *****************************************************************************/

import gearman
from gearman.constants import * 
import json
from clients import Client
import sys,threading
import yaml
import time
from loger import Logger

logger = Logger(logname='log/log.txt', loglevel=4, callfile=__file__).get_logger()

PLUGIN_NAME = None
DATA_LIST = []
IMPORT_MODULE = None
STATUS = 0

sys.path.insert(0,sys.path[0]+'/plugin')

def DEBUG_MODE(debug, data):
    if debug:
        print 'log:%s\n' % data
        logger.debug('log:%s\n' % data)
        if debug == 2:
            raw_input('DEBUG, press Enter to continue:')
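The two dispatch modes described in the header comment of Code Example #2 map onto the plain python-gearman client roughly as follows. This is a minimal sketch against gearman.GearmanClient rather than the project's own Client wrapper; the server address, task name, and payloads are placeholders.

import json
import gearman
from gearman.constants import JOB_COMPLETE

gm_client = gearman.GearmanClient(['0.0.0.0:5000'])    # placeholder server address

# Mode 1 -- asynchronous, fire-and-forget: we only learn whether the job
# reached the server, not how the worker handled it.
gm_client.submit_job('example_task', json.dumps({'key': 'value'}),
                     background=True, wait_until_complete=False)

# Mode 2 -- synchronous batch: block on the long-lived connection until the
# workers answer, then pick out any job that did not complete.
requests = gm_client.submit_multiple_jobs(
    [{'task': 'example_task', 'data': json.dumps({'n': i})} for i in range(3)],
    background=False, wait_until_complete=True)
for req in requests:
    if req.state != JOB_COMPLETE:
        print "job %s did not complete, resubmit or log it" % req.job.task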
Code Example #3
File: manage_client.py  Project: bigbo/gearman_frame
class Gearman_Manage(object):
    def __init__(self, host_list=['0.0.0.0:5000']):
        """Initialize the server/client services."""
        self.logger = Logger(logname='log/log.txt', loglevel=3, callfile=__file__).get_logger()
        try:
            self.server = Admin(host_list)
            self.client = Client(host_list)
        except Exception:
            print "Gearman server host/port is wrong!"
            self.logger.error("Failed to connect to gearman server %r" % host_list)
            sys.exit()


    def show_status(self):
        """Show server status information."""
        current_status = self.server.get_status()

        for status in current_status:
            print status

    def get_worker(self, task_name=None):
        """Show worker status information."""
        workers = []
        for w in self.server.get_workers():
            if w['tasks']:
                workers.append(w)

        print "total workers: %d" % (len(workers))

        if not task_name:
            for i in workers:
                print "the IP:[%s]---Worker_name:[%s]---Task_name:[%s]" % (i['ip'], i['client_id'], i['tasks'])
        else:
            for i in workers:
                if i['tasks'][0] == task_name:
                    print "the IP:[%s]---Worker_name:[%s]---Task_name:[%s]" % (i['ip'], i['client_id'], i['tasks'])
        return workers


    def send_task(self, task_name, json_data, priority=PRIORITY_NONE):
        """Dispatch a control command as a background job."""
        self.client.send_job(name=task_name, data=json.dumps(json_data),
                        wait_until_complete=False, background=True, priority=priority)
        print ("Dispatched task %s with data %r" % (task_name, json_data))
        self.logger.info("Dispatched task %s with data %r" % (task_name, json_data))


    def clear_workers(self, task_name=None, priority=PRIORITY_HIGH):
        """Shut down workers."""
        current_status = self.server.get_status()
        num = 0

        if not task_name:
            print "No task_name given, so I don't know which workers to clear!"
            return

        if task_name == 'all':
            for status in current_status:
                num = int(status['workers'])
                for i in range(num):
                    self.send_task(status['task'], {'SHUTDOWN': True}, priority)
                print "stop worker total:%d" % num
        else:
            for status in current_status:
                if status['task'] == task_name:
                    num = int(status['workers'])
                print status

            for i in range(num):
                self.send_task(task_name, {'SHUTDOWN': True}, priority)
            print "stop worker total:%d" % num
            if num == 0:
                print "No task named '%s' in the task list!" % task_name

        return None

    def clear_server_list(self, task_name=None):
        """Clear the server job queue."""
        current_status = self.server.get_status()

        if not task_name:
            print "No task_name given, so I don't know which job queue to clear!"
            return
        if task_name == 'all':
            pass
        else:
            num = [i['queued'] for i in current_status if task_name == i['task']]
            print "the list len:%d" % num[0]
            self.server.empty_task(str(task_name))

    def start_server(self, port=5000):
        """Start the server."""
        self.server.start_server(port)
        self.logger.info("start server.")

    def stop_server(self):
        """Stop the server."""
        try:
            self.server.send_shutdown()
            self.logger.info("stop server.")
        except Exception:
            print "server is not running!"

    def ping_server(self):
        """Check server connectivity."""
        try:
            print self.server.get_response_time()
        except Exception:
            print "server is not running!"
Code Example #4
import argparse
import os
import time

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
# Logger, Model and load_cisia_surf are project-local modules, assumed to be imported from the repo.

parser = argparse.ArgumentParser(description='face anti-spoofing')
parser.add_argument('--batch-size', default=128, type=int, help='train batch size')
parser.add_argument('--test-size', default=64, type=int, help='test batch size')
parser.add_argument('--save-path', default='./logs/', type=str, help='log save path')
parser.add_argument('--checkpoint', default='model.pth', type=str, help='pretrained model checkpoint')
parser.add_argument('--message', default='message', type=str, help='tag appended to the log directory name')
parser.add_argument('--epochs', default=101, type=int, help='train epochs')
# argparse's type=bool treats any non-empty string as True, so "--train False" still yields True
parser.add_argument('--train', default=True, type=bool, help='train')
args = parser.parse_args()

use_cuda = torch.cuda.is_available()
time_str = time.strftime('%Y-%m-%d_%H-%M', time.localtime())  # assumed timestamp format for the log dir

save_path = args.save_path + f'{args.message}_{time_str}'

if not os.path.exists(save_path):
    os.makedirs(save_path)
logger = Logger(f'{save_path}/log.log')
logger.Print(args.message)

train_data, val_data, test_data = load_cisia_surf(train_size=args.batch_size, test_size=args.test_size)
model = Model(pretrained=False, num_classes=2)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.95)

if use_cuda:
    model = model.cuda()
    criterion = criterion.cuda()

loss_history = []
eval_history = []
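The snippet ends just before the training loop. Given the model, criterion, optimizer, and scheduler configured above, that loop presumably looks roughly like the sketch below; the (inputs, targets) unpacking of the data loader and the logging format are assumptions, not code from the project.

for epoch in range(args.epochs):
    model.train()
    running_loss = 0.0
    for inputs, targets in train_data:       # assumes the loader yields (inputs, targets) pairs
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    scheduler.step()                          # exponential LR decay once per epoch
    loss_history.append(running_loss / max(len(train_data), 1))
    logger.Print(f'epoch {epoch}: train loss {loss_history[-1]:.4f}')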