Example #1
0
    def main(self):

        current_path = os.path.dirname(os.path.abspath(__file__))
        cfg_file = os.path.join(current_path, 'cfg.json')

        try:
            with open(cfg_file) as confile:
                config = json.load(confile)
        except Exception as e:
            logging.error('failed to load config file %s: %s', cfg_file, e)
            return

        g.HOSTNAME = config['hostname']
        g.DEBUG = config['debug']
        g.IP = config['ip']
        g.HEARTBEAT = config['heartbeat']
        g.TRANSFER = config['transfer']
        g.HTTP = config['http']
        g.COLLECTOR = config['collector']
        g.IGNORE = config['ignore']
        g.TAGS = config['default_tags']

        init_log(current_path)

        logging.info('starting status reporting thread....')
        status_report_thread = StatusReportThread(5, "StatusReportThread", 1)
        status_report_thread.start()

        logging.info('starting api thread....')
        api_thread = APIThread(1, "HTTPThread", 1)
        api_thread.start()

        logging.info('starting basic metric collecting thread....')
        basic_thread_collect = BasicCollectThread(2, "BasicCollectThread", 1)
        basic_thread_collect.start()

        logging.info('starting iis metric collecting thread....')
        iis_thread_collect = IISCollectThread(3, "IISCollectThread", 1)
        iis_thread_collect.start()

        # logging.info('starting sqlserver metric collecting thread....')
        # sqlserver_thread_collect = SQLServerCollectThread(
        #     4, "SQLServerCollectThread", 1)
        # sqlserver_thread_collect.start()

        logging.info('starting proc metric collecting thread....')
        proc_thread_collect = ProcCollectThread(6, "ProcCollectThread", 1)
        proc_thread_collect.start()

        while self.isAlive:
            time.sleep(60)
        logging.error('end of main, should never get here')
Example #2
0
def call_parser():
    """call the args parser."""

    print(APP_TITLE_ASCII_ART)

    parser = make_parser()

    # show help message when no option is given
    if not sys.argv[1:]:
        parser.print_help()
        parser.exit()

    args = parser.parse_args()

    # set logger
    if args.verbose:
        args.level = 'verbose'
    init_log(args.level, args.logfile)

    # option: -a/--all
    if args.all:
        show_all()
        parser.exit()

    try:
        sub = getattr(args, 'which')
    except AttributeError:
        pass
    else:
        if sub == 'new':
            option_interface(
                framework_name=args.framework_name,
                target_project_path=args.dir,
                static_path=args.static_path,
                web_static_root=args.web_static_root,
                alias=args.alias,
                dis_suffix=args.dis_suffix
            )
        parser.exit()

    return args
Example #3
0
def run():
    """
    Backend crawler entry point.
    :return:
    """
    init_log()
    loop = asyncio.get_event_loop()
    scheduler = AsyncIOScheduler()

    scheduler.add_listener(
        spider_listener,
        mask=EVENT_JOB_MAX_INSTANCES | EVENT_JOB_ERROR | EVENT_JOB_MISSED,
    )

    asyncio.ensure_future(init_task(loop=loop), loop=loop)

    scheduler.add_job(
        func=refresh_task,
        args=(loop, scheduler),
        trigger="cron",
        second="*/10",
        misfire_grace_time=600,
        max_instances=2,
        coalesce=True,
        id="refresh-task",
    )
    scheduler.start()

    asyncio.ensure_future(save.consume(loop, save.save_queue), loop=loop)

    try:
        loop.run_forever()
    except KeyboardInterrupt:
        logger.info("Exiting...")
        executor.shutdown(wait=False)
        scheduler.shutdown(wait=False)
        loop.close()
Example #4
0
    def __init__(self,
                 configfile='config.ini',
                 logfile='dopq.log',
                 debug=False):

        # init logging
        self.logger = log.init_log(logfile)

        # get settings from config
        if not os.path.isfile(configfile):
            self.write_default_config(configfile)

        # init member variables
        self.starttime = None
        self.client = docker.from_env()
        self.debug = debug
        self.configfile = configfile
        self.logfile = logfile
        self.config = self.parse_config(configfile)
        self.paths = self.config['paths']
        self.history_file = 'history.dill'
        self.container_list_file = 'container_list.dill'
        self.running_containers_file = 'running_containers.dill'
        self.container_list = []
        self.running_containers = []
        self.history = []
        self.mapping = self.restore('all')

        # init helper processes and classes
        self.queue = mp.Queue()
        self.gpu_handler = gh.GPUHandler()
        self.provider = provider.Provider(self.config, self.queue)

        # build all non-existent directories, except the network container share
        keys = list(self.paths.keys())
        for key in keys:
            if key != 'network_containers':
                if not os.path.isdir(self.paths[key]):
                    os.makedirs(self.paths[key])

        # initialize process variable and termination flag
        super(DopQ, self).__init__()

        # initialize interface as a thread (so that members of the queue are accessible by the interface)
        self.thread = threading.Thread(target=self.run_queue)
Example #5
0
    parser = argparse.ArgumentParser(
        'Generate dataset. '
        'Usage: python DataGenerator.py mixed_modes_selected.txt --bias 0 --K 15 --noise 0.02'
    )
    parser.add_argument('mode_filename')
    parser.add_argument('--noise', type=float)
    parser.add_argument('--bias', type=int)
    parser.add_argument('--K', type=int)
    args = parser.parse_args()
    mode_filename = args.mode_filename
    noise = args.noise
    bias = args.bias
    K = args.K

    # logger
    init_log('./logs/data_gen_K_{:02d}_noise_{:04d}_bias_{:03d}'.format(
        K, int(noise * 1000), bias))

    # generate
    logging.info(
        'Generating train data of K_{:02d}_noise_{:04d}_bias_{:03d}'.format(
            K, int(noise * 1000), bias))
    data_train = DataGenerator(filename=mode_filename,
                               num_one_class=NUM_ONE_CLASS_TRAIN,
                               noise_factor=noise,
                               n_sampled=K,
                               bias=bias)
    data_train.run()
    logging.info(
        'Generating test data of K_{:02d}_noise_{:04d}_bias_{:03d}'.format(
            K, int(noise * 1000), bias))
    data_test = DataGenerator(filename=mode_filename,
Example #6
0
def main():
    """Create the model and start the training."""

    cudnn.enabled = True
    cudnn.benchmark = True

    device = torch.device("cuda" if not args.cpu else "cpu")

    snapshot_dir = os.path.join(args.snapshot_dir, args.experiment)
    os.makedirs(snapshot_dir, exist_ok=True)

    log_file = os.path.join(args.log_dir, '%s.txt' % args.experiment)
    init_log(log_file, args)

    # =============================================================================
    # INIT G
    # =============================================================================
    if MODEL == 'ResNet':
        model = Res_Deeplab(num_classes=args.num_classes,
                            restore_from=args.restore_from)
    model.train()
    model.to(device)

    # DataLoaders
    trainloader = data.DataLoader(GTA5DataSet(args.data_dir,
                                              args.data_list,
                                              max_iters=args.num_steps *
                                              args.iter_size * args.batch_size,
                                              crop_size=args.input_size_source,
                                              scale=True,
                                              mirror=True,
                                              mean=IMG_MEAN),
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  pin_memory=True)
    trainloader_iter = enumerate(trainloader)

    #   trainloader = data.DataLoader(cityscapesDataSetLabel(args.data_dir_target, './dataset/cityscapes_list/info.json', args.data_list_target,args.data_list_label_target,
    #                                                   max_iters=args.num_steps * args.iter_size * args.batch_size, crop_size=args.input_size_target,
    #                                                   mean=IMG_MEAN, set=args.set), batch_size=args.batch_size, shuffle=True,num_workers=args.num_workers, pin_memory=True)

    # Optimizers
    optimizer = optim.SGD(model.optim_parameters(args),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    optimizer.zero_grad()

    interp = nn.Upsample(size=(args.input_size_source[1],
                               args.input_size_source[0]),
                         mode='bilinear',
                         align_corners=True)
    #interp = nn.Upsample(size=(args.input_size_target[1], args.input_size_target[0]), mode='bilinear', align_corners=True)

    # ======================================================================================
    # Start training
    # ======================================================================================
    log_message('###########   TRAINING STARTED  ############', log_file)
    start = time.time()

    for i_iter in range(args.num_steps):

        optimizer.zero_grad()
        adjust_learning_rate(optimizer, i_iter)

        # ======================================================================================
        # train G
        # ======================================================================================

        # Train with Source
        _, batch = next(trainloader_iter)
        images_s, labels_s = batch
        images_s = images_s.to(device)

        pred_source1, pred_source2 = model(images_s)

        pred_source1 = interp(pred_source1)
        pred_source2 = interp(pred_source2)
        # Segmentation Loss
        loss_seg = (loss_calc(pred_source1, labels_s, device) +
                    loss_calc(pred_source2, labels_s, device))
        loss_seg.backward()

        optimizer.step()

        if i_iter % 10 == 0:
            log_message(
                'Iter = {0:6d}/{1:6d}, loss_seg = {2:.4f}'.format(
                    i_iter, args.num_steps - 1, loss_seg), log_file)

        if (i_iter % args.save_pred_every == 0
                and i_iter != 0) or i_iter == args.num_steps - 1:
            print('saving weights...')
            torch.save(model.state_dict(),
                       osp.join(snapshot_dir, 'GTA5_' + str(i_iter) + '.pth'))

    end = time.time()
    log_message(
        'Total training time: {} days, {} hours, {} min, {} sec '.format(
            int((end - start) / 86400), int((end - start) / 3600 % 24),
            int((end - start) / 60 % 60), int((end - start) % 60)), log_file)
    print('### Experiment: ' + args.experiment + ' Finished ###')
Example #7
0
        Y_pred[cluster_true, label] += 1
    Y_pred_sorted = np.array(sorted(Y_pred.tolist(),
                                    key=lambda x: x.index(max(x))),
                             dtype=int)

    # compute accuracy
    true_pred_cnt = Y_pred_sorted.max(axis=0)
    results['acc'] = sum(true_pred_cnt) / (num_each_class * num_class)

    return results


if __name__ == '__main__':

    # logger
    init_log('logs/svm_search')

    # argument setup
    parser = argparse.ArgumentParser(
        'Usage: python classifier_svm.py --K 7')
    parser.add_argument('--K', type=int)
    parser.add_argument('--num_each_class', type=int, default=1000)
    args = parser.parse_args()
    K = args.K
    num_each_class = args.num_each_class

    acc_list = []
    for noise in PARAMS_DATASET['noise']:
        for bias in PARAMS_DATASET['bias']:

            # load data
Example #8
0
from twilio.rest import Client
import utils.param as param
from utils.log import init_log
from utils.log import output_log


def send_twilio_message(sms_message):
    # setup twilio client
    twilio_client = Client(param.TWILIO_ACCOUNT_SID, param.TWILIO_AUTH_TOKEN)

    message = twilio_client.messages.create(body=sms_message,
                                            from_=param.FROM_PHONE_NO,
                                            to=param.TO_PHONE_NO)

    output_log('SMS Message Id: %s ' % message.sid)
    return message


if __name__ == "__main__":
    init_log()
    output_log(dir())
    send_twilio_message('testing')
Example #9
0
                         'GTA5_' + str(args.num_steps) + '_D.pth'))
            break

        if i_iter % args.save_pred_every == 0 and i_iter != 0:
            print('taking snapshot ...')
            torch.save(
                model.state_dict(),
                osp.join(args.snapshot_dir,
                         'GTA5_' + str(i_iter + 6000) + '.pth'))
            torch.save(
                model_D.state_dict(),
                osp.join(args.snapshot_dir,
                         'GTA5_' + str(i_iter + 6000) + '_D.pth'))

    end = time.time()
    log_message(
        'Total training time: {} days, {} hours, {} min, {} sec '.format(
            int((end - start) / 86400), int((end - start) / 3600 % 24),
            int((end - start) / 60 % 60), int((end - start) % 60)), LOG_FILE)


if __name__ == '__main__':
    os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')
    memory_gpu = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]
    os.system('rm tmp')
    gpu_target = str(np.argmax(memory_gpu))
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_target
    init_log(LOG_FILE)
    log_message('Training on GPU ' + gpu_target, LOG_FILE)
    main()
Example #10
0
import logging
from utils.log import init_log

__all__ = [
    'log',
]

log = init_log(logging.INFO)
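
Across these examples the signature of init_log varies from project to project: some call it with a log level (as in Example #10), some with a file path (Example #11), and some with both a path and a level (Example #12). None of the snippets show the helper itself, so the following is only a minimal, hypothetical sketch of what such a helper commonly looks like; the signature, parameter names, and handler setup are assumptions for illustration and do not correspond to any one of the projects above.

import logging
import os


def init_log(log_path=None, level=logging.INFO):
    """Hypothetical sketch: configure the root logger with console and optional file output."""
    fmt = logging.Formatter(
        '%(asctime)s %(levelname)s %(name)s:%(lineno)d %(message)s')
    root = logging.getLogger()
    root.setLevel(level)  # accepts an int constant or a level name such as 'DEBUG'

    # Always log to the console.
    console = logging.StreamHandler()
    console.setFormatter(fmt)
    root.addHandler(console)

    # Optionally log to a file, creating its parent directory if needed.
    if log_path:
        os.makedirs(os.path.dirname(log_path) or '.', exist_ok=True)
        file_handler = logging.FileHandler(log_path)
        file_handler.setFormatter(fmt)
        root.addHandler(file_handler)

    return root
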
Example #11
0
def get_logger():
    log_loc = os.path.dirname(os.path.dirname(os.path.abspath(
        __file__))) + os.path.sep + 'log' + os.path.sep + 'brokertab.log'
    return init_log(log_loc, "brokertab")
Example #12
0
import os
import time
import json
import logging
# from random import choice, sample

import redis

from utils import conf_parser, log

if conf_parser.VERBOSE:
    level = 'DEBUG'
else:
    level = 'INFO'

log_file = os.path.join(conf_parser.LOG_PATH, 'db_engine.log')
log.init_log(log_path=log_file, level=level)


class RedisEngine(object):
    def __init__(self):
        self.host = conf_parser.REDIS_HOST
        self.port = conf_parser.REDIS_PORT
        self.db = conf_parser.REDIS_DB
        self.password = conf_parser.REDIS_PASSWORD
        self.client = self._connect()
        self.activate_key = 'Proxy:Activate'
        self.proxy_key = '{ip}:{port}'
        self.exists = lambda key: self.client.exists(key)

    def _connect_pool(self):
        pool = redis.ConnectionPool(
Example #13
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from utils import data_source
from utils import infer
from utils import log
from utils import model
from utils import train

EMBED_FILES = {
    'crawl': '../model/crawl-300d-2M.vec',
    'wiki': '../model/wiki.en.bin',
    'glove': '../model/glove.840B.300d.txt'
}

log_dir = log.init_log()
train_fold = True


def main():
    # if han sent_flag=True
    toxic_data = data_source.DataSource(EMBED_FILES,
                                        embed_flag='crawl',
                                        sent_flag=False)
    print(toxic_data.description())

    train_model = model.IndRNNModel(toxic_data)

    if train_fold:
        result_model = train.train_folds(train_model, 10, log_dir)
    else:
Example #14
0
def create_app():
    init_log()
    __import_module('./app/apis')
    app = tornado.web.Application(route.urls, **settings)
    return app
Example #15
0
def main():
    """Create the model and start the training."""

    cudnn.enabled = True
    cudnn.benchmark = True

    device = torch.device("cuda" if not args.cpu else "cpu")

    random.seed(args.random_seed)

    snapshot_dir = os.path.join(args.snapshot_dir, args.experiment)
    log_dir = os.path.join(args.log_dir, args.experiment)
    os.makedirs(log_dir, exist_ok=True)
    os.makedirs(snapshot_dir, exist_ok=True)

    log_file = os.path.join(log_dir, 'log.txt')

    init_log(log_file, args)

    # =============================================================================
    # INIT G
    # =============================================================================
    if MODEL == 'ResNet':
        model = Res_Deeplab(num_classes=args.num_classes,
                            restore_from=args.restore_from)
    model.train()
    model.to(device)

    # =============================================================================
    # INIT D
    # =============================================================================

    model_D = FCDiscriminator(num_classes=args.num_classes)

    # saved_state_dict_D = torch.load(RESTORE_FROM_D) #for retrain
    # model_D.load_state_dict(saved_state_dict_D)

    model_D.train()
    model_D.to(device)

    # DataLoaders
    trainloader = data.DataLoader(GTA5DataSet(args.data_dir,
                                              args.data_list,
                                              max_iters=args.num_steps *
                                              args.iter_size * args.batch_size,
                                              crop_size=args.input_size_source,
                                              scale=True,
                                              mirror=True,
                                              mean=IMG_MEAN),
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  pin_memory=True)
    trainloader_iter = enumerate(trainloader)

    targetloader = data.DataLoader(cityscapesDataSet(
        args.data_dir_target,
        args.data_list_target,
        max_iters=args.num_steps * args.iter_size * args.batch_size,
        crop_size=args.input_size_target,
        scale=True,
        mirror=True,
        mean=IMG_MEAN,
        set=args.set),
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   pin_memory=True)
    targetloader_iter = enumerate(targetloader)

    # Optimizers
    optimizer = optim.SGD(model.optim_parameters(args),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    optimizer.zero_grad()
    optimizer_D = optim.Adam(model_D.parameters(),
                             lr=args.learning_rate_D,
                             betas=(0.9, 0.99))
    optimizer_D.zero_grad()

    # Losses
    bce_loss = torch.nn.BCEWithLogitsLoss()
    weighted_bce_loss = WeightedBCEWithLogitsLoss()

    interp_source = nn.Upsample(size=(args.input_size_source[1],
                                      args.input_size_source[0]),
                                mode='bilinear',
                                align_corners=True)
    interp_target = nn.Upsample(size=(args.input_size_target[1],
                                      args.input_size_target[0]),
                                mode='bilinear',
                                align_corners=True)

    # Labels for Adversarial Training
    source_label = 0
    target_label = 1

    # ======================================================================================
    # Start training
    # ======================================================================================
    print('###########   TRAINING STARTED  ############')
    start = time.time()

    for i_iter in range(args.start_from_iter, args.num_steps):

        optimizer.zero_grad()
        adjust_learning_rate(optimizer, i_iter)

        optimizer_D.zero_grad()
        adjust_learning_rate_D(optimizer_D, i_iter)

        damping = (1 - (i_iter) / NUM_STEPS)

        # ======================================================================================
        # train G
        # ======================================================================================

        # Remove Grads in D
        for param in model_D.parameters():
            param.requires_grad = False

        # Train with Source
        _, batch = next(trainloader_iter)
        images_s, labels_s, _, _ = batch
        images_s = images_s.to(device)
        pred_source1, pred_source2 = model(images_s)

        pred_source1 = interp_source(pred_source1)
        pred_source2 = interp_source(pred_source2)

        # Segmentation Loss
        loss_seg = (loss_calc(pred_source1, labels_s, device) +
                    loss_calc(pred_source2, labels_s, device))
        loss_seg.backward()

        # Train with Target
        _, batch = next(targetloader_iter)
        images_t, _, _ = batch
        images_t = images_t.to(device)

        pred_target1, pred_target2 = model(images_t)

        pred_target1 = interp_target(pred_target1)
        pred_target2 = interp_target(pred_target2)

        weight_map = weightmap(F.softmax(pred_target1, dim=1),
                               F.softmax(pred_target2, dim=1))

        D_out = interp_target(
            model_D(F.softmax(pred_target1 + pred_target2, dim=1)))

        # Adaptive Adversarial Loss
        if i_iter > PREHEAT_STEPS:
            loss_adv = weighted_bce_loss(
                D_out,
                torch.FloatTensor(
                    D_out.data.size()).fill_(source_label).to(device),
                weight_map, Epsilon, Lambda_local)
        else:
            loss_adv = bce_loss(
                D_out,
                torch.FloatTensor(
                    D_out.data.size()).fill_(source_label).to(device))

        loss_adv = loss_adv * Lambda_adv * damping
        loss_adv.backward()

        # Weight Discrepancy Loss
        W5 = None
        W6 = None
        if args.model == 'ResNet':

            for (w5, w6) in zip(model.layer5.parameters(),
                                model.layer6.parameters()):
                if W5 is None and W6 is None:
                    W5 = w5.view(-1)
                    W6 = w6.view(-1)
                else:
                    W5 = torch.cat((W5, w5.view(-1)), 0)
                    W6 = torch.cat((W6, w6.view(-1)), 0)

        loss_weight = (torch.matmul(W5, W6) /
                       (torch.norm(W5) * torch.norm(W6)) + 1
                       )  # +1 is for a positive loss
        loss_weight = loss_weight * Lambda_weight * damping * 2
        loss_weight.backward()

        # ======================================================================================
        # train D
        # ======================================================================================

        # Bring back Grads in D
        for param in model_D.parameters():
            param.requires_grad = True

        # Train with Source
        pred_source1 = pred_source1.detach()
        pred_source2 = pred_source2.detach()

        D_out_s = interp_source(
            model_D(F.softmax(pred_source1 + pred_source2, dim=1)))

        loss_D_s = bce_loss(
            D_out_s,
            torch.FloatTensor(
                D_out_s.data.size()).fill_(source_label).to(device))

        loss_D_s.backward()

        # Train with Target
        pred_target1 = pred_target1.detach()
        pred_target2 = pred_target2.detach()
        weight_map = weight_map.detach()

        D_out_t = interp_target(
            model_D(F.softmax(pred_target1 + pred_target2, dim=1)))

        # Adaptive Adversarial Loss
        if (i_iter > PREHEAT_STEPS):
            loss_D_t = weighted_bce_loss(
                D_out_t,
                torch.FloatTensor(
                    D_out_t.data.size()).fill_(target_label).to(device),
                weight_map, Epsilon, Lambda_local)
        else:
            loss_D_t = bce_loss(
                D_out_t,
                torch.FloatTensor(
                    D_out_t.data.size()).fill_(target_label).to(device))

        loss_D_t.backward()

        optimizer.step()
        optimizer_D.step()

        if (i_iter) % 10 == 0:
            log_message(
                'Iter = {0:6d}/{1:6d}, loss_seg = {2:.4f} loss_adv = {3:.4f}, loss_weight = {4:.4f}, loss_D_s = {5:.4f} loss_D_t = {6:.4f}'
                .format(i_iter, args.num_steps, loss_seg, loss_adv,
                        loss_weight, loss_D_s, loss_D_t), log_file)

        if (i_iter % args.save_pred_every == 0
                and i_iter != 0) or i_iter == args.num_steps - 1:
            i_iter = i_iter if i_iter != args.num_steps - 1 else i_iter + 1  # for last iter
            print('saving weights...')
            torch.save(model.state_dict(),
                       osp.join(snapshot_dir, 'GTA5_' + str(i_iter) + '.pth'))
            torch.save(
                model_D.state_dict(),
                osp.join(snapshot_dir, 'GTA5_' + str(i_iter) + '_D.pth'))

    end = time.time()
    log_message(
        'Total training time: {} days, {} hours, {} min, {} sec '.format(
            int((end - start) / 86400), int((end - start) / 3600 % 24),
            int((end - start) / 60 % 60), int((end - start) % 60)), log_file)
    print('### Experiment: ' + args.experiment + ' finished ###')
Example #16
0
def init():
    log.init_log(conf.log_path, conf.log_level)
Example #17
0
import json
import logging
import configparser

import numpy as np
import jieba
import paddle
from paddle.static import InputSpec
from paddle.io import DataLoader
from tqdm import tqdm
import paddle_serving_client.io as serving_io

import utils.log as log
from models.text_cnn import TextCNN
from datasets.feed_class_dataset import FeedClassDataset
log.init_log('log/train_feed_class')

# read configuration
config = configparser.ConfigParser()
config.read('config/textcnn_feed_class.conf')

TRAIN_PATH = config.get('data', 'train_path')
VALID_PATH = config.get('data', 'valid_path')
TEST_PATH = config.get('data', 'test_path')
EMBEDDING_PATH = config.get('data', 'embedding_path')
VOCAB_PATH = config.get('data', 'vocab_path')
LABELS = config.get('data', 'labels')

MAX_SEQUENCE_LENGTH = int(config.get('train', 'max_sequence_length'))
BATCH_SIZE = int(config.get('train', 'batch_size'))
NUM_EPOCHS = int(config.get('train', 'num_epochs'))
Example #18
0
    def run_predict(self, discrete=False):
        """Main interface -- used for model prediction.
        """
        if discrete:
            self.load()
            return self._discretize(self.data['X_total'])
        else:
            self.load()
            return self.data['X_total']


# For debugging the feature engineering module
if __name__ == '__main__':

    # logger
    init_log('./logs/FeatureEngineering')

    # config
    data_path = './data/data_noise_002_bias_0.json'
    num_classes = 23
    num_each_class = 100
    test_size = 0.2
    random_state = 1

    # instantiate the data loader
    data = DataLoader(file_path=data_path,
                      num_classes=num_classes,
                      num_each_class=num_each_class,
                      labels=LABEL_SELECTED)

    # load data, select labels, and split
Example #19
0
def main(args):
    os.makedirs(args.log_dir, exist_ok=True)
    log_file = join(args.log_dir, 'result.txt')
    init_log(log_file, args)
    compute_mIoU(args.gt_dir, args.pred_dir, args.devkit_dir, log_file)
Example #20
0
        help=
        '\'True\' means using TensorBoard, \'False\' means not using TensorBoard',
        type=bool,
        default=False)
    args = parser.parse_args()
    data_path = args.data_path
    data_type = data_path.split('/')[-1].split('.')[0]
    dense_size_list = args.dense_size_list if args.dense_size_list != 0 else DENSE_SIZE_LIST
    epoch = args.epoch
    batch_size = args.batch_size
    regular = args.regular
    dropout_size = args.dropout_size
    tb = args.tb

    # logging
    log.init_log(os.path.join(WORK_DIR, 'logs', data_type))

    # load data
    logging.info('Loading data: {}'.format(data_path))
    data = DataLoader(file_path=data_path,
                      num_classes=NUM_CLASSES,
                      num_each_class=NUM_EACH_CLASS,
                      labels=LABEL_SELECTED)
    data.run()

    # model save directory
    model_dir = os.path.join(WORK_DIR, 'model', 'mlp', data_type)
    if not os.path.exists(model_dir):
        logging.info('Makedir: {}'.format(model_dir))
        os.makedirs(model_dir)
    save_dir_base = os.path.join(model_dir, 'mlp_dense_{dense}_batch_size_{batch_size}_regular_{regular}_dropout_{dropout}_'\
Example #21
0
                sock_master.close()
                sys.exit(1)
                
            t = Proxy(sock,remote_addr, remote_port)
            t.start()
            #t.join(10)
            Log('New clients from %s:%d' % addr)



if __name__ == '__main__':
    local_addr,local_port,remote_addr,remote_port,systemsign,maxsvr,debugflag,verbose = ParserClass().pares()
    #print proxy.py -l 127.0.0.1:8208 -r 127.0.0.1:3306 -v
    #python proxy.py -l 192.168.40.241:8008 -r 192.168.100.37:8008 -v
    print(local_addr, local_port, remote_addr, remote_port, systemsign, maxsvr, debugflag, verbose)
    log.init_log( systemsign ,LOGDIR = settings.LOGDIR, screen = True )

    try:
        sock_master = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock_master.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
    except socket.error as e:
        log.info( systemsign , 'Strange error creating socket: %s' ,str(e) )
        sys.exit(1)
    try:
        print(local_addr, local_port)
        sock_master.bind((local_addr, local_port))
        sock_master.listen(5)
    except socket.error as e:
        log.info( systemsign , 'socket bind error : %s',str(e) )
        sock_master.close()
        sys.exit(1)
Example #22
0
# -*- coding: utf-8 -*-
__author__ = 'liuxuexue'

import selenium
from appium import webdriver
import appium
import os
import time
from selenium.webdriver.support.ui import WebDriverWait
from common.variable import GetVariable as gv
from utils import log
import logging

log.init_log("../../logs/patient_app")


class OperateElement():
    def __init__(self, driver):
        self.driver = driver

    def find_element(self, selector, *args):
        """
        Locate an element (the selector string is split on '=>'). If args is
        empty, a single element is located; if args is not empty, a group of
        elements is located and args[0] is used as the index.
        :param selector:
        :param args:
        :return:
        """
        selector_by = selector.split('=>')[0]
        selector_value = selector.split('=>')[1]
        if args: