Example #1
import mysql.connector

from utils import consts
from utils.utils import init_log

log = init_log()


def select_all(table_name):
    # NOTE: table_name is interpolated directly into the SQL string; pass only
    # trusted identifiers here (MySQL placeholders cannot bind table names)
    return sql_run(f"SELECT * FROM test_1.{table_name};")


def sql_run(query):
    sql = Sql()  # Sql is assumed to be a connection wrapper defined elsewhere
    cursor = None
    try:
        if sql.connection.is_connected():
            db_info = sql.connection.get_server_info()
            log.info(f"Connected to MySQL Server version {db_info}")
            cursor = sql.connection.cursor()
            cursor.execute(query)
            records = cursor.fetchall()
            log.info("Query executed successfully")
            return records
    except Exception as e:
        log.error(f"Error while connecting to MySQL: {e}")
    finally:
        # cursor stays None if the connection failed before it was created
        if sql.connection and sql.connection.is_connected():
            if cursor is not None:
                cursor.close()
            sql.connection.close()
            log.info("MySQL connection is closed")
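

# Example usage (hypothetical table name):
#   rows = select_all("users")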

Example #2
def entrance(predicted_file_json_path: str, gold_json_file_path: str):
    score, message = evaluate_helmet_image_sgcc_score(
        predicted_file_json_path=predicted_file_json_path,
        gold_json_file_path=gold_json_file_path,
        iou_threshold=0.5,
        false_detection_weight=0.3,
        missed_detection_weight=0.5,
        object_detection_weight=0.2)
    # "评测成功" means "evaluation succeeded"
    status = 1 if message == "评测成功" else 0

    return score, message, status


if __name__ == "__main__":
    # initialize log output configuration
    logger = init_log(logging.INFO)

    # set predicted and gold json file paths
    # (`args` is assumed to be parsed from the command line beforehand)
    helmet_predicted_json_path = args.contestant_submitted_file_name
    helmet_gold_json_path = "test.json"

    score, message, status = entrance(
        predicted_file_json_path=helmet_predicted_json_path,
        gold_json_file_path=helmet_gold_json_path)

    logger.info("sgcc helmet image score: {}, message: {}, status: {}".format(
        score, message, status))
Example #3
def main():
    args = parse_args()

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    nnb = models.nnb.get_nnb(config)  # parameters not frozen  TODO: add parameters to the optimizer mid-training
    # nnb = models.ae.get_ae()
    # nnb = models.fcn.get_fcn(config)
    # make nnc's softmax a no-op during training
    nnc = models.nnc.get_nnc(config)

    writer_dict = {
        'writer': SummaryWriter(
            log_dir='./output/facexray/tensorboard/tensorboard' + '_' +
            datetime.now().strftime('%Y%m%d_%H%M%S')),
        'train_global_steps': 0,
        'valid_global_steps': 0,
        'test_global_steps': 0,
    }

    # log init
    save_dir = os.path.join('./output/facexray/log/log' + '_' +
                            datetime.now().strftime('%Y%m%d_%H%M%S'))
    if os.path.exists(save_dir):
        raise FileExistsError('model dir exists!')
    os.makedirs(save_dir)
    logger = init_log(save_dir)  # avoid shadowing the stdlib `logging` module
    _print = logger.info

    gpus = list(config.GPUS)
    # NOTE: device_ids is pinned to [0] here even though config.GPUS may list more GPUs
    nnb = torch.nn.DataParallel(nnb, device_ids=[0]).cuda()
    nnc = torch.nn.DataParallel(nnc, device_ids=[0]).cuda()

    # define loss function (criterion) and optimizer
    criterion = Loss()

    # training hyper-parameters
    # initialize the optimizer; train everything except nnb's original HRNet parameters
    optimizer = get_optimizer(config, [nnb, nnc])  # TODO: for now all parameters are passed in
    NNB_GRAD = False
    nnb.module.pretrained_grad(NNB_GRAD)
    last_iter = config.TRAIN.BEGIN_ITER
    best_perf = 0.0

    if isinstance(config.TRAIN.LR_STEP, list):
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR,
            last_iter - 1)
    else:
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                       config.TRAIN.LR_STEP,
                                                       config.TRAIN.LR_FACTOR,
                                                       last_iter - 1)

    # Data loading code
    # the transform does not yet generalize to other input sizes; it should produce [256, 256, 3]


#     train_dataset = eval('dataset.' + config.DATASET.TRAIN_SET + '.' + config.DATASET.TRAIN_SET)(
#         root=config.DATASET.TRAIN_ROOT, list_name=config.DATASET.TRAIN_LIST, mode='train', Transform='simple')

#     valid_dataset = eval('dataset.' + config.DATASET.EVAL_SET + '.' + config.DATASET.EVAL_SET)(
#         root=config.DATASET.VALID_ROOT, list_name=config.DATASET.VALID_LIST, mode='valid', Transform='simple')

#     test_dataset = eval('dataset.' + config.DATASET.EVAL_SET + '.' + config.DATASET.EVAL_SET)(
#         root=config.DATASET.TEST_ROOT, list_name=config.DATASET.TEST_LIST, mode='test', Transform='simple')
    train_dataset = mydataset(datapath + 'train15k', datapath + 'origin5k')
    valid_dataset = mydataset(datapath + 'generatorBlendedRandomGaussian',
                              datapath + 'origin')
    test_dataset = mydataset(datapath + 'test1k', datapath + 'test_o500')
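    # NOTE: `datapath` and `mydataset` are assumed to be defined at module level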
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.TRAIN.BATCH_SIZE_PER_GPU * len(gpus),
        shuffle=config.TRAIN.SHUFFLE,
        num_workers=config.WORKERS,
        pin_memory=config.PIN_MEMORY)

    def cycle(loader):
        # Loop over the DataLoader indefinitely; if the dataset exposes a
        # generate() hook, call it between passes to regenerate samples.
        while True:
            for x in loader:
                yield x
            op = getattr(loader.dataset, "generate", None)
            if callable(op):
                op()

    train_generator = iter(cycle(train_loader))

    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.TEST.BATCH_SIZE_PER_GPU * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=config.PIN_MEMORY)

    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=config.TEST.BATCH_SIZE_PER_GPU * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=config.PIN_MEMORY)

    for iteration in range(last_iter, config.TRAIN.END_ITER,
                           config.TRAIN.EVAL_ITER):

        # freeze the original HRNet layers for the first 50000 iterations, then train all parameters
        if not NNB_GRAD and iteration >= 50000:
            if len(gpus) > 0:
                nnb.module.pretrained_grad(True)
            else:
                nnb.pretrained_grad(True)
            NNB_GRAD = True

        # train for one evaluation interval
        train(config,
              train_generator,
              nnb,
              nnc,
              criterion,
              optimizer,
              iteration,
              writer_dict,
              _print,
              lr_scheduler=lr_scheduler)
        # evaluate on validation set
        perf_indicator = validate(config, valid_loader, nnb, nnc, criterion,
                                  writer_dict, _print)
        test(config, test_loader, nnb, nnc, criterion, writer_dict, _print)

        # save the best-performing model so far
        # if perf_indicator > best_perf:
        #    best_perf = perf_indicator
        #    torch.save(model.module.state_dict(), './output/BI_dataset/bestfaceXray_'+str(best_perf)+'.pth')
        #    _print('[Save best model] ./output/BI_dataset/bestfaceXray_'+str(best_perf)+'.pth\t')

        iter_now = iteration + config.TRAIN.EVAL_ITER
        # save every other evaluation interval
        if (iteration // config.TRAIN.EVAL_ITER) % 2 == 0:
            torch.save(
                nnb.module.state_dict(),
                './output/BI_dataset2/faceXray_' + str(iter_now) + '.pth')
            torch.save(nnc.module.state_dict(),
                       './output/BI_dataset2/nnc' + str(iter_now) + '.pth')
            _print('[Save model] ./output/BI_dataset2/faceXray_' +
                   str(iter_now) + '.pth\t')
            _print('[Save model] ./output/BI_dataset2/nnc' +
                   str(iter_now) + '.pth\t')
        # lr_scheduler.step()

    # final model
    torch.save(nnb.module.state_dict(), './output/BI_dataset/faceXray.pth')
    torch.save(nnc.module.state_dict(), './output/BI_dataset/nnc.pth')
    _print('[Save the last model] ./output/BI_dataset/faceXray.pth\t')
    _print('[Save the last model] ./output/BI_dataset/nnc.pth\t')
    writer_dict['writer'].close()
Example #4
def main():
    args = parse_args()

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    model = eval('models.' + config.MODEL.NAME + '.get_nnb')(config)
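    # NOTE: eval() resolves models.<MODEL.NAME>.get_nnb at runtime; a safer
    # equivalent is getattr(importlib.import_module('models.' + config.MODEL.NAME), 'get_nnb')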

    writer_dict = {
        'writer': SummaryWriter(
            log_dir='./output/facexray/tensorboard/tensorboard' + '_' +
            datetime.now().strftime('%Y%m%d_%H%M%S')),
        'train_global_steps': 0,
        'valid_global_steps': 0,
    }

    # log init
    save_dir = os.path.join('./output/facexray/log/log' + '_' +
                            datetime.now().strftime('%Y%m%d_%H%M%S'))
    if os.path.exists(save_dir):
        raise FileExistsError('model dir exists!')
    os.makedirs(save_dir)
    logger = init_log(save_dir)  # avoid shadowing the stdlib `logging` module
    _print = logger.info

    gpus = list(config.GPUS)
    model = torch.nn.DataParallel(model, device_ids=gpus).cuda()

    # define loss function (criterion) and optimizer
    criterion = Loss()

    # training hyper-parameters
    optimizer = get_optimizer(config, model)
    last_epoch = config.TRAIN.BEGIN_EPOCH
    best_perf = 0.0

    if isinstance(config.TRAIN.LR_STEP, list):
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR,
            last_epoch - 1)
    else:
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                       config.TRAIN.LR_STEP,
                                                       config.TRAIN.LR_FACTOR,
                                                       last_epoch - 1)

    # Data loading code
    # list_name is not specified separately in the .yaml config
    # the transform does not yet generalize to other input sizes; it should produce [256, 256, 3]
    train_dataset = eval('dataset.' + config.DATASET.DATASET + '.' +
                         config.DATASET.DATASET)(config.DATASET.ROOT,
                                                 config.DATASET.TRAIN_SET,
                                                 None,
                                                 transforms.Compose([
                                                     transforms.Resize(256),
                                                     transforms.ToTensor()
                                                 ]))

    valid_dataset = eval('dataset.' + config.DATASET.DATASET + '.' +
                         config.DATASET.DATASET)(config.DATASET.ROOT,
                                                 config.DATASET.TEST_SET, None,
                                                 transforms.Compose([
                                                     transforms.Resize(256),
                                                     transforms.ToTensor()
                                                 ]))

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.TRAIN.BATCH_SIZE_PER_GPU * len(gpus),
        shuffle=config.TRAIN.SHUFFLE,
        num_workers=config.WORKERS,
        pin_memory=config.PIN_MEMORY)

    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.TEST.BATCH_SIZE_PER_GPU * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=config.PIN_MEMORY)

    for epoch in range(last_epoch, config.TRAIN.END_EPOCH):
        lr_scheduler.step()
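        # NOTE: since PyTorch 1.1 the recommended order is optimizer.step()
        # before lr_scheduler.step(); stepping at the top of the epoch shifts
        # the schedule by one epoch.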

        # the original HRNet layers stay frozen for the first 25000 epochs;
        # after that, train all parameters
        if epoch == 25000:
            for k, v in model.named_parameters():
                v.requires_grad = True

        # train for one epoch
        train(config, train_loader, model, criterion, optimizer, epoch,
              writer_dict, _print)
        # evaluate on validation set
        perf_indicator = validate(config, valid_loader, model, criterion,
                                  writer_dict, _print)

        # save the best-performing model so far
        # if perf_indicator > best_perf:
        #    best_perf = perf_indicator
        #    torch.save(model.module.state_dict(), './output/BI_dataset/bestfaceXray_'+str(best_perf)+'.pth')
        #    _print('[Save best model] ./output/BI_dataset/bestfaceXray_'+str(best_perf)+'.pth\t')

        if epoch % 25000 == 0:
            torch.save(model.module.state_dict(),
                       './output/BI_dataset/faceXray_' + str(epoch) + '.pth')
            _print('[Save model] ./output/BI_dataset/faceXray_' + str(epoch) +
                   '.pth\t')

    # final model
    torch.save(model.module.state_dict(), './output/BI_dataset/faceXray.pth')
    _print('[Save the last model] ./output/BI_dataset/faceXray.pth\t')
    writer_dict['writer'].close()
Example #5
def main_worker(args):
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    # Log in Tensorboard
    writer = SummaryWriter()
    # log init
    save_dir = os.path.join('logs',
                            'train' + '_' + datetime.now().strftime('%Y%m%d_%H%M%S'))
    if os.path.exists(save_dir):
        raise NameError('model dir exists!')
    os.makedirs(save_dir)
    logger = init_log(save_dir)

    train_dataset = labelFpsDataLoader("/home/can/AI_Camera/Dataset/License_Plate/CCPD2019/ccpd_base",
                                       preproc=preproc(cfg_plate['image_size'], (104, 117, 123)))
    # valid_dataset = ValDataset(os.path.join("./data/widerface/val", "data/train/label.txt"))

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
                                               num_workers=args.workers, collate_fn=detection_collate, pin_memory=True)
    # valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False,
    #                                            num_workers=args.workers, collate_fn=detection_collate, pin_memory=True)

    # Initialize model
    model = BaseModel(cfg=cfg_plate)

    checkpoint = None
    if args.resume is not None:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            params = checkpoint['parser']
            # args = params
            args.start_epoch = checkpoint['epoch'] + 1
            model.load_state_dict(checkpoint['state_dict'])
            del params
            del checkpoint
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        model = model.cuda()
        print('Run with DataParallel ....')
        model = torch.nn.DataParallel(model).cuda()

    priorbox = PriorBox(cfg_plate)

    with torch.no_grad():
        priors = priorbox.forward()
        priors = priors.cuda()
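    # anchor boxes are fixed, so priors are generated once up front; no_grad
    # avoids building an autograd graph for them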

    criterion = MultiBoxLoss(args.num_classes, 0.35, True, 0, True, 7, 0.35, False)
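    # the positional args above are assumed to follow the SSD-style MultiBoxLoss
    # signature: (num_classes, overlap_thresh, prior_for_matching, bkg_label,
    # neg_mining, neg_pos_ratio, neg_overlap, encode_target)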
    # Define optimizer
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    # Define learning rate scheduler
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=5, verbose=True)
    logger.info('Steps per epoch: {}'.format(len(train_loader)))

    # Start training per epoch
    recall, precision = 0, 0
    for epoch in range(args.start_epoch, args.epochs):
        train_loss = train(train_loader, model, priors, criterion, optimizer, scheduler, epoch, logger, args)

        # if epoch % args.eval_freq == 0:
        #     recall, precision = evaluate(valid_loader, model)
        #
        # logger.info('Recall: {:.4f} \t'
        #             'Precision: {:.3f} \t'.format(recall, precision))

        # Log to Tensorboard
        lr = optimizer.param_groups[0]['lr']
        writer.add_scalar('model/train_loss', train_loss, epoch)
        writer.add_scalar('model/learning_rate', lr, epoch)
        # writer.add_scalar('model/precision', precision, epoch)
        # writer.add_scalar('model/recall', recall, epoch)

        # ReduceLROnPlateau steps on the monitored metric (train loss here)
        scheduler.step(train_loss)
        state = {
            'epoch': epoch,
            'parser': args,
            'state_dict': get_state_dict(model)
        }
        torch.save(
            state,
            os.path.join(
                args.save_folder,
                args.network,
                "{}_{}.pth".format(args.network, epoch)))
Example #6
def main():
    args = parse_args()

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    nnb = models.nnb.get_nnb(config)  # parameters not frozen  TODO: add parameters to the optimizer mid-training
    # nnb = models.ae.get_ae()
    # nnb = models.fcn.get_fcn(config)
    # make nnc's softmax a no-op during training
    nnc = models.nnc.get_nnc(config)

    writer_dict = {
        'writer': SummaryWriter(
            log_dir='./output/facexray/tensorboard/tensorboard' + '_' +
            datetime.now().strftime('%Y%m%d_%H%M%S')),
        'train_global_steps': 0,
        'valid_global_steps': 0,
        'test_global_steps': 0,
    }

    # log init
    save_dir = os.path.join('./output/facexray/log/log' + '_' +
                            datetime.now().strftime('%Y%m%d_%H%M%S'))
    if os.path.exists(save_dir):
        raise FileExistsError('model dir exists!')
    os.makedirs(save_dir)
    logger = init_log(save_dir)  # avoid shadowing the stdlib `logging` module
    _print = logger.info

    gpus = list(config.GPUS)
    nnb = torch.nn.DataParallel(nnb, device_ids=gpus).cuda()
    nnc = torch.nn.DataParallel(nnc, device_ids=gpus).cuda()

    # define loss function (criterion) and optimizer
    criterion = Loss()

    # training hyper-parameters
    # initialize the optimizer; train everything except nnb's original HRNet parameters
    optimizer = get_optimizer(config, [nnb, nnc])  # TODO: for now all parameters are passed in
    last_epoch = config.TRAIN.BEGIN_EPOCH
    best_perf = 0.0

    if isinstance(config.TRAIN.LR_STEP, list):
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR,
            last_epoch - 1)
    else:
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                       config.TRAIN.LR_STEP,
                                                       config.TRAIN.LR_FACTOR,
                                                       last_epoch - 1)

    # Data loading code
    # the transform does not yet generalize to other input sizes; it should produce [256, 256, 3]
    train_dataset = eval('dataset.' + config.DATASET.DATASET + '.' +
                         config.DATASET.DATASET)(
                             root=config.DATASET.TRAIN_ROOT,
                             list_name=config.DATASET.TRAIN_LIST,
                             mode='train',
                             Transform='strong_pixel')
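    # 'strong_pixel' and 'easy' are assumed to be named transform presets
    # defined by the dataset class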

    valid_dataset = eval('dataset.' + config.DATASET.DATASET + '.' +
                         config.DATASET.DATASET)(
                             root=config.DATASET.VALID_ROOT,
                             list_name=config.DATASET.VALID_LIST,
                             mode='valid',
                             Transform='easy')

    test_dataset = eval('dataset.' + config.DATASET.DATASET + '.' +
                        config.DATASET.DATASET)(
                            root=config.DATASET.TEST_ROOT,
                            list_name=config.DATASET.TEST_LIST,
                            mode='test',
                            Transform='easy')

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.TRAIN.BATCH_SIZE_PER_GPU * len(gpus),
        shuffle=config.TRAIN.SHUFFLE,
        num_workers=config.WORKERS,
        pin_memory=config.PIN_MEMORY)

    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.TEST.BATCH_SIZE_PER_GPU * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=config.PIN_MEMORY)

    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=config.TEST.BATCH_SIZE_PER_GPU * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=config.PIN_MEMORY)

    for epoch in range(last_epoch, config.TRAIN.END_EPOCH):

        # the original HRNet layers were frozen for early epochs, then all
        # parameters are trained (warm-up is ignored for now)
        # if epoch == 25000:
        #     for k, v in nnb.named_parameters():
        #         v.requires_grad = True

        # train for one epoch
        train(config, train_loader, nnb, nnc, criterion, optimizer, epoch,
              writer_dict, _print)
        # evaluate on validation set
        perf_indicator = validate(config, valid_loader, nnb, nnc, criterion,
                                  writer_dict, _print)
        test(config, test_loader, nnb, nnc, criterion, writer_dict, _print)

        # save the best-performing model so far
        # if perf_indicator > best_perf:
        #    best_perf = perf_indicator
        #    torch.save(model.module.state_dict(), './output/BI_dataset/bestfaceXray_'+str(best_perf)+'.pth')
        #    _print('[Save best model] ./output/BI_dataset/bestfaceXray_'+str(best_perf)+'.pth\t')

        if epoch % 2 == 0:
            torch.save(nnb.module.state_dict(),
                       './output/BI_dataset2/faceXray_' + str(epoch) + '.pth')
            torch.save(nnc.module.state_dict(),
                       './output/BI_dataset2/nnc' + str(epoch) + '.pth')
            _print('[Save model] ./output/BI_dataset2/faceXray_' + str(epoch) +
                   '.pth\t')
            _print('[Save model] ./output/BI_dataset2/nnc' +
                   str(epoch) + '.pth\t')
        lr_scheduler.step()

    # final model
    torch.save(nnb.module.state_dict(), './output/BI_dataset/faceXray.pth')
    torch.save(nnc.module.state_dict(), './output/BI_dataset/nnc.pth')
    _print('[Save the last model] ./output/BI_dataset/faceXray.pth\t')
    _print('[Save the last model] ./output/BI_dataset/nnc.pth\t')
    writer_dict['writer'].close()