Example #1
def train(config):
    model = Mode(config, True)
    train_dataloader = torch.utils.data.DataLoader(
        COCODataset(config.train_list,
                    config.image_size,
                    True,
                    config.batch_size,
                    config.jitter,
                    shuffle=True,
                    seed=config.seed,
                    random=True,
                    num_workers=config.num_workers),
        batch_size=config.batch_size,
        shuffle=False,
        num_workers=config.num_workers,
        pin_memory=True)

    val_dataloader = torch.utils.data.DataLoader(
        COCODataset(config.val_list, config.image_size, False,
                    config.batch_size, config.jitter),
        batch_size=config.batch_size,
        shuffle=False,
        num_workers=config.num_workers,
        pin_memory=True)

    model.train(train_dataloader, val_dataloader)
Example #2
def evaluate(config, name):
    model = Mode(config, True)
    model.net.eval()

    val_dataloader = torch.utils.data.DataLoader(
        COCODataset(config.val_list,
                    config.image_size,
                    False,
                    config.batch_size,
                    config.jitter,
                    shuffle=False,
                    seed=0,
                    random=False,
                    num_workers=config.num_workers),
        batch_size=config.batch_size,
        shuffle=False,
        num_workers=config.num_workers,
        pin_memory=True)
    if name == 'coco':
        model.eval_coco(val_dataloader)
    elif name == 'voc':
        classes = [
            "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car",
            "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
            "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"
        ]
        model.eval_voc(val_dataloader, classes)
Example #3
def evaluate(config, name):
    model = Mode(config, False)

    val_dataloader = torch.utils.data.DataLoader(
        COCODataset(config.val_list,
                    config.image_size,
                    False,
                    config.batch_size,
                    config.jitter,
                    shuffle=False,
                    seed=0,
                    random=False,
                    num_workers=config.num_workers),
        batch_size=config.batch_size,
        shuffle=False,
        num_workers=config.num_workers,
        pin_memory=True)
    if name == 'coco':
        model.eval_coco(val_dataloader)
    elif name == 'voc':
        model.eval_voc(val_dataloader)
Example #4
def train(config):
    config["global_step"] = config.get("start_step", 0)
    is_training = False if config.get("export_onnx") else True

    # Load and initialize network
    net = ModelMain(config, is_training=is_training)
    net.train(is_training)

    # Optimizer and learning rate
    optimizer = _get_optimizer(config, net)
    lr_scheduler = optim.lr_scheduler.StepLR(
        optimizer,
        step_size=config["lr"]["decay_step"],
        gamma=config["lr"]["decay_gamma"])

    # Set data parallel
    net = nn.DataParallel(net)
    net = net.cuda()

    # Restore pretrain model
    if config["pretrain_snapshot"]:
        logging.info("Load pretrained weights from {}".format(
            config["pretrain_snapshot"]))
        state_dict = torch.load(config["pretrain_snapshot"])
        net.load_state_dict(state_dict)

    # Only export onnx
    # if config.get("export_onnx"):
    # real_model = net.module
    # real_model.eval()
    # dummy_input = torch.randn(8, 3, config["img_h"], config["img_w"]).cuda()
    # save_path = os.path.join(config["sub_working_dir"], "pytorch.onnx")
    # logging.info("Exporting onnx to {}".format(save_path))
    # torch.onnx.export(real_model, dummy_input, save_path, verbose=False)
    # logging.info("Done. Exiting now.")
    # sys.exit()

    # Evaluate interface
    # if config["evaluate_type"]:
    # logging.info("Using {} to evaluate model.".format(config["evaluate_type"]))
    # evaluate_func = importlib.import_module(config["evaluate_type"]).run_eval
    # config["online_net"] = net

    # YOLO loss with 3 scales
    yolo_losses = []
    for i in range(3):
        yolo_losses.append(
            YOLOLoss(config["yolo"]["anchors"][i], config["yolo"]["classes"],
                     (config["img_w"], config["img_h"])))

    # DataLoader
    dataloader = torch.utils.data.DataLoader(COCODataset(
        config["train_path"], (config["img_w"], config["img_h"]),
        is_training=True),
                                             batch_size=config["batch_size"],
                                             shuffle=True,
                                             num_workers=32,
                                             pin_memory=True)

    # Start the training loop
    logging.info("Start training.")
    for epoch in range(config["epochs"]):
        for step, samples in enumerate(dataloader):
            images, labels = samples["image"], samples["label"]
            start_time = time.time()
            config["global_step"] += 1

            # Forward and backward
            optimizer.zero_grad()
            outputs = net(images)
            losses_name = ["total_loss", "x", "y", "w", "h", "conf", "cls"]
            losses = []
            for _ in range(len(losses_name)):
                losses.append([])
            for i in range(3):
                _loss_item = yolo_losses[i](outputs[i], labels)
                for j, l in enumerate(_loss_item):
                    losses[j].append(l)
            losses = [sum(l) for l in losses]
            loss = losses[0]
            loss.backward()
            optimizer.step()

            if step > 0 and step % 10 == 0:
                _loss = loss.item()
                duration = float(time.time() - start_time)
                example_per_second = config["batch_size"] / duration
                lr = optimizer.param_groups[0]['lr']
                logging.info(
                    "epoch [%.3d] iter = %d loss = %.2f example/sec = %.3f lr = %.5f "
                    % (epoch, step, _loss, example_per_second, lr))
                config["tensorboard_writer"].add_scalar(
                    "lr", lr, config["global_step"])
                config["tensorboard_writer"].add_scalar(
                    "example/sec", example_per_second, config["global_step"])
                for i, name in enumerate(losses_name):
                    value = _loss if i == 0 else losses[i]
                    config["tensorboard_writer"].add_scalar(
                        name, value, config["global_step"])

        # if step > 0 and step % 1000 == 0:
        # net.train(False)
        # _save_checkpoint(net.state_dict(), config)
        # net.train(True)

        _save_checkpoint(net.state_dict(), config)
        lr_scheduler.step()

    # net.train(False)
    _save_checkpoint(net.state_dict(), config)
    # net.train(True)
    logging.info("Bye~")
Example #5
def train(config):
    config["global_step"] = config.get("start_step", 0)
    is_training = False if config.get("export_onnx") else True

    anchors = [int(x) for x in config["yolo"]["anchors"].split(",")]
    anchors = [[[anchors[i], anchors[i + 1]], [anchors[i + 2], anchors[i + 3]],
                [anchors[i + 4], anchors[i + 5]]]
               for i in range(0, len(anchors), 6)]
    anchors.reverse()
    config["yolo"]["anchors"] = []
    for i in range(3):
        config["yolo"]["anchors"].append(anchors[i])
    # Load and initialize network
    net = ModelMain_SN(config, is_training=is_training)
    net.train(is_training)

    # Optimizer and learning rate
    optimizer = _get_optimizer(config, net)
    lr_scheduler = optim.lr_scheduler.StepLR(
        optimizer,
        step_size=config["lr"]["decay_step"],
        gamma=config["lr"]["decay_gamma"])

    # Set data parallel
    net = nn.DataParallel(net)
    net = net.cuda()

    # Restore pretrain model
    if config["pretrain_snapshot"]:
        logging.info("Load pretrained weights from {}".format(
            config["pretrain_snapshot"]))
        state_dict = torch.load(config["pretrain_snapshot"])
        net.load_state_dict(state_dict)

    # YOLO loss with 3 scales
    yolo_losses = []
    for i in range(3):
        yolo_losses.append(
            YOLOLayer(config["batch_size"], i, config["yolo"]["anchors"][i],
                      config["yolo"]["classes"],
                      (config["img_w"], config["img_h"])))

    # DataLoader
    dataloader = torch.utils.data.DataLoader(COCODataset(
        config["train_path"], (config["img_w"], config["img_h"]),
        is_training=True,
        is_scene=False),
                                             batch_size=config["batch_size"],
                                             shuffle=True,
                                             drop_last=True,
                                             num_workers=0,
                                             pin_memory=True)

    # Start the training loop
    logging.info("Start training.")
    dataload_len = len(dataloader)
    for epoch in range(config["epochs"]):
        recall = 0
        mini_step = 0
        for step, samples in enumerate(dataloader):
            images, labels = samples["image"], samples["label"]
            start_time = time.time()
            config["global_step"] += 1
            for mini_batch in range(8):
                mini_step += 1
                # Forward and backward
                optimizer.zero_grad()
                outputs = net(images)
                losses_name = [
                    "total_loss", "x", "y", "w", "h", "conf", "cls", "recall"
                ]
                losses = [0] * len(losses_name)
                for i in range(3):
                    _loss_item = yolo_losses[i](outputs[i], labels)
                    for j, l in enumerate(_loss_item):
                        losses[j] += l
                # losses = [sum(l) for l in losses]
                loss = losses[0]
                loss.backward()
                optimizer.step()
                _loss = loss.item()
                # example_per_second = config["batch_size"] / duration
                # lr = optimizer.param_groups[0]['lr']

                strftime = datetime.datetime.now().strftime("%H:%M:%S")
                if (losses[7] / 3 >= recall / (step + 1)) or mini_batch == 7:
                    recall += losses[7] / 3
                    print(
                        '%s [Epoch %d/%d,batch %03d/%d loss:x %.5f,y %.5f,w %.5f,h %.5f,conf %.5f,cls %.5f,total %.5f,rec %.3f,avrec %.3f %d]'
                        %
                        (strftime, epoch, config["epochs"], step, dataload_len,
                         losses[1], losses[2], losses[3], losses[4], losses[5],
                         losses[6], _loss, losses[7] / 3, recall /
                         (step + 1), mini_batch))
                    break
                else:
                    print(
                        '%s [Epoch %d/%d,batch %03d/%d loss:x %.5f,y %.5f,w %.5f,h %.5f,conf %.5f,cls %.5f,total %.5f,rec %.3f,prerc %.3f %d]'
                        % (strftime, epoch, config["epochs"], step,
                           dataload_len, losses[1], losses[2], losses[3],
                           losses[4], losses[5], losses[6], _loss,
                           losses[7] / 3, recall / step, mini_batch))
        if (epoch % 2 == 0 and recall / len(dataloader) > 0.7
            ) or recall / len(dataloader) > 0.96:
            torch.save(
                net.state_dict(), '%s/%.4f_%04d.weights' %
                (checkpoint_dir, recall / len(dataloader), epoch))

        lr_scheduler.step()
    # net.train(True)
    logging.info("Bye bye")
Example #6
def evaluate(config):
    is_training = False
    # Load and initialize network
    net = ModelMain(config, is_training=is_training)
    net.train(is_training)

    # Set data parallel
    net = nn.DataParallel(net)
    net = net.cuda()

    # Restore pretrain model
    if config["pretrain_snapshot"]:
        state_dict = torch.load(config["pretrain_snapshot"])
        net.load_state_dict(state_dict)
    else:
        logging.warning("missing pretrain_snapshot!!!")

    # YOLO loss with 3 scales
    yolo_losses = []
    for i in range(3):
        yolo_losses.append(
            YOLOLoss(config["yolo"]["anchors"][i], config["yolo"]["classes"],
                     (config["img_w"], config["img_h"])))

    # DataLoader
    dataloader = torch.utils.data.DataLoader(COCODataset(
        config["val_path"], (config["img_w"], config["img_h"]),
        is_training=False),
                                             batch_size=config["batch_size"],
                                             shuffle=False,
                                             num_workers=16,
                                             pin_memory=False)

    # Start the eval loop
    logging.info("Start eval.")
    n_gt = 0
    correct = 0
    for step, samples in enumerate(dataloader):
        images, labels = samples["image"], samples["label"]
        labels = labels.cuda()
        with torch.no_grad():
            outputs = net(images)
            output_list = []
            for i in range(3):
                output_list.append(yolo_losses[i](outputs[i]))
            output = torch.cat(output_list, 1)
            output = non_max_suppression(output, 80, conf_thres=0.2)
            #  calculate
            for sample_i in range(labels.size(0)):
                # Get labels for sample where width is not zero (dummies)
                target_sample = labels[sample_i, labels[sample_i, :, 3] != 0]
                for obj_cls, tx, ty, tw, th in target_sample:
                    # Get rescaled gt coordinates
                    tx1, tx2 = config["img_w"] * (
                        tx - tw / 2), config["img_w"] * (tx + tw / 2)
                    ty1, ty2 = config["img_h"] * (
                        ty - th / 2), config["img_h"] * (ty + th / 2)
                    n_gt += 1
                    box_gt = torch.cat([
                        coord.unsqueeze(0) for coord in [tx1, ty1, tx2, ty2]
                    ]).view(1, -1)
                    sample_pred = output[sample_i]
                    if sample_pred is not None:
                        # Iterate through predictions where the class predicted is same as gt
                        for x1, y1, x2, y2, conf, obj_conf, obj_pred in sample_pred[
                                sample_pred[:, 6] == obj_cls]:
                            box_pred = torch.cat([
                                coord.unsqueeze(0)
                                for coord in [x1, y1, x2, y2]
                            ]).view(1, -1)
                            iou = bbox_iou(box_pred, box_gt)
                            if iou >= config["iou_thres"]:
                                correct += 1
                                break
        if n_gt:
            logging.info('Batch [%d/%d] mAP: %.5f' %
                         (step, len(dataloader), float(correct / n_gt)))

    logging.info('Mean Average Precision: %.5f' % float(correct / n_gt))
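
bbox_iou is used above but not defined in the example. A minimal sketch, assuming both arguments are (x1, y1, x2, y2) tensors of shape (N, 4) as built above; the repository's own helper may handle more cases:

import torch


def bbox_iou(box1, box2):
    # Intersection rectangle
    inter_x1 = torch.max(box1[:, 0], box2[:, 0])
    inter_y1 = torch.max(box1[:, 1], box2[:, 1])
    inter_x2 = torch.min(box1[:, 2], box2[:, 2])
    inter_y2 = torch.min(box1[:, 3], box2[:, 3])
    inter_area = (inter_x2 - inter_x1).clamp(min=0) * (inter_y2 - inter_y1).clamp(min=0)
    # Union = area1 + area2 - intersection
    area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])
    area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])
    return inter_area / (area1 + area2 - inter_area + 1e-16)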
Example #7
anchors = [[[anchors[i], anchors[i + 1]], [anchors[i + 2], anchors[i + 3]],
            [anchors[i + 4], anchors[i + 5]]]
           for i in range(0, len(anchors), 6)]
anchors.reverse()
config["yolo"]["anchors"] = []
for i in range(3):
    config["yolo"]["anchors"].append(anchors[i])
yolo_losses = []
for i in range(3):
    yolo_losses.append(
        YOLOLayer(config["batch_size"], i, config["yolo"]["anchors"][i],
                  config["yolo"]["classes"],
                  (config["img_w"], config["img_h"])))

dataloader = torch.utils.data.DataLoader(COCODataset(
    config["train_path"], (config["img_w"], config["img_h"]),
    is_training=True,
    is_scene=True),
                                         batch_size=config["batch_size"],
                                         shuffle=True,
                                         drop_last=True,
                                         num_workers=0,
                                         pin_memory=True)


def validate(net):
    n_gt = 0
    correct = 0
    for step, samples in enumerate(dataloader):
        images, labels, image_paths = samples["image"], samples[
            "label"], samples["img_path"]
        labels = labels.cuda()
Example #8
def train(config):
    config["global_step"] = config.get("start_step", 0)
    is_training = False if config.get("export_onnx") else True

    anchors = [int(x) for x in config["yolo"]["anchors"].split(",")]
    anchors = [[[anchors[i], anchors[i + 1]], [anchors[i + 2], anchors[i + 3]],
                [anchors[i + 4], anchors[i + 5]]]
               for i in range(0, len(anchors), 6)]
    anchors.reverse()
    config["yolo"]["anchors"] = []
    for i in range(3):
        config["yolo"]["anchors"].append(anchors[i])
    # Load and initialize network
    net = ModelMain(config, is_training=is_training)
    net.train(is_training)

    # Optimizer and learning rate
    optimizer = _get_optimizer(config, net)

    lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=15)
    # lr_scheduler = optim.lr_scheduler.StepLR(
    #     optimizer,
    #     step_size=config["lr"]["decay_step"],
    #     gamma=config["lr"]["decay_gamma"])

    # Set data parallel
    net = nn.DataParallel(net)
    net = net.cuda()
    # Restore pretrain model
    if config["pretrain_snapshot"]:
        logging.info("Load pretrained weights from {}".format(
            config["pretrain_snapshot"]))
        state_dict = torch.load(config["pretrain_snapshot"])
        net.load_state_dict(state_dict)

    # Only export onnx
    # if config.get("export_onnx"):
    # real_model = net.module
    # real_model.eval()
    # dummy_input = torch.randn(8, 3, config["img_h"], config["img_w"]).cuda()
    # save_path = os.path.join(config["sub_working_dir"], "pytorch.onnx")
    # logging.info("Exporting onnx to {}".format(save_path))
    # torch.onnx.export(real_model, dummy_input, save_path, verbose=False)
    # logging.info("Done. Exiting now.")
    # sys.exit()

    # Evaluate interface
    # if config["evaluate_type"]:
    # logging.info("Using {} to evaluate model.".format(config["evaluate_type"]))
    # evaluate_func = importlib.import_module(config["evaluate_type"]).run_eval
    # config["online_net"] = net

    # YOLO loss with 3 scales
    yolo_losses = []
    for i in range(3):
        yolo_losses.append(
            YOLOLayer(config["batch_size"], i, config["yolo"]["anchors"][i],
                      config["yolo"]["classes"],
                      (config["img_w"], config["img_h"])))

    # DataLoader
    dataloader = torch.utils.data.DataLoader(COCODataset(
        config["train_path"], (config["img_w"], config["img_h"]),
        is_training=True,
        is_scene=True),
                                             batch_size=config["batch_size"],
                                             shuffle=True,
                                             drop_last=True,
                                             num_workers=0,
                                             pin_memory=True)

    # Start the training loop
    logging.info("Start training.")
    dataload_len = len(dataloader)
    best_acc = 0.5
    for epoch in range(config["epochs"]):

        recall = 0
        mini_step = 0
        for step, samples in enumerate(dataloader):
            images, labels = samples["image"], samples["label"]
            config["global_step"] += 1
            # Forward and backward
            optimizer.zero_grad()
            outputs = net(images.cuda())
            losses_name = [
                "total_loss", "x", "y", "w", "h", "conf", "cls", "recall"
            ]
            losses = [0] * len(losses_name)
            for i in range(3):
                _loss_item = yolo_losses[i](outputs[i], labels)
                for j, l in enumerate(_loss_item):
                    losses[j] += l
            # losses = [sum(l) for l in losses]
            loss = losses[0]
            loss.backward()
            optimizer.step()
            _loss = loss.item()
            # example_per_second = config["batch_size"] / duration
            lr = optimizer.param_groups[0]['lr']

            strftime = datetime.datetime.now().strftime("%H:%M:%S")
                # if (losses[7] / 3 >= recall / (step + 1)):  # taken when mini_batch is 0
            recall += losses[7] / 3
            print(
                '%s [Epoch %d/%d,batch %03d/%d loss:x %.5f,y %.5f,w %.5f,h %.5f,conf %.5f,cls %.5f,total %.5f,rec %.3f,avrec %.3f %.3f]'
                % (strftime, epoch, config["epochs"], step, dataload_len,
                   losses[1], losses[2], losses[3], losses[4], losses[5],
                   losses[6], _loss, losses[7] / 3, recall / (step + 1), lr))

        if recall / len(dataloader) > best_acc:
            best_acc = recall / len(dataloader)
            if epoch > 0:
                torch.save(
                    net.state_dict(), '%s/%.4f_%04d.weights' %
                    (checkpoint_dir, recall / len(dataloader), epoch))

        lr_scheduler.step()
        net.train(is_training)
        torch.cuda.empty_cache()
    # net.train(True)
    logging.info("Bye bye")
Example #9
def evaluate(config):
    is_training = False
    # Load and initialize network
    net = ModelMain(config, is_training=is_training)
    net.train(is_training)

    # Set data parallel
    net = nn.DataParallel(net)
    net = net.cuda()

    # Restore pretrain model
    if config["pretrain_snapshot"]:
        state_dict = torch.load(config["pretrain_snapshot"])
        net.load_state_dict(state_dict)
    else:
        logging.warning("missing pretrain_snapshot!!!")

    # YOLO loss with 3 scales
    yolo_losses = []
    for i in range(3):
        yolo_losses.append(YOLOLoss(config["yolo"]["anchors"][i],
                                    config["yolo"]["classes"], (config["img_w"], config["img_h"])))

    # DataLoader
    dataloader = torch.utils.data.DataLoader(dataset=COCODataset(config["test_path"], config["img_w"]),
                                             batch_size=config["batch_size"],
                                             shuffle=False, num_workers=8, pin_memory=False)

    # Start the eval loop
    #logging.info("Start eval.")
    n_gt = 0
    correct = 0
    #logging.debug('%s' % str(dataloader))

    gt_histro={}
    pred_histro = {}
    correct_histro = {}

    for i in range(config["yolo"]["classes"]):
        gt_histro[i] = 1
        pred_histro[i] = 1
        correct_histro[i] = 0

    # images holds all images in the batch, labels holds all labels in the batch
    for step, (images, labels) in enumerate(dataloader):
        labels = labels.cuda()
        with torch.no_grad():
            outputs = net(images)
            output_list = []
            for i in range(3):
                output_list.append(yolo_losses[i](outputs[i]))

            # Concatenate the three scales' predictions along dim 1 (dim 0 indexes the images in the batch, dim 1 the predicted boxes per image, dim 2 the per-box values)
            output = torch.cat(output_list, dim=1)

            #logging.info('%s' % str(output.shape))

            # Apply non-maximum suppression
            #output = non_max_suppression(prediction=output, num_classes=config["yolo"]["classes"], conf_thres=config["conf_thresh"], nms_thres=config["nms_thresh"])
            output = class_nms(prediction=output, num_classes=config["yolo"]["classes"],conf_thres=config["conf_thresh"], nms_thres=config["nms_thresh"])
            #  calculate
            for sample_i in range(labels.size(0)):

                # Count all predictions (for the statistics)
                sample_pred = output[sample_i]
                if sample_pred is not None:
                    #logging.debug(sample_pred.shape)
                    for i in range(sample_pred.shape[0]):
                        pred_histro[int(sample_pred[i,6])] +=  1

                # Get labels for sample where width is not zero (dummies)
                target_sample = labels[sample_i, labels[sample_i, :, 3] != 0]
                # Each ground truth: class id obj_cls, relative center x, relative center y, relative width w, relative height h
                n_gt=0
                correct=0
                for obj_cls, tx, ty, tw, th in target_sample:
                    # Get rescaled gt coordinates
                    # Convert to input-pixel coordinates: top-left (tx1, ty1), bottom-right (tx2, ty2)
                    tx1, tx2 = config["img_w"] * (tx - tw / 2), config["img_w"] * (tx + tw / 2)
                    ty1, ty2 = config["img_h"] * (ty - th / 2), config["img_h"] * (ty + th / 2)
                    # Count ground truths for the statistics
                    n_gt += 1
                    gt_histro[int(obj_cls)] += 1
                    # Reshape to a (1, 4) tensor for the IoU computation
                    box_gt = torch.cat([coord.unsqueeze(0) for coord in [tx1, ty1, tx2, ty2]]).view(1, -1)
                    # logging.info('%s' % str(box_gt.shape))

                    sample_pred = output[sample_i]
                    if sample_pred is not None:
                        # Iterate through predictions where the class predicted is same as gt
                        # For each ground truth, iterate over the predictions
                        for x1, y1, x2, y2, conf, obj_conf, obj_pred in sample_pred[sample_pred[:, 6] == obj_cls]:  # predicted class == ground-truth class
                            #logging.info("%d" % obj_cls)
                            box_pred = torch.cat([coord.unsqueeze(0) for coord in [x1, y1, x2, y2]]).view(1, -1)
                            #pred_histro[int(obj_pred)] += 1
                            iou = bbox_iou(box_pred, box_gt)
                            #if iou >= config["iou_thres"] and obj_conf >= config["obj_thresh"]:
                            if iou >= config["iou_thresh"]:
                                correct += 1
                                correct_histro[int(obj_pred)] += 1
                                break
                #logging.debug("----------------")
                #logging.debug(correct_histro[4])
                #logging.debug(pred_histro[4])
                #logging.debug(gt_histro[4])
    if n_gt:
        types = config["types"]

        reverse_types = {}  # build the reverse mapping of types
        for key in types.keys():
            reverse_types[types[key]] = key

        #logging.info('Batch [%d/%d] mAP: %.5f' % (step, len(dataloader), float(correct / n_gt)))
        logging.info('Precision:%s' % str([reverse_types[i] +':'+ str(int(100 * correct_histro[i] / pred_histro[i])) for i in range(config["yolo"]["classes"]) ]))
        logging.info('Recall   :%s' % str([reverse_types[i] +':'+ str(int(100 * correct_histro[i] / gt_histro[i])) for i in range(config["yolo"]["classes"])]))
Example #10
def evaluate(config):
    is_training = False
    # Load and initialize network
    net = ModelMain(config, is_training=is_training)
    net.train(is_training)

    # Set data parallel
    net = nn.DataParallel(net)
    net = net.cuda()

    # Restore pretrain model
    if config["pretrain_snapshot"]:
        logging.info("Load checkpoint: {}".format(config["pretrain_snapshot"]))
        state_dict = torch.load(config["pretrain_snapshot"])
        net.load_state_dict(state_dict)
    else:
        logging.warning("missing pretrain_snapshot!!!")

    # YOLO loss with 3 scales
    yolo_losses = []
    for i in range(3):
        yolo_losses.append(
            YOLOLoss(config["yolo"]["anchors"][i], config["yolo"]["classes"],
                     (config["img_w"], config["img_h"])))

    # DataLoader.
    dataloader = torch.utils.data.DataLoader(COCODataset(
        config["val_path"], (config["img_w"], config["img_h"]),
        is_training=False),
                                             batch_size=config["batch_size"],
                                             shuffle=False,
                                             num_workers=8,
                                             pin_memory=False)

    # Coco Prepare.
    index2category = json.load(open("coco_index2category.json"))

    # Start the eval loop
    logging.info("Start eval.")
    coco_results = []
    coco_img_ids = set([])
    for step, samples in enumerate(dataloader):
        images, labels = samples["image"], samples["label"]
        image_paths, origin_sizes = samples["image_path"], samples[
            "origin_size"]
        with torch.no_grad():
            outputs = net(images)
            output_list = []
            for i in range(3):
                output_list.append(yolo_losses[i](outputs[i]))
            output = torch.cat(output_list, 1)
            batch_detections = non_max_suppression(output,
                                                   config["yolo"]["classes"],
                                                   conf_thres=0.01,
                                                   nms_thres=0.45)
        for idx, detections in enumerate(batch_detections):
            image_id = int(os.path.basename(image_paths[idx])[-16:-4])
            coco_img_ids.add(image_id)
            if detections is not None:
                origin_size = eval(origin_sizes[idx])
                detections = detections.cpu().numpy()
                for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                    x1 = x1 / config["img_w"] * origin_size[0]
                    x2 = x2 / config["img_w"] * origin_size[0]
                    y1 = y1 / config["img_h"] * origin_size[1]
                    y2 = y2 / config["img_h"] * origin_size[1]
                    w = x2 - x1
                    h = y2 - y1
                    coco_results.append({
                        "image_id":
                        image_id,
                        "category_id":
                        index2category[str(int(cls_pred.item()))],
                        "bbox": (float(x1), float(y1), float(w), float(h)),
                        "score":
                        float(conf),
                    })
        logging.info("Now {}/{}".format(step, len(dataloader)))
    save_results_path = "coco_results.json"
    with open(save_results_path, "w") as f:
        json.dump(coco_results,
                  f,
                  sort_keys=True,
                  indent=4,
                  separators=(',', ':'))
    logging.info("Save coco format results to {}".format(save_results_path))

    #  COCO api
    logging.info("Using coco-evaluate tools to evaluate.")
    cocoGt = COCO(config["annotation_path"])
    cocoDt = cocoGt.loadRes(save_results_path)
    cocoEval = COCOeval(cocoGt, cocoDt, "bbox")
    cocoEval.params.imgIds = list(coco_img_ids)  # real imgIds
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
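
The coco_index2category.json file loaded above is not shown in these examples. Presumably it maps the model's 0-79 class indices (as strings) to COCO's sparse category ids, which COCOeval expects. A hypothetical fragment, for illustration only:

# Assumed layout of coco_index2category.json once loaded via json.load:
# model class index (string) -> COCO category id (int, sparse 1..90).
index2category = {
    "0": 1,    # person
    "1": 2,    # bicycle
    # ...
    "79": 90,  # toothbrush
}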
Example #11
def train():
    global_step = 0
    is_training = True

    # Load and Initialize Network
    net = ModelMain(is_training)
    net.train(is_training)

    # Optimizer and Lr
    optimizer = _get_optimizer(net)
    lr_scheduler = optim.lr_scheduler.StepLR(
        optimizer,
        step_size=lr_decay_step,  #20
        gamma=lr_decay_gamma)  # 0.1

    # Set Data Paraller:
    net = nn.DataParallel(net)
    net = net.cuda()
    logging.info("Net of Cuda is Done!")

    # Restore pretrained model
    if pretrain_snapshot:
        logging.info(
            "Load pretrained weights from {}".format(pretrain_snapshot))
        state_dic = torch.load(pretrain_snapshot)
        net.load_state_dict(state_dic)

    yolo_losses = []
    for i in range(3):
        yolo_losses.append(
            YOLOLoss(anchors[i], classes, (img_w, img_h)).cuda())
    print('YOLO_Losses: \n', yolo_losses)

    # DataLoader
    train_data_loader = DATA.DataLoader(dataset=COCODataset(train_path,
                                                            (img_w, img_h),
                                                            is_training=True),
                                        batch_size=batch_size,
                                        shuffle=True,
                                        pin_memory=False)
    # Start the training loop
    logging.info("Start training......")
    for epoch in range(epochs):
        for step, samples in enumerate(train_data_loader):
            images, labels = samples['image'].cuda(), samples["label"].cuda()
            start_time = time.time()
            global_step += 1

            # Forward & Backward
            optimizer.zero_grad()
            outputs = net(images)
            losses_name = ["total_loss", "x", "y", "w", "h", "conf", "cls"]
            # Use a list comprehension here: [[]] * n would alias one shared
            # list across all loss terms and mix their sums together.
            losses = [[] for _ in losses_name]
            for i in range(3):  # YOLO 3 scales
                _loss_item = yolo_losses[i](outputs[i], labels)
                for j, l in enumerate(_loss_item):
                    # print('j: ', j, 'l: ', l)  # j: index 0-6; l holds: total loss, x, y, w, h, conf, cls
                    losses[j].append(l)
            losses = [sum(l) for l in losses]
            loss = losses[0]  # losses[0] is the total loss
            conf = losses[5]
            loss.backward()
            optimizer.step()

            if step > 0 and step % 10 == 0:
                _loss = loss.item()
                _conf = conf.item()
                duration = float(time.time() - start_time)  # time spent on this step
                example_per_second = batch_size / duration  # samples processed per second
                lr = optimizer.param_groups[0]['lr']
                logging.info(
                    "epoch [%.3d] iter = %d loss = %.2f conf = %.2f example/sec = %.3f lr = %.5f "
                    % (epoch, step, _loss, _conf, example_per_second, lr))
            if step >= 0 and step % 1000 == 0:
                # net.train(False)
                _save_checkpoint(net.state_dict(), epoch, step)
                # net.train(True)

        lr_scheduler.step()

    _save_checkpoint(net.state_dict(), 100, 9999)
    logging.info("Bye~")
Example #12
def evaluate(config):
    # checkpoint_paths = {'58': r'\\192.168.25.58\Team-CV\checkpoints\torch_yolov3'}
    checkpoint_paths = {'39': r'C:\Users\Administrator\Desktop\checkpoint/'}
    # checkpoint_paths = {'68': r'E:\github\YOLOv3_PyTorch\evaluate\weights'}
    post_weights = {k: 0 for k in checkpoint_paths.keys()}
    weight_index = {k: 0 for k in checkpoint_paths.keys()}
    time_inter = 10
    dataloader = torch.utils.data.DataLoader(COCODataset(
        config["train_path"], (config["img_w"], config["img_h"]),
        is_training=False,
        is_scene=True),
                                             batch_size=config["batch_size"],
                                             shuffle=False,
                                             num_workers=0,
                                             pin_memory=False,
                                             drop_last=True)  # DataLoader
    net, yolo_losses = build_yolov3(config)
    while 1:
        for key, checkpoint_path in checkpoint_paths.items():
            os.makedirs(checkpoint_path + '/result', exist_ok=True)
            checkpoint_weights = os.listdir(checkpoint_path)
            checkpoint_result = os.listdir(checkpoint_path + '/result')
            checkpoint_result = [
                cweight.split("_")[2][:-4] for cweight in checkpoint_result
                if cweight.endswith('ini')
            ]
            checkpoint_weights = [
                cweight for cweight in checkpoint_weights
                if cweight.endswith('weights')
            ]

            if weight_index[key] >= len(checkpoint_weights):
                print('weight_index[key]', weight_index[key],
                      len(checkpoint_weights))
                time.sleep(time_inter)
                continue
            if post_weights[key] == checkpoint_weights[weight_index[key]]:
                print('post_weights[key]', post_weights[key])
                time.sleep(time_inter)
                continue
            post_weights[key] = checkpoint_weights[weight_index[key]]

            if post_weights[key].endswith("_.weights"):  # check that the weights file has finished saving
                print("post_weights[key].split('_')",
                      post_weights[key].split('_'))
                time.sleep(time_inter)
                continue
            if checkpoint_weights[weight_index[key]].split(
                    "_")[1][:-8] in checkpoint_result:
                print('weight_index[key] +', weight_index[key])
                weight_index[key] += 1
                time.sleep(time_inter // 20)
                continue
            weight_index[key] += 1
            try:
                if config["pretrain_snapshot"]:  # Restore pretrain model
                    state_dict = torch.load(config["pretrain_snapshot"])
                    logging.info("loading model from %s" %
                                 config["pretrain_snapshot"])
                    net.load_state_dict(state_dict)
                else:
                    state_dict = torch.load(
                        os.path.join(checkpoint_path, post_weights[key]))
                    logging.info(
                        "loading model from %s" %
                        os.path.join(checkpoint_path, post_weights[key]))
                    net.load_state_dict(state_dict)
            except Exception as E:
                print(E)
                time.sleep(time_inter)
                continue
            logging.info("Start eval.")  # Start the eval loop
            n_gt = 0
            correct = 0
            imagepath_list = []
            for step, samples in enumerate(dataloader):
                images, labels, image_paths = samples["image"], samples[
                    "label"], samples["img_path"]
                labels = labels.cuda()
                with torch.no_grad():

                    output = net(images)
                    time1 = datetime.datetime.now()
                    map = mAP(output, labels, 352)

                    # output = non_max_suppression(output, 1, conf_thres=0.5)

                    # output = soft_nms_n(output, score_threshold=0.5)
                    if ((datetime.datetime.now() - time1).seconds > 10):
                        logging.info('Batch %d time is too long ' % (step))
                        n_gt = 1
                        break
                    print(
                        "map time",
                        (datetime.datetime.now() - time1).seconds * 1000 +
                        (datetime.datetime.now() - time1).microseconds // 1000)
                    #  calculate
                    # for sample_i in range(labels.size(0)):
                    #     # Get labels for sample where width is not zero (dummies)
                    #     target_sample = labels[sample_i, labels[sample_i, :, 3] != 0]
                    #     for obj_cls, tx, ty, tw, th in target_sample:
                    #         # Get rescaled gt coordinates
                    #         tx1, tx2 = config["img_w"] * (tx - tw / 2), config["img_w"] * (tx + tw / 2)
                    #         ty1, ty2 = config["img_h"] * (ty - th / 2), config["img_h"] * (ty + th / 2)
                    #         n_gt += 1
                    #         box_gt = torch.cat([coord.unsqueeze(0) for coord in [tx1, ty1, tx2, ty2]]).view(1, -1)
                    #         sample_pred = output[sample_i]
                    #         last_current=correct
                    #         if sample_pred is not None:
                    #             # Iterate through predictions where the class predicted is same as gt
                    #             for x1, y1, x2, y2, conf, obj_conf, obj_pred in sample_pred[sample_pred[:, 6] == obj_cls.cuda()]:
                    #                 box_pred = torch.cat([coord.unsqueeze(0) for coord in [x1, y1, x2, y2]]).view(1, -1)
                    #                 iou = bbox_iou(box_pred, box_gt)
                    #                 if iou >= config["iou_thres"]:
                    #                     correct += 1
                    #                     break
                    #         if last_current==correct and image_paths[sample_i] not in imagepath_list:
                    #             imagepath_list.append(image_paths[sample_i])
                    # print("get result time", time.time() - start)
                    logging.info('Mean Average Precision: %.5f' % map)
                # if n_gt:
                #     logging.info('Batch [%d/%d] err_count:%d mAP: %.5f' % (step, len(dataloader), len(imagepath_list),float(correct / n_gt)))

            # logging.info('Mean Average Precision: %.5f' % float(correct / n_gt))
            logging.info('Mean Average Precision: %.5f' % map)
            # Mean_Average = float(correct / n_gt)
            # ini_name = os.path.join(checkpoint_path+'/result/', '%.4f_%s.ini'%((float(post_weights[key].split("_")[0])+float(correct / n_gt))/2,post_weights[key].replace(".weights","")))
            # write_ini(ini_name, Mean_Average, imagepath_list)
            break
Example #13
def evaluate(config):
    is_training = False
    # Load and initialize network
    net = ModelMain(config, is_training=is_training)
    net.train(is_training)

    # Set data parallel
    net = nn.DataParallel(net)
    net = net.cuda()

    # Restore pretrain model
    if config["pretrain_snapshot"]:
        state_dict = torch.load(config["pretrain_snapshot"])
        net.load_state_dict(state_dict)
    else:
        logging.warning("missing pretrain_snapshot!!!")

    # YOLO loss with 3 scales
    yolo_losses = []
    for i in range(3):
        yolo_losses.append(YOLOLoss(config["yolo"]["anchors"][i],
                                    config["yolo"]["classes"], (config["img_w"], config["img_h"])))

    # DataLoader
    dataloader = torch.utils.data.DataLoader(dataset=COCODataset(config["val_path"], config["img_w"]),
                                             batch_size=config["batch_size"],
                                             shuffle=True, num_workers=1, pin_memory=False)

    # Start the eval loop
    logging.info("Start eval.")
    n_gt = 0
    correct = 0
    logging.info('%s' % str(dataloader))

    gt_histro={}
    pred_histro = {}
    correct_histro = {}

    for i in range(config["yolo"]["classes"]):
        gt_histro[i] = 1
        pred_histro[i] = 1
        correct_histro[i] = 0

    # images holds all images in the batch, labels holds all labels in the batch
    for step, (images, labels) in enumerate(dataloader):
        labels = labels.cuda()
        with torch.no_grad():
            outputs = net(images)
            output_list = []
            for i in range(3):
                output_list.append(yolo_losses[i](outputs[i]))

            # Concatenate the three scales' predictions along dim 1 (dim 0 indexes the images in the batch, dim 1 the predicted boxes per image, dim 2 the per-box values)
            batch_output = torch.cat(output_list, dim=1)

            logging.info('%s' % str(batch_output.shape))

            # Apply non-maximum suppression
            batch_output = non_max_suppression(prediction=batch_output, num_classes=config["yolo"]["classes"], conf_thres=config["conf_thresh"], nms_thres=config["nms_thresh"])
            #  calculate
            for sample_index_in_batch in range(labels.size(0)):
                # fetched img sample in tensor( C(RxGxB) x H x W ), transform to cv2 format in  H x W x C(BxGxR)
                sample_image = images[sample_index_in_batch].numpy()
                sample_image = np.transpose(sample_image, (1, 2, 0))
                sample_image = cv2.cvtColor(sample_image, cv2.COLOR_RGB2BGR)

                logging.debug("fetched img %d size %s" % (sample_index_in_batch, sample_image.shape))
                # Get labels for sample where width is not zero (dummies)(init all labels to zeros in array)
                target_sample = labels[sample_index_in_batch, labels[sample_index_in_batch, :, 3] != 0]
                # get prediction for this sample
                sample_pred = batch_output[sample_index_in_batch]
                if sample_pred is not None:
                    for x1, y1, x2, y2, conf, obj_conf, obj_pred in sample_pred:  # for each prediction box
                        # logging.info("%d" % obj_cls)
                        box_pred = torch.cat([coord.unsqueeze(0) for coord in [x1, y1, x2, y2]]).view(1, -1)
                        sample_image = draw_prediction(sample_image,conf, obj_conf, int(obj_pred), (x1, y1, x2, y2), config)

                # Each ground truth: class id obj_cls, relative center x, relative center y, relative width w, relative height h
                for obj_cls, tx, ty, tw, th in target_sample:
                    # Get rescaled gt coordinates
                    # Convert to input-pixel coordinates: top-left (tx1, ty1), bottom-right (tx2, ty2)
                    tx1, tx2 = config["img_w"] * (tx - tw / 2), config["img_w"] * (tx + tw / 2)
                    ty1, ty2 = config["img_h"] * (ty - th / 2), config["img_h"] * (ty + th / 2)
                    # Count ground truths for the statistics
                    n_gt += 1
                    gt_histro[int(obj_cls)] += 1
                    # Reshape to a (1, 4) tensor for the IoU computation
                    box_gt = torch.cat([coord.unsqueeze(0) for coord in [tx1, ty1, tx2, ty2]]).view(1, -1)
                    # logging.info('%s' % str(box_gt.shape))

                    sample_pred = batch_output[sample_index_in_batch]
                    if sample_pred is not None:
                        # Iterate through predictions where the class predicted is same as gt
                        # For each ground truth, iterate over the predictions
                        for x1, y1, x2, y2, conf, obj_conf, obj_pred in sample_pred[sample_pred[:, 6] == obj_cls]:  # predicted class == ground-truth class
                            #logging.info("%d" % obj_cls)
                            box_pred = torch.cat([coord.unsqueeze(0) for coord in [x1, y1, x2, y2]]).view(1, -1)
                            pred_histro[int(obj_pred)] += 1
                            iou = bbox_iou(box_pred, box_gt)
                            if iou >= config["iou_thresh"]:
                                correct += 1
                                correct_histro[int(obj_pred)] += 1
                                break
        if n_gt:
            types = config["types"]
            reverse_types = {}  # build the reverse mapping of types
            for key in types.keys():
                reverse_types[types[key]] = key

            logging.info('Batch [%d/%d] mAP: %.5f' % (step, len(dataloader), float(correct / n_gt)))
            logging.info('Recall   :%s' % str([reverse_types[i] + ':' + str(int(100 * correct_histro[i] / gt_histro[i])) for i in range(config["yolo"]["classes"])]))
            logging.info('Precision:%s' % str([reverse_types[i] + ':' + str(int(100 * correct_histro[i] / pred_histro[i])) for i in range(config["yolo"]["classes"])]))

    logging.info('Mean Average Precision: %.5f' % float(correct / n_gt))
Example #14
def evaluate(config):
    is_training = False
    # Load and initialize network
    net = ModelMain(config, is_training=is_training)
    net.train(is_training)

    # Set data parallel
    net = nn.DataParallel(net)
    net = net.cuda()

    # Restore pretrain model
    if config["pretrain_snapshot"]:
        logging.info("Load checkpoint: {}".format(config["pretrain_snapshot"]))
        state_dict = torch.load(config["pretrain_snapshot"])
        net.load_state_dict(state_dict)
    else:
        logging.warning("missing pretrain_snapshot!!!")

    # YOLO loss with 3 scales
    yolo_losses = []
    for i in range(3):
        yolo_losses.append(
            YOLOLoss(config["yolo"]["anchors"][i], config["yolo"]["classes"],
                     (config["img_w"], config["img_h"])))

    # DataLoader.

    dataloader = torch.utils.data.DataLoader(COCODataset(
        config["val_path"], (config["img_w"], config["img_h"]),
        is_training=False),
                                             batch_size=config["batch_size"],
                                             shuffle=False,
                                             num_workers=8,
                                             pin_memory=False)

    # Coco Prepare.
    index2category = json.load(open("coco_index2category.json"))

    # Start the eval loop
    logging.info("Start eval.")
    coco_results = []
    coco_img_ids = set([])
    APs = []

    for step, samples in enumerate(dataloader):
        images, labels = samples["image"], samples["label"]
        image_paths, origin_sizes = samples["image_path"], samples[
            "origin_size"]
        with torch.no_grad():
            outputs = net(images)
            output_list = []

            for i in range(3):
                output_list.append(yolo_losses[i](outputs[i]))
            output = torch.cat(output_list, 1)
            batch_detections = non_max_suppression(output,
                                                   config["yolo"]["classes"],
                                                   conf_thres=0.0001,
                                                   nms_thres=0.45)

        for idx, detections in enumerate(batch_detections):

            correct = []
            annotations = labels[idx, labels[idx, :, 3] != 0]

            image_id = int(os.path.basename(image_paths[idx])[-16:-4])
            coco_img_ids.add(image_id)
            if detections is None:
                if annotations.size(0) != 0:
                    APs.append(0)
                continue
            detections = detections[np.argsort(-detections[:, 4])]

            origin_size = eval(origin_sizes[idx])
            detections = detections.cpu().numpy()
            # ===========================================================================================================================
            # The amount of padding that was added
            pad_x = max(origin_size[1] - origin_size[0],
                        0) * (config["img_w"] / max(origin_size))
            pad_y = max(origin_size[0] - origin_size[1],
                        0) * (config["img_w"] / max(origin_size))
            # Image height and width after padding is removed
            unpad_h = config["img_w"] - pad_y
            unpad_w = config["img_w"] - pad_x
            # ===========================================================================================================================

            if annotations.size(0) == 0:
                correct.extend([0 for _ in range(len(detections))])
            else:
                target_boxes = torch.FloatTensor(annotations[:, 1:].shape)
                target_boxes[:,
                             0] = (annotations[:, 1] - annotations[:, 3] / 2)
                target_boxes[:,
                             1] = (annotations[:, 2] - annotations[:, 4] / 2)
                target_boxes[:,
                             2] = (annotations[:, 1] + annotations[:, 3] / 2)
                target_boxes[:,
                             3] = (annotations[:, 2] + annotations[:, 4] / 2)
                target_boxes *= config["img_w"]

                detected = []

                for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                    pred_bbox = (x1, y1, x2, y2)

                    #x1 = x1 / config["img_w"] * origin_size[0]
                    #x2 = x2 / config["img_w"] * origin_size[0]
                    #y1 = y1 / config["img_h"] * origin_size[1]
                    #y2 = y2 / config["img_h"] * origin_size[1]
                    #w = x2 - x1
                    #h = y2 - y1

                    h = ((y2 - y1) / unpad_h) * origin_size[1]
                    w = ((x2 - x1) / unpad_w) * origin_size[0]
                    y1 = ((y1 - pad_y // 2) / unpad_h) * origin_size[1]
                    x1 = ((x1 - pad_x // 2) / unpad_w) * origin_size[0]

                    coco_results.append({
                        "image_id":
                        image_id,
                        "category_id":
                        index2category[str(int(cls_pred.item()))],
                        "bbox": (float(x1), float(y1), float(w), float(h)),
                        "score":
                        float(conf),
                    })

                    pred_bbox = torch.FloatTensor(pred_bbox).view(1, -1)
                    # Compute iou with target boxes
                    iou = bbox_iou(pred_bbox, target_boxes)
                    # Extract index of largest overlap
                    best_i = np.argmax(iou)
                    # If overlap exceeds threshold and classification is correct mark as correct
                    if iou[best_i] > config[
                            'iou_thres'] and cls_pred == annotations[
                                best_i, 0] and best_i not in detected:
                        correct.append(1)
                        detected.append(best_i)
                    else:
                        correct.append(0)

            true_positives = np.array(correct)
            false_positives = 1 - true_positives

            # Compute cumulative false positives and true positives
            false_positives = np.cumsum(false_positives)
            true_positives = np.cumsum(true_positives)

            # Compute recall and precision at all ranks
            recall = true_positives / annotations.size(0) if annotations.size(
                0) else true_positives
            precision = true_positives / np.maximum(
                true_positives + false_positives,
                np.finfo(np.float64).eps)

            # Compute average precision
            AP = compute_ap(recall, precision)
            APs.append(AP)

            print("+ Sample [%d/%d] AP: %.4f (%.4f)" %
                  (len(APs), 5000, AP, np.mean(APs)))
        logging.info("Now {}/{}".format(step, len(dataloader)))
    print("Mean Average Precision: %.4f" % np.mean(APs))

    save_results_path = "coco_results.json"
    with open(save_results_path, "w") as f:
        json.dump(coco_results,
                  f,
                  sort_keys=True,
                  indent=4,
                  separators=(',', ':'))
    logging.info("Save coco format results to {}".format(save_results_path))

    #  COCO api
    logging.info("Using coco-evaluate tools to evaluate.")
    cocoGt = COCO(config["annotation_path"])
    cocoDt = cocoGt.loadRes(save_results_path)
    cocoEval = COCOeval(cocoGt, cocoDt, "bbox")
    cocoEval.params.imgIds = list(coco_img_ids)  # real imgIds
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
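
compute_ap is called above without being shown. A sketch of the all-point average precision it appears to compute over the sorted detections, an assumption based on common YOLOv3 implementations rather than the repository's actual helper:

import numpy as np


def compute_ap(recall, precision):
    # Append sentinel values at both ends of the curves
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([0.0], precision, [0.0]))
    # Make the precision envelope monotonically decreasing
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Sum the areas under the envelope where recall changes
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])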
Example #15
def train(config):
    config["global_step"] = config.get("start_step", 0)
    is_training = False if config.get("export_onnx") else True

    anchors = [int(x) for x in config["yolo"]["anchors"].split(",")]
    anchors = [[[anchors[i], anchors[i + 1]], [anchors[i + 2], anchors[i + 3]],
                [anchors[i + 4], anchors[i + 5]]]
               for i in range(0, len(anchors), 6)]
    anchors.reverse()
    config["yolo"]["anchors"] = []
    for i in range(3):
        config["yolo"]["anchors"].append(anchors[i])
    # Load and initialize network
    net = ModelMain(config, is_training=is_training)
    net.train(is_training)

    # Optimizer and learning rate
    optimizer = _get_optimizer(config, net)
    t_max = 50
    # lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=t_max,eta_min=1e-05)
    lr_scheduler = optim.lr_scheduler.StepLR(
        optimizer,
        step_size=config["lr"]["decay_step"],
        gamma=config["lr"]["decay_gamma"])

    # Set data parallel
    net = nn.DataParallel(net)
    net = net.cuda()
    # Restore pretrain model
    if config["pretrain_snapshot"]:
        logging.info("Load pretrained weights from {}".format(
            config["pretrain_snapshot"]))
        state_dict = torch.load(config["pretrain_snapshot"])
        net.load_state_dict(state_dict)

    # Only export onnx
    # if config.get("export_onnx"):
    # real_model = net.module
    # real_model.eval()
    # dummy_input = torch.randn(8, 3, config["img_h"], config["img_w"]).cuda()
    # save_path = os.path.join(config["sub_working_dir"], "pytorch.onnx")
    # logging.info("Exporting onnx to {}".format(save_path))
    # torch.onnx.export(real_model, dummy_input, save_path, verbose=False)
    # logging.info("Done. Exiting now.")
    # sys.exit()

    # Evaluate interface
    # if config["evaluate_type"]:
    # logging.info("Using {} to evaluate model.".format(config["evaluate_type"]))
    # evaluate_func = importlib.import_module(config["evaluate_type"]).run_eval
    # config["online_net"] = net

    # YOLO loss with 3 scales

    # DataLoader
    dataloader = torch.utils.data.DataLoader(
        COCODataset(config["train_path"], (config["img_w"], config["img_h"]),
                    is_training=True,
                    is_scene=True),
        batch_size=config["batch_size"] * config["parallels"],
        shuffle=True,
        drop_last=True,
        num_workers=0,
        pin_memory=True)

    # Start the training loop
    logging.info("Start training.")
    dataload_len = len(dataloader)
    best_acc = 0.2
    last_recall = 0.6
    for epoch in range(config["epochs"]):
        recall = 0
        mini_step = 0
        for step, samples in enumerate(dataloader):
            start = time.time()
            images, labels = samples["image"], samples["label"]
            config["global_step"] += 1
            # Forward and backward
            optimizer.zero_grad()
            losses = net(images.cuda(), labels.cuda())

            # current_recall = mAP(detections, labels, config["img_w"])
            # current_recall = np.mean(current_recall)

            if config["parallels"] > 1:
                losses = losses.view(config["parallels"], 8)[0] + losses.view(
                    config["parallels"], 8)[1]
            loss = losses[0]
            if epoch > 0:
                loss = loss * 20
            current_recall = float(losses[7] / 3 / config["parallels"])
            # Both branches of the original if/else applied the same penalty,
            # so it is kept as a single recall-weighted term.
            loss = loss + 20 * (1 - current_recall)

            loss.backward()
            optimizer.step()
            _loss = loss.item()
            # example_per_second = config["batch_size"] / duration
            lr = optimizer.param_groups[0]['lr']
            #
            strftime = datetime.datetime.now().strftime("%H:%M:%S")
            # # if (losses[7] / 3 >= recall / (step + 1)):  # taken when mini_batch is 0
            recall += current_recall
            print(
                '%s [Epoch %d/%d,batch %03d/%d loss:x %.5f,y %.5f,w %.5f,h %.5f,conf %.5f,cls %.5f,total %.5f,rec %.3f,avrec %.3f %.3f]'
                % (strftime, epoch, config["epochs"], step, dataload_len,
                   losses[1], losses[2], losses[3], losses[4], losses[5],
                   losses[6], _loss, current_recall, recall / (step + 1), lr))
        last_recall = recall / len(dataloader)
        if recall / len(dataloader) > best_acc:
            best_acc = recall / len(dataloader)
            torch.save(
                net.state_dict(), '%s/%.4f_%04d.weights' %
                (checkpoint_dir, recall / len(dataloader), epoch))

        lr_scheduler.step()
        # if epoch % (lr_scheduler.T_max + next_need) == (lr_scheduler.T_max + next_need - 1):
        #     next_need += float(lr_scheduler.T_max)
        #     lr_scheduler.T_max += 2
        #     lr_scheduler.last_epoch = 0
        # lr_scheduler.base_lrs*=0.98
        # lr_scheduler.base_lrs[0] *= 0.95
        # lr_scheduler.base_lrs[1] *= 0.95

        # net.train(is_training)
        # torch.cuda.empty_cache()
    # net.train(True)
    logging.info("Bye bye")