Example #1
def train(opt):
    if torch.cuda.is_available():
        torch.cuda.manual_seed(123)
    else:
        torch.manual_seed(123)
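    # Step LR schedule: each key is the epoch index (as a string) at which the
    # learning rate is reset to the corresponding value.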
    learning_rate_schedule = {"0": 1e-5, "5": 1e-4, "80": 1e-5, "110": 1e-6}

    # Anchor priors as (width, height) in grid-cell units. An earlier
    # hand-picked list was dead code (immediately overwritten), so only the
    # set actually used is kept.
    twoShapesAnchors = [(1.3221, 1.73145), (3.19275, 4.00944),
                        (5.05587, 8.09892), (9.47112, 4.84053),
                        (11.2364, 10.0071)]
    training_params = {
        "batch_size": opt.batch_size,
        "shuffle": True,
        "drop_last": True,
        "collate_fn": custom_collate_fn
    }

    test_params = {
        "batch_size": opt.batch_size,
        "shuffle": False,
        "drop_last": False,
        "collate_fn": custom_collate_fn
    }

    training_set = TwoShapesDataset(root_path=opt.data_path,
                                    mode=opt.train_set,
                                    trainingSet="annotations",
                                    image_size=opt.image_size)
    training_generator = DataLoader(training_set, **training_params)

    test_set = TwoShapesDataset(root_path=opt.data_path,
                                mode=opt.test_set,
                                trainingSet="annotations",
                                image_size=opt.image_size,
                                is_training=False)
    test_generator = DataLoader(test_set, **test_params)

    if torch.cuda.is_available():
        if opt.pre_trained_model_type == "model":
            model = torch.load(opt.pre_trained_model_path)
        elif opt.pre_trained_model_type == "params":
            model = Yolo(training_set.num_classes, twoShapesAnchors)
            model.load_state_dict(torch.load(opt.pre_trained_model_path))
        else:
            print("Just loading the model definition")
            model = Yolo(training_set.num_classes, twoShapesAnchors)
    else:
        if opt.pre_trained_model_type == "model":
            model = torch.load(opt.pre_trained_model_path,
                               map_location=lambda storage, loc: storage)
        elif opt.pre_trained_model_type == "params":
            model = Yolo(training_set.num_classes, twoShapesAnchors)
            model.load_state_dict(
                torch.load(opt.pre_trained_model_path,
                           map_location=lambda storage, loc: storage))
        else:
            print("Just loading the model definition")
            model = Yolo(training_set.num_classes, twoShapesAnchors)
    # The following line re-initializes the weights of the last layer, which is
    # useful when you retrain the model from previously trained weights;
    # without it, the loss is already very small at the beginning because the
    # detection head is pretrained.
    # nn.init.normal_(list(model.modules())[-1].weight, 0, 0.01)

    # model.apply(weights_init)

    log_path = os.path.join(opt.log_path, "test1")
    if os.path.isdir(log_path):
        shutil.rmtree(log_path)
    os.makedirs(log_path)
    writer = SummaryWriter(log_path)
    if torch.cuda.is_available():
        writer.add_graph(
            model.cpu(),
            torch.rand(opt.batch_size, 3, opt.image_size, opt.image_size))
        model.cuda()
    else:
        writer.add_graph(
            model, torch.rand(opt.batch_size, 3, opt.image_size,
                              opt.image_size))
    criterion = YoloLoss(training_set.num_classes, model.anchors,
                         opt.reduction)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=1e-5,
                                momentum=opt.momentum,
                                weight_decay=opt.decay)
    best_loss = 1e10
    best_epoch = 0
    model.train()
    num_iter_per_epoch = len(training_generator)
    for epoch in range(opt.num_epoches):
        if str(epoch) in learning_rate_schedule.keys():
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate_schedule[str(epoch)]
        for iter, batch in enumerate(training_generator):
            image, label = batch
            if torch.cuda.is_available():
                image = Variable(image.cuda(), requires_grad=True)
            else:
                image = Variable(image, requires_grad=True)
            optimizer.zero_grad()
            logits = model(image)
            loss, loss_coord, loss_conf, loss_cls = criterion(logits, label)
            loss.backward()
            optimizer.step()
            print(
                "Epoch: {}/{}, Iteration: {}/{}, Lr: {}, Loss:{:.2f} (Coord:{:.2f} Conf:{:.2f} Cls:{:.2f})"
                .format(epoch + 1, opt.num_epoches, iter + 1,
                        num_iter_per_epoch, optimizer.param_groups[0]['lr'],
                        loss, loss_coord, loss_conf, loss_cls))
            writer.add_scalar('Train/Total_loss', loss,
                              epoch * num_iter_per_epoch + iter)
            writer.add_scalar('Train/Coordination_loss', loss_coord,
                              epoch * num_iter_per_epoch + iter)
            writer.add_scalar('Train/Confidence_loss', loss_conf,
                              epoch * num_iter_per_epoch + iter)
            writer.add_scalar('Train/Class_loss', loss_cls,
                              epoch * num_iter_per_epoch + iter)
        if epoch % opt.test_interval == 0:
            model.eval()
            loss_ls = []
            loss_coord_ls = []
            loss_conf_ls = []
            loss_cls_ls = []
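            # Weight each batch loss by its sample count so the averages below
            # are true per-sample means even when the last batch is smaller.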
            for te_iter, te_batch in enumerate(test_generator):
                te_image, te_label = te_batch
                num_sample = len(te_label)
                if torch.cuda.is_available():
                    te_image = te_image.cuda()
                with torch.no_grad():
                    te_logits = model(te_image)
                    batch_loss, batch_loss_coord, batch_loss_conf, batch_loss_cls = criterion(
                        te_logits, te_label)
                loss_ls.append(batch_loss * num_sample)
                loss_coord_ls.append(batch_loss_coord * num_sample)
                loss_conf_ls.append(batch_loss_conf * num_sample)
                loss_cls_ls.append(batch_loss_cls * num_sample)
            te_loss = sum(loss_ls) / len(test_set)
            te_coord_loss = sum(loss_coord_ls) / len(test_set)
            te_conf_loss = sum(loss_conf_ls) / len(test_set)
            te_cls_loss = sum(loss_cls_ls) / len(test_set)
            print(
                "Epoch: {}/{}, Lr: {}, Loss:{:.2f} (Coord:{:.2f} Conf:{:.2f} Cls:{:.2f})"
                .format(epoch + 1, opt.num_epoches,
                        optimizer.param_groups[0]['lr'], te_loss,
                        te_coord_loss, te_conf_loss, te_cls_loss))
            writer.add_scalar('Test/Total_loss', te_loss, epoch)
            writer.add_scalar('Test/Coordination_loss', te_coord_loss, epoch)
            writer.add_scalar('Test/Confidence_loss', te_conf_loss, epoch)
            writer.add_scalar('Test/Class_loss', te_cls_loss, epoch)
            model.train()
            if te_loss + opt.es_min_delta < best_loss:
                best_loss = te_loss
                best_epoch = epoch
                # torch.save(model, opt.saved_path + os.sep + "trained_yolo_coco")
                torch.save(
                    model.state_dict(),
                    opt.saved_path + os.sep + "only_params_trained_yolo_coco")
                torch.save(model, opt.saved_path + os.sep + opt.save_file)

            # Early stopping
            if epoch - best_epoch > opt.es_patience > 0:
                print(
                    "Stop training at epoch {}. The lowest loss achieved is {}"
                    .format(epoch, best_loss))
                break
    writer.export_scalars_to_json(log_path + os.sep + "all_logs.json")
    writer.close()
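
A note on custom_collate_fn, which every example passes to DataLoader but none
defines: detection labels vary in length per image, so the default collate
cannot stack them into one tensor. A minimal sketch of such a function (the
name comes from the examples; the body is an assumption, not the original
implementation):

import torch

def custom_collate_fn(batch):
    # Stack images into one (B, 3, H, W) tensor, but keep labels as a plain
    # list, since each image may contain a different number of boxes.
    images, labels = zip(*batch)
    return torch.stack(images, dim=0), list(labels)
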
Example #2
def train(opt):
    if torch.cuda.is_available():
        torch.cuda.manual_seed(123)
    else:
        torch.manual_seed(123)
    learning_rate_schedule = {"0": 1e-5, "5": 1e-4, "80": 1e-5, "110": 1e-6}
    training_params = {
        "batch_size": opt.batch_size,
        "shuffle": True,
        "drop_last": True,
        "collate_fn": custom_collate_fn,
        "num_workers": 8
    }

    test_params = {
        "batch_size": opt.batch_size,
        "shuffle": False,
        "drop_last": False,
        "collate_fn": custom_collate_fn,
        "num_workers": 8
    }

    training_set = VOCDataset('data/norm_train_image/',
                              'data/train_labels.csv',
                              data_augmentation=True)
    training_generator = DataLoader(training_set, **training_params)

    test_set = VOCDataset('data/norm_train_image/',
                          'data/val_labels.csv',
                          data_augmentation=False)
    test_generator = DataLoader(test_set, **test_params)

    model = Yolo(1)  # num_classes = 1
    # model.load_state_dict(torch.load(opt.pre_trained_model_path))

    # Re-initialize the weights of the last layer; this is useful when
    # retraining from previously trained weights, where the pretrained head
    # would otherwise make the loss very small at the beginning.
    nn.init.normal_(list(model.modules())[-1].weight, 0, 0.01)
    log_path = os.path.join(opt.log_path, "{}".format(opt.year))
    if os.path.isdir(log_path):
        shutil.rmtree(log_path)
    os.makedirs(log_path)
    writer = SummaryWriter(log_path)
    if torch.cuda.is_available():
        writer.add_graph(
            model.cpu(),
            torch.rand(opt.batch_size, 3, opt.image_size, opt.image_size))
        model.cuda()
    else:
        writer.add_graph(
            model, torch.rand(opt.batch_size, 3, opt.image_size,
                              opt.image_size))
    criterion = YoloLoss(training_set.num_classes, model.anchors,
                         opt.reduction)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=1e-5,
                                momentum=opt.momentum,
                                weight_decay=opt.decay)

    best_loss = 1e10
    best_epoch = 0
    model.train()
    num_iter_per_epoch = len(training_generator)
    for epoch in range(opt.num_epoches):
        if str(epoch) in learning_rate_schedule.keys():
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate_schedule[str(epoch)]

        epoch_start_time = time.time()
        train_loss = 0

        for iter, batch in enumerate(training_generator):
            image, label = batch
            if torch.cuda.is_available():
                image = Variable(image.cuda(), requires_grad=True)
            else:
                image = Variable(image, requires_grad=True)
            optimizer.zero_grad()
            logits = model(image)
            loss, loss_coord, loss_conf, loss_cls = criterion(logits, label)
            # print('***loss***', loss)
            loss.backward()
            optimizer.step()
            # print("Epoch: {}/{}, Iteration: {}/{}, Lr: {}, Loss:{:.2f} (Coord:{:.2f} Conf:{:.2f} Cls:{:.2f})".format(
            #     epoch + 1,
            #     opt.num_epoches,
            #     iter + 1,
            #     num_iter_per_epoch,
            #     optimizer.param_groups[0]['lr'],
            #     loss,
            #     loss_coord,
            #     loss_conf,
            #     loss_cls))

            train_loss += loss.item()

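            # Render an in-place progress bar: print it, then emit backspaces
            # so the next iteration overwrites the same line.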
            progress = (u"\u2588" * (int(
                float(iter + 1) / num_iter_per_epoch * 40))).ljust(40, '.')
            msg = '[%03d/%03d] %2.2f sec(s) |%s| Current Loss:%.2f' % (
                iter + 1, num_iter_per_epoch,
                (time.time() - epoch_start_time), progress, loss)
            print(msg, end='', flush=True)
            back = '\b' * len(msg)
            print(back, end='', flush=True)

            writer.add_scalar('Train/Total_loss', loss,
                              epoch * num_iter_per_epoch + iter)
            writer.add_scalar('Train/Coordination_loss', loss_coord,
                              epoch * num_iter_per_epoch + iter)
            writer.add_scalar('Train/Confidence_loss', loss_conf,
                              epoch * num_iter_per_epoch + iter)
            writer.add_scalar('Train/Class_loss', loss_cls,
                              epoch * num_iter_per_epoch + iter)

        if epoch % opt.test_interval == 0:
            model.eval()
            loss_ls = []
            loss_coord_ls = []
            loss_conf_ls = []
            loss_cls_ls = []
            for te_iter, te_batch in enumerate(test_generator):
                te_image, te_label = te_batch
                num_sample = len(te_label)
                if torch.cuda.is_available():
                    te_image = te_image.cuda()
                with torch.no_grad():
                    te_logits = model(te_image)
                    batch_loss, batch_loss_coord, batch_loss_conf, batch_loss_cls = criterion(
                        te_logits, te_label)
                loss_ls.append(batch_loss * num_sample)
                loss_coord_ls.append(batch_loss_coord * num_sample)
                loss_conf_ls.append(batch_loss_conf * num_sample)
                loss_cls_ls.append(batch_loss_cls * num_sample)

            te_loss = sum(loss_ls) / len(test_set)
            te_coord_loss = sum(loss_coord_ls) / len(test_set)
            te_conf_loss = sum(loss_conf_ls) / len(test_set)
            te_cls_loss = sum(loss_cls_ls) / len(test_set)

            print(
                "\nEpoch: {}/{}, Lr: {}, Train Loss:{:.2f} Val Loss:{:.2f} (Coord:{:.2f} Conf:{:.2f} Cls:{:.2f})"
                .format(epoch + 1, opt.num_epoches,
                        optimizer.param_groups[0]['lr'],
                        train_loss / num_iter_per_epoch, te_loss,
                        te_coord_loss, te_conf_loss, te_cls_loss))

            writer.add_scalar('Test/Total_loss', te_loss, epoch)
            writer.add_scalar('Test/Coordination_loss', te_coord_loss, epoch)
            writer.add_scalar('Test/Confidence_loss', te_conf_loss, epoch)
            writer.add_scalar('Test/Class_loss', te_cls_loss, epoch)

            model.train()
            if te_loss + opt.es_min_delta < best_loss:
                best_loss = te_loss
                best_epoch = epoch
                # torch.save(model, opt.saved_path + os.sep + "trained_yolo_voc")
                torch.save(
                    model.state_dict(),
                    opt.saved_path + os.sep + "only_params_trained_yolo_voc")
                torch.save(
                    model,
                    opt.saved_path + os.sep + "whole_model_trained_yolo_voc")
                print("***Best Model Updated!***")
            # Early stopping
            if epoch - best_epoch > opt.es_patience > 0:
                print(
                    "Stop training at epoch {}. The lowest loss achieved is {}"
                    .format(epoch, best_loss))
                break

    writer.export_scalars_to_json(log_path + os.sep + "all_logs.json")
    writer.close()
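
Every example receives an opt namespace that is read but never constructed
here. A hedged argparse sketch covering a representative subset of the fields
these train() functions use (the names are taken from the code above; every
default is a guess, not from the source):

import argparse

def get_args():
    parser = argparse.ArgumentParser("YOLO training options (sketch)")
    parser.add_argument("--batch_size", type=int, default=16)
    parser.add_argument("--image_size", type=int, default=448)
    parser.add_argument("--num_epoches", type=int, default=160)
    parser.add_argument("--momentum", type=float, default=0.9)
    parser.add_argument("--decay", type=float, default=0.0005)
    parser.add_argument("--reduction", type=int, default=32)     # grid stride used by YoloLoss
    parser.add_argument("--test_interval", type=int, default=1)  # evaluate every N epochs
    parser.add_argument("--es_min_delta", type=float, default=0.0)
    parser.add_argument("--es_patience", type=int, default=0)    # 0 disables early stopping
    parser.add_argument("--log_path", type=str, default="tensorboard")
    parser.add_argument("--saved_path", type=str, default="trained_models")
    return parser.parse_args()
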
Example #3
def train(opt):
    if not os.path.isdir(opt.saved_path):
        os.makedirs(opt.saved_path)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_devices
    if torch.cuda.is_available():
        torch.cuda.manual_seed(123)
    else:
        torch.manual_seed(123)
    learning_rate_schedule = {"0": 1e-5, "5": 1e-4, "80": 1e-5, "110": 1e-6}
    training_params = {
        "batch_size": opt.batch_size,
        "shuffle": True,
        "drop_last": True,
        "collate_fn": custom_collate_fn
    }

    test_params = {
        "batch_size": opt.batch_size,
        "shuffle": False,
        "drop_last": False,
        "collate_fn": custom_collate_fn
    }

    training_set = BOTDataset_scene(opt.data_path,
                                    opt.train_set,
                                    opt.image_size,
                                    scene=2)
    training_generator = DataLoader(training_set, **training_params)

    test_set = BOTDataset_scene(opt.data_path,
                                opt.test_set,
                                opt.image_size,
                                scene=2,
                                is_training=False)
    test_generator = DataLoader(test_set, **test_params)

    if torch.cuda.is_available():
        if opt.pre_trained_model_type == "model":
            model = torch.load(opt.pre_trained_model_path)
        else:
            # model = Yolo(training_set.num_classes)
            # model.load_state_dict(torch.load(opt.pre_trained_model_path))
            print('Total number of classes:', training_set.num_classes)
            model = Yolo_Person2(training_set.num_classes)
            pretrained_dict = torch.load(opt.pre_trained_model_path)
            model_dict = model.state_dict()
            pretrained_dict = {
                k: v
                for k, v in pretrained_dict.items() if k in model_dict
            }
            model_dict.update(pretrained_dict)
            model.load_state_dict(model_dict)
            print('Successfully initialized the BOT single-class model')

    else:
        if opt.pre_trained_model_type == "model":
            model = torch.load(opt.pre_trained_model_path,
                               map_location=lambda storage, loc: storage)
        else:
            model = Yolo(training_set.num_classes)
            model.load_state_dict(
                torch.load(opt.pre_trained_model_path,
                           map_location=lambda storage, loc: storage))
    # Re-initialize the weights of the last layer; this is useful when
    # retraining from previously trained weights, where the pretrained head
    # would otherwise make the loss very small at the beginning.
    nn.init.normal_(list(model.modules())[-1].weight, 0, 0.01)
    log_path = os.path.join(opt.log_path, "{}".format(opt.year))
    if os.path.isdir(log_path):
        shutil.rmtree(log_path)
    os.makedirs(log_path)
    writer = SummaryWriter(log_path)
    if torch.cuda.is_available():
        writer.add_graph(
            model.cpu(),
            torch.rand(opt.batch_size, 3, opt.image_size, opt.image_size))
        model.cuda()
    else:
        writer.add_graph(
            model, torch.rand(opt.batch_size, 3, opt.image_size,
                              opt.image_size))
    criterion = YoloLoss(training_set.num_classes, model.anchors,
                         opt.reduction)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=1e-5,
                                momentum=opt.momentum,
                                weight_decay=opt.decay)
    best_loss = 1e10
    best_epoch = 0
    model.train()
    num_iter_per_epoch = len(training_generator)
    for epoch in range(opt.num_epoches):
        if str(epoch) in learning_rate_schedule.keys():
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate_schedule[str(epoch)]
        for iter, batch in enumerate(training_generator):
            image, label = batch
            if torch.cuda.is_available():
                image = Variable(image.cuda(), requires_grad=True)
            else:
                image = Variable(image, requires_grad=True)
            optimizer.zero_grad()
            logits = model(image)
            loss, loss_coord, loss_conf, loss_cls = criterion(logits, label)
            loss.backward()
            optimizer.step()
            print(
                "Epoch: {}/{}, Iteration: {}/{}, Lr: {}, Loss:{:.2f} (Coord:{:.2f} Conf:{:.2f} Cls:{:.2f})"
                .format(epoch + 1, opt.num_epoches, iter + 1,
                        num_iter_per_epoch, optimizer.param_groups[0]['lr'],
                        loss, loss_coord, loss_conf, loss_cls))
            writer.add_scalar('Train/Total_loss', loss,
                              epoch * num_iter_per_epoch + iter)
            writer.add_scalar('Train/Coordination_loss', loss_coord,
                              epoch * num_iter_per_epoch + iter)
            writer.add_scalar('Train/Confidence_loss', loss_conf,
                              epoch * num_iter_per_epoch + iter)
            writer.add_scalar('Train/Class_loss', loss_cls,
                              epoch * num_iter_per_epoch + iter)
        if epoch % opt.test_interval == 0:
            model.eval()
            loss_ls = []
            loss_coord_ls = []
            loss_conf_ls = []
            loss_cls_ls = []
            for te_iter, te_batch in enumerate(test_generator):
                te_image, te_label = te_batch
                num_sample = len(te_label)
                if torch.cuda.is_available():
                    te_image = te_image.cuda()
                with torch.no_grad():
                    te_logits = model(te_image)
                    batch_loss, batch_loss_coord, batch_loss_conf, batch_loss_cls = criterion(
                        te_logits, te_label)
                loss_ls.append(batch_loss * num_sample)
                loss_coord_ls.append(batch_loss_coord * num_sample)
                loss_conf_ls.append(batch_loss_conf * num_sample)
                loss_cls_ls.append(batch_loss_cls * num_sample)
            te_loss = sum(loss_ls) / len(test_set)
            te_coord_loss = sum(loss_coord_ls) / len(test_set)
            te_conf_loss = sum(loss_conf_ls) / len(test_set)
            te_cls_loss = sum(loss_cls_ls) / len(test_set)
            print(
                "Epoch: {}/{}, Lr: {}, Loss:{:.2f} (Coord:{:.2f} Conf:{:.2f} Cls:{:.2f})"
                .format(epoch + 1, opt.num_epoches,
                        optimizer.param_groups[0]['lr'], te_loss,
                        te_coord_loss, te_conf_loss, te_cls_loss))
            writer.add_scalar('Test/Total_loss', te_loss, epoch)
            writer.add_scalar('Test/Coordination_loss', te_coord_loss, epoch)
            writer.add_scalar('Test/Confidence_loss', te_conf_loss, epoch)
            writer.add_scalar('Test/Class_loss', te_cls_loss, epoch)
            model.train()
            if te_loss + opt.es_min_delta < best_loss:
                best_loss = te_loss
                best_epoch = epoch
                # torch.save(model, opt.saved_path + os.sep + "trained_yolo_voc")
                torch.save(
                    model.state_dict(),
                    opt.saved_path + os.sep + "only_params_trained_yolo_bot")
                torch.save(
                    model,
                    opt.saved_path + os.sep + "whole_model_trained_yolo_bot")

            # Early stopping
            if epoch - best_epoch > opt.es_patience > 0:
                print(
                    "Stop training at epoch {}. The lowest loss achieved is {}"
                    .format(epoch, best_loss))
                break
    writer.export_scalars_to_json(log_path + os.sep + "all_logs.json")
    writer.close()
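
Example #3's partial checkpoint loading (keep only the pretrained entries whose
keys also exist in the new model) is a reusable pattern. A sketch that factors
it out, adding a shape check the original omits; without the check,
load_state_dict can still fail when a layer keeps its name but changes size,
as the detection head does when num_classes changes:

import torch

def load_matching_weights(model, checkpoint_path, map_location=None):
    # Merge in only the checkpoint tensors whose name AND shape match the
    # target model; return the names of the entries that were skipped.
    pretrained = torch.load(checkpoint_path, map_location=map_location)
    own = model.state_dict()
    compatible = {k: v for k, v in pretrained.items()
                  if k in own and v.shape == own[k].shape}
    own.update(compatible)
    model.load_state_dict(own)
    return sorted(set(pretrained) - set(compatible))
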
Example #4
def train(opt):
    if torch.cuda.is_available():
        torch.cuda.manual_seed(123)
    else:
        torch.manual_seed(123)
    learning_rate_schedule = {"0": 1e-5, "5": 1e-4, "80": 1e-5, "110": 1e-6}
    training_params = {
        "batch_size": opt.batch_size,
        "shuffle": True,
        "drop_last": True,
        "collate_fn": custom_collate_fn
    }

    test_params = {
        "batch_size": opt.batch_size,
        "shuffle": False,
        "drop_last": False,
        "collate_fn": custom_collate_fn
    }

    training_set = VOCDataset(opt.data_path, opt.year, opt.train_set,
                              opt.image_size)
    training_generator = DataLoader(training_set, **training_params)

    test_set = VOCDataset(opt.data_path,
                          opt.year,
                          opt.test_set,
                          opt.image_size,
                          is_training=False)
    test_generator = DataLoader(test_set, **test_params)

    model = Yolo(training_set.num_classes)
    log_path = os.path.join(opt.log_path, "{}".format(opt.year))
    if os.path.isdir(log_path):
        shutil.rmtree(log_path)
    os.makedirs(log_path)
    writer = SummaryWriter(log_path)
    if torch.cuda.is_available():
        writer.add_graph(
            model.cpu(),
            torch.rand(opt.batch_size, 3, opt.image_size, opt.image_size))
        model.cuda()
    else:
        writer.add_graph(
            model, torch.rand(opt.batch_size, 3, opt.image_size,
                              opt.image_size))
    criterion = YoloLoss(training_set.num_classes, model.anchors,
                         opt.reduction)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=1e-5,
                                momentum=opt.momentum,
                                weight_decay=opt.decay)
    best_loss = 1e10
    best_epoch = 0
    model.train()
    num_iter_per_epoch = len(training_generator)
    for epoch in range(opt.num_epoches):
        if str(epoch) in learning_rate_schedule.keys():
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate_schedule[str(epoch)]
        for iter, batch in enumerate(training_generator):
            image, label = batch
            if torch.cuda.is_available():
                image = Variable(image.cuda(), requires_grad=True)
            else:
                image = Variable(image, requires_grad=True)
            optimizer.zero_grad()
            logits = model(image)
            loss, loss_coord, loss_conf, loss_cls = criterion(logits, label)
            loss.backward()
            optimizer.step()
            print(
                "Epoch: {}/{}, Iteration: {}/{}, Lr: {}, Loss:{:.2f} (Coord:{:.2f} Conf:{:.2f} Cls:{:.2f})"
                .format(epoch + 1, opt.num_epoches, iter + 1,
                        num_iter_per_epoch, optimizer.param_groups[0]['lr'],
                        loss, loss_coord, loss_conf, loss_cls))
            writer.add_scalar('Train/Total_loss', loss,
                              epoch * num_iter_per_epoch + iter)
            writer.add_scalar('Train/Coordination_loss', loss_coord,
                              epoch * num_iter_per_epoch + iter)
            writer.add_scalar('Train/Confidence_loss', loss_conf,
                              epoch * num_iter_per_epoch + iter)
            writer.add_scalar('Train/Class_loss', loss_cls,
                              epoch * num_iter_per_epoch + iter)
        if epoch % opt.test_interval == 0:
            model.eval()
            loss_ls = []
            loss_coord_ls = []
            loss_conf_ls = []
            loss_cls_ls = []
            for te_iter, te_batch in enumerate(test_generator):
                te_image, te_label = te_batch
                num_sample = len(te_label)
                if torch.cuda.is_available():
                    te_image = te_image.cuda()
                with torch.no_grad():
                    te_logits = model(te_image)
                    batch_loss, batch_loss_coord, batch_loss_conf, batch_loss_cls = criterion(
                        te_logits, te_label)
                loss_ls.append(batch_loss * num_sample)
                loss_coord_ls.append(batch_loss_coord * num_sample)
                loss_conf_ls.append(batch_loss_conf * num_sample)
                loss_cls_ls.append(batch_loss_cls * num_sample)
            te_loss = sum(loss_ls) / len(test_set)
            te_coord_loss = sum(loss_coord_ls) / len(test_set)
            te_conf_loss = sum(loss_conf_ls) / len(test_set)
            te_cls_loss = sum(loss_cls_ls) / len(test_set)
            print(
                "Epoch: {}/{}, Lr: {}, Loss:{:.2f} (Coord:{:.2f} Conf:{:.2f} Cls:{:.2f})"
                .format(epoch + 1, opt.num_epoches,
                        optimizer.param_groups[0]['lr'], te_loss,
                        te_coord_loss, te_conf_loss, te_cls_loss))
            writer.add_scalar('Test/Total_loss', te_loss, epoch)
            writer.add_scalar('Test/Coordination_loss', te_coord_loss, epoch)
            writer.add_scalar('Test/Confidence_loss', te_conf_loss, epoch)
            writer.add_scalar('Test/Class_loss', te_cls_loss, epoch)
            model.train()
            if te_loss + opt.es_min_delta < best_loss:
                best_loss = te_loss
                best_epoch = epoch
                # torch.save(model, opt.saved_path + os.sep + "trained_yolo_voc")
                torch.save(
                    model.state_dict(),
                    opt.saved_path + os.sep + "only_params_trained_yolo_voc")
                torch.save(
                    model,
                    opt.saved_path + os.sep + "whole_model_trained_yolo_voc")

            # Early stopping
            if epoch - best_epoch > opt.es_patience > 0:
                print(
                    "Stop training at epoch {}. The lowest loss achieved is {}"
                    .format(epoch, best_loss))
                break
    writer.export_scalars_to_json(log_path + os.sep + "all_logs.json")
    writer.close()
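
The evaluation block is identical in all four examples: each batch loss is
weighted by its sample count, then the sum is divided by the dataset size,
giving a per-sample mean that stays correct when the final batch is smaller.
A hedged helper that factors out this repeated logic (the names are
assumptions, not from the source):

import torch

def evaluate(model, criterion, loader, dataset_size):
    # Return per-sample means of the (total, coord, conf, cls) losses.
    model.eval()
    totals = [0.0, 0.0, 0.0, 0.0]
    with torch.no_grad():
        for images, labels in loader:
            if torch.cuda.is_available():
                images = images.cuda()
            losses = criterion(model(images), labels)
            for i, batch_loss in enumerate(losses):
                totals[i] += float(batch_loss) * len(labels)
    model.train()
    return [t / dataset_size for t in totals]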