def main(parser_data):
    device = torch.device(
        parser_data.device if torch.cuda.is_available() else "cpu")
    print("Using {} device training.".format(device.type))

    if not os.path.exists("save_weights"):
        os.mkdir("save_weights")

    results_file = "results{}.txt".format(
        datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))

    data_transform = {
        "train":
        transform.Compose([
            transform.SSDCropping(),
            transform.Resize(),
            transform.ColorJitter(),
            transform.ToTensor(),
            transform.RandomHorizontalFlip(),
            transform.Normalization(),
            transform.AssignGTtoDefaultBox()
        ]),
        "val":
        transform.Compose([
            transform.Resize(),
            transform.ToTensor(),
            transform.Normalization()
        ])
    }

    VOC_root = parser_data.data_path
    # check voc root
    if not os.path.exists(os.path.join(VOC_root, "VOCdevkit")):
        raise FileNotFoundError(
            "VOCdevkit does not exist in path: '{}'.".format(VOC_root))

    train_dataset = VOC2012DataSet(VOC_root,
                                   data_transform['train'],
                                   train_set='train.txt')
    # Note: batch_size must be greater than 1 during training
    batch_size = parser_data.batch_size
    assert batch_size > 1, "batch size must be greater than 1"
    # If the last batch would contain only a single sample, drop it
    drop_last = len(train_dataset) % batch_size == 1
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0,
              8])  # number of workers
    print('Using %g dataloader workers' % nw)
    train_data_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=nw,
        collate_fn=train_dataset.collate_fn,
        drop_last=drop_last)

    val_dataset = VOC2012DataSet(VOC_root,
                                 data_transform['val'],
                                 train_set='val.txt')
    val_data_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=nw,
        collate_fn=train_dataset.collate_fn)

    model = create_model(num_classes=parser_data.num_classes + 1, device=device)

    # define optimizer
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=0.0005,
                                momentum=0.9,
                                weight_decay=0.0005)
    # learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=5,
                                                   gamma=0.3)

    # If a checkpoint from a previous run is specified, resume training from it
    if parser_data.resume != "":
        checkpoint = torch.load(parser_data.resume)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        parser_data.start_epoch = checkpoint['epoch'] + 1
        print("the training process from epoch{}...".format(
            parser_data.start_epoch))

    train_loss = []
    learning_rate = []
    val_map = []

    # Load the validation annotations once up front so they are not re-read on every evaluation
    val_data = get_coco_api_from_dataset(val_data_loader.dataset)
    for epoch in range(parser_data.start_epoch, parser_data.epochs):
        mean_loss, lr = utils.train_one_epoch(model=model,
                                              optimizer=optimizer,
                                              data_loader=train_data_loader,
                                              device=device,
                                              epoch=epoch,
                                              print_freq=50)
        train_loss.append(mean_loss.item())
        learning_rate.append(lr)

        # update learning rate
        lr_scheduler.step()

        coco_info = utils.evaluate(model=model,
                                   data_loader=val_data_loader,
                                   device=device,
                                   data_set=val_data)

        # write into txt
        with open(results_file, "a") as f:
            result_info = [
                str(round(i, 4)) for i in coco_info + [mean_loss.item(), lr]
            ]
            txt = "epoch:{} {}".format(epoch, '  '.join(result_info))
            f.write(txt + "\n")

        val_map.append(coco_info[1])  # pascal mAP

        # save weights
        save_files = {
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'lr_scheduler': lr_scheduler.state_dict(),
            'epoch': epoch
        }
        torch.save(save_files, "./save_weights/ssd300-{}.pth".format(epoch))

    # plot loss and lr curve
    if len(train_loss) != 0 and len(learning_rate) != 0:
        from plot_curve import plot_loss_and_lr
        plot_loss_and_lr(train_loss, learning_rate)

    # plot mAP curve
    if len(val_map) != 0:
        from plot_curve import plot_map
        plot_map(val_map)
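This entry point only reads a handful of attributes from parser_data (device, data_path, num_classes, batch_size, start_epoch, epochs, resume). A minimal sketch of how such a namespace might be built with argparse; the flag names and defaults below are illustrative assumptions, not the project's confirmed CLI:

import argparse

# Hypothetical CLI matching the fields read above; names and defaults are
# assumptions for illustration only.
def parse_args():
    parser = argparse.ArgumentParser(description="SSD300 single-GPU training")
    parser.add_argument('--device', default='cuda:0', help='training device')
    parser.add_argument('--data-path', default='./', help='directory containing VOCdevkit')
    parser.add_argument('--num-classes', type=int, default=20, help='number of foreground classes (background excluded)')
    parser.add_argument('--batch-size', type=int, default=8)
    parser.add_argument('--start-epoch', type=int, default=0)
    parser.add_argument('--epochs', type=int, default=15)
    parser.add_argument('--resume', default='', help='checkpoint to resume from')
    return parser.parse_args()


if __name__ == '__main__':
    main(parse_args())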
def main(args):
    print(args)
    # mp.spawn(main_worker, args=(args,), nprocs=args.world_size, join=True)
    utils.init_distributed_mode(args)

    device = torch.device(args.device)

    # Data loading code
    print("Loading data")

    data_transform = {
        "train": transform.Compose([transform.SSDCropping(),
                                    transform.Resize(),
                                    transform.ColorJitter(),
                                    transform.ToTensor(),
                                    transform.RandomHorizontalFlip(),
                                    transform.Normalization(),
                                    transform.AssignGTtoDefaultBox()]),
        "val": transform.Compose([transform.Resize(),
                                  transform.ToTensor(),
                                  transform.Normalization()])
    }

    VOC_root = args.data_path
    # load train data set
    train_data_set = VOC2012DataSet(VOC_root, data_transform["train"], train_set='train.txt')

    # load validation data set
    val_data_set = VOC2012DataSet(VOC_root, data_transform["val"], train_set='val.txt')

    print("Creating data loaders")
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_data_set)
        test_sampler = torch.utils.data.distributed.DistributedSampler(val_data_set)
    else:
        train_sampler = torch.utils.data.RandomSampler(train_data_set)
        test_sampler = torch.utils.data.SequentialSampler(val_data_set)

    if args.aspect_ratio_group_factor >= 0:
        # Compute, for each image, the index of the aspect-ratio bin it falls into
        group_ids = create_aspect_ratio_groups(train_data_set, k=args.aspect_ratio_group_factor)
        train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, args.batch_size)
    else:
        train_batch_sampler = torch.utils.data.BatchSampler(
            train_sampler, args.batch_size, drop_last=True)

    data_loader = torch.utils.data.DataLoader(
        train_data_set, batch_sampler=train_batch_sampler, num_workers=args.workers,
        collate_fn=utils.collate_fn)

    data_loader_test = torch.utils.data.DataLoader(
        val_data_set, batch_size=4,
        sampler=test_sampler, num_workers=args.workers,
        collate_fn=utils.collate_fn)

    print("Creating model")
    model = create_model(num_classes=21)
    model.to(device)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module

    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(
        params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma)
    # lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_steps, gamma=args.lr_gamma)

    # If a resume path (weights from a previous run) is given, continue training from that checkpoint
    if args.resume:
        # If map_location is missing, torch.load will first load the module to CPU
        # and then copy each parameter to where it was saved,
        # which would result in all processes on the same machine using the same set of devices.
        checkpoint = torch.load(args.resume, map_location='cpu')  # load the saved checkpoint (including optimizer and lr-scheduler state)
        model_without_ddp.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1

    if args.test_only:
        utils.evaluate(model, data_loader_test, device=device)
        return

    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        utils.train_one_epoch(model, optimizer, data_loader, device, epoch, args.print_freq)
        lr_scheduler.step()
        if args.output_dir:
            # save weights on the master process only
            utils.save_on_master({
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
                'args': args,
                'epoch': epoch},
                os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))

        # evaluate after every epoch
        utils.evaluate(model, data_loader_test, device=device)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
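utils.init_distributed_mode is imported but not shown in this snippet. A plausible sketch, modelled on the torchvision detection references (the exact fields it sets on args are assumptions): it reads the rank/world-size environment variables set by the PyTorch launcher, binds the process to its GPU, and joins the default process group.

import os
import torch
import torch.distributed as dist

def init_distributed_mode(args):
    # Sketch only: field names on args are assumptions.
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ['RANK'])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    else:
        print('Not using distributed mode')
        args.distributed = False
        return

    args.distributed = True
    torch.cuda.set_device(args.gpu)
    dist.init_process_group(backend='nccl', init_method='env://',
                            world_size=args.world_size, rank=args.rank)
    dist.barrier()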
Example #3
def main():
    # get devices
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)

    # create model
    # number of object classes + background
    num_classes = 20 + 1
    model = create_model(num_classes=num_classes)

    # load train weights
    train_weights = "./save_weights/ssd300-14.pth"
    train_weights_dict = torch.load(train_weights,
                                    map_location=device)['model']

    model.load_state_dict(train_weights_dict)
    model.to(device)

    # read class_indict
    json_path = "./pascal_voc_classes.json"
    assert os.path.exists(json_path), "file '{}' does not exist.".format(
        json_path)
    with open(json_path, 'r') as json_file:
        class_dict = json.load(json_file)
    category_index = {v: k for k, v in class_dict.items()}

    # load image
    original_img = Image.open("./test.jpg")

    # from pil image to tensor, do not normalize image
    data_transform = transform.Compose(
        [transform.Resize(),
         transform.ToTensor(),
         transform.Normalization()])
    img, _ = data_transform(original_img)
    # expand batch dimension
    img = torch.unsqueeze(img, dim=0)

    model.eval()
    with torch.no_grad():
        # run one dummy forward pass to initialize/warm up the model
        init_img = torch.zeros((1, 3, 300, 300), device=device)
        model(init_img)

        time_start = time_synchronized()
        predictions = model(
            img.to(device))[0]  # bboxes_out, labels_out, scores_out
        time_end = time_synchronized()
        print("inference+NMS time: {}".format(time_end - time_start))

        predict_boxes = predictions[0].to("cpu").numpy()
        predict_boxes[:, [0, 2]] = predict_boxes[:, [0, 2]] * original_img.size[0]
        predict_boxes[:, [1, 3]] = predict_boxes[:, [1, 3]] * original_img.size[1]
        predict_classes = predictions[1].to("cpu").numpy()
        predict_scores = predictions[2].to("cpu").numpy()

        if len(predict_boxes) == 0:
            print("没有检测到任何目标!")

        draw_box(original_img,
                 predict_boxes,
                 predict_classes,
                 predict_scores,
                 category_index,
                 thresh=0.5,
                 line_thickness=5)
        plt.imshow(original_img)
        plt.show()
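The timing above relies on a time_synchronized helper that is not shown. A plausible sketch of such a helper (an assumption, not the project's confirmed implementation): wait for pending CUDA kernels before reading the clock, so the measurement covers the whole forward pass plus NMS.

import time
import torch

def time_synchronized():
    # Synchronize the GPU so queued kernels are included in the timing.
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    return time.time()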
Example #4
# read class_indict
category_index = {}
try:
    json_file = open('./pascal_voc_classes.json', 'r')
    class_dict = json.load(json_file)
    category_index = {v: k for k, v in class_dict.items()}
except Exception as e:
    print(e)
    exit(-1)

# load image
original_img = Image.open("./timg.jpg")

# from pil image to tensor, do not normalize image
data_transform = transform.Compose(
    [transform.Resize(),
     transform.ToTensor(),
     transform.Normalization()])
img, _ = data_transform(original_img)
# expand batch dimension
img = torch.unsqueeze(img, dim=0)

model.eval()
with torch.no_grad():
    predictions = model(
        img.to(device))[0]  # bboxes_out, labels_out, scores_out
    predict_boxes = predictions[0].to("cpu").numpy()
    predict_boxes[:, [0, 2]] = predict_boxes[:, [0, 2]] * original_img.size[0]
    predict_boxes[:, [1, 3]] = predict_boxes[:, [1, 3]] * original_img.size[1]
    predict_classes = predictions[1].to("cpu").numpy()
    predict_scores = predictions[2].to("cpu").numpy()
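    # Hedged follow-up (not in the original snippet): keep only detections above
    # a confidence threshold before drawing or reporting them; the 0.5 cut-off
    # is an illustrative assumption.
    keep = predict_scores >= 0.5
    predict_boxes = predict_boxes[keep]
    predict_classes = predict_classes[keep]
    predict_scores = predict_scores[keep]
    if predict_boxes.shape[0] == 0:
        print("No objects detected!")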
def main(parser_data):
    device = torch.device(
        parser_data.device if torch.cuda.is_available() else "cpu")
    print(device)

    if not os.path.exists("save_weights"):
        os.mkdir("save_weights")

    data_transform = {
        "train":
        transform.Compose([
            transform.SSDCropping(),
            transform.Resize(),
            transform.ColorJitter(),
            transform.ToTensor(),
            transform.RandomHorizontalFlip(),
            transform.Normalization(),
            transform.AssignGTtoDefaultBox()
        ]),
        "val":
        transform.Compose([
            transform.Resize(),
            transform.ToTensor(),
            transform.Normalization()
        ])
    }

    night_root = parser_data.data_path
    train_dataset = NightDataSet(night_root,
                                 data_transform['train'],
                                 train_set='train.txt')
    # aa = train_dataset[1]
    # Note: batch_size must be greater than 1 during training
    train_data_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=8,
        shuffle=True,
        num_workers=4,
        collate_fn=utils.collate_fn)

    val_dataset = NightDataSet(night_root,
                               data_transform['val'],
                               train_set='val.txt')
    # bb = val_dataset[2]
    val_data_loader = torch.utils.data.DataLoader(val_dataset,
                                                  batch_size=4,
                                                  shuffle=False,
                                                  num_workers=0,
                                                  collate_fn=utils.collate_fn)

    model = create_model(num_classes=3, device=device)
    print(model)
    model.to(device)

    # define optimizer
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=0.005,
                                momentum=0.9,
                                weight_decay=0.0005)
    # learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=5,
                                                   gamma=0.5)

    # If a checkpoint from a previous run is specified, resume training from it
    if parser_data.resume != "":
        checkpoint = torch.load(parser_data.resume)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        parser_data.start_epoch = checkpoint['epoch'] + 1
        print("the training process from epoch{}...".format(
            parser_data.start_epoch))

    train_loss = []
    learning_rate = []
    val_map = []
    train_val_map = []

    val_data = None
    # If memory allows, load the validation annotations once up front to avoid re-reading them on every evaluation
    # val_data = get_coco_api_from_dataset(val_data_loader.dataset)
    for epoch in range(parser_data.start_epoch, parser_data.epochs):
        utils.train_one_epoch(model=model,
                              optimizer=optimizer,
                              data_loader=train_data_loader,
                              device=device,
                              epoch=epoch,
                              print_freq=50,
                              train_loss=train_loss,
                              train_lr=learning_rate)

        lr_scheduler.step()

        if epoch >= 20 or epoch == 10:
            utils.evaluate(model=model,
                           data_loader=val_data_loader,
                           device=device,
                           data_set=val_data,
                           mAP_list=val_map)

        # save weights
        save_files = {
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'lr_scheduler': lr_scheduler.state_dict(),
            'epoch': epoch
        }
        torch.save(save_files, "./save_weights/ssd512-{}.pth".format(epoch))

    # plot loss and lr curve
    if len(train_loss) != 0 and len(learning_rate) != 0:
        from plot_curve import plot_loss_and_lr
        plot_loss_and_lr(train_loss, learning_rate)

    # plot mAP curve
    if len(val_map) != 0:
        from plot_curve import plot_map
        plot_map(val_map)
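Both loaders above pass utils.collate_fn because detection targets vary in size from image to image, so the default stacking collate would fail. The torchvision detection references implement this as a one-liner; a sketch along those lines (an assumption about what this project's helper does):

def collate_fn(batch):
    # Turn a list of (image, target) pairs into a tuple of images and a tuple
    # of targets instead of trying to stack tensors of unequal shapes.
    return tuple(zip(*batch))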
Example #6
    n_gpu = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
    args.n_gpu = n_gpu
    args.distributed = n_gpu > 1

    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='gloo',
                                             init_method='env://')
        synchronize()

    device = 'cuda'

    train_trans = transform.Compose([
        transform.RandomResize(args.train_min_size_range, args.train_max_size),
        transform.RandomHorizontalFlip(0.5),
        transform.ToTensor(),
        transform.Normalize(args.pixel_mean, args.pixel_std)
    ])

    valid_trans = transform.Compose([
        transform.Resize(args.test_min_size, args.test_max_size),
        transform.ToTensor(),
        transform.Normalize(args.pixel_mean, args.pixel_std)
    ])

    train_set = COCODataset(args.path, 'train', train_trans)
    valid_set = COCODataset(args.path, 'val', valid_trans)

    # backbone = vovnet39(pretrained=True)
    # backbone = vovnet57(pretrained=True)
    # backbone = resnet18(pretrained=True)
def main(parser_data):
    device = torch.device(
        parser_data.device if torch.cuda.is_available() else "cpu")
    print(device)

    if not os.path.exists("save_weights"):
        os.mkdir("save_weights")

    data_transform = {
        "test":
        transform.Compose([
            transform.Resize(),
            transform.ToTensor(),
            transform.Normalization()
        ])
    }

    night_root = parser_data.data_path
    test_dataset = NightDataSet(night_root,
                                data_transform['test'],
                                train_set='test.txt')
    test_data_loader = torch.utils.data.DataLoader(test_dataset,
                                                   batch_size=4,
                                                   shuffle=False,
                                                   num_workers=0,
                                                   collate_fn=utils.collate_fn)

    model = create_model(num_classes=3, device=device)
    print(model)
    model.to(device)

    # define optimizer
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=0.005,
                                momentum=0.9,
                                weight_decay=0.0005)
    # learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=5,
                                                   gamma=0.5)

    # If a checkpoint from a previous run is specified, resume training from it
    if parser_data.resume != "":
        checkpoint = torch.load(parser_data.resume)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        parser_data.start_epoch = checkpoint['epoch'] + 1
        print("the training process from epoch{}...".format(
            parser_data.start_epoch))

    test_val_map = []

    val_data = None

    for epoch in range(parser_data.start_epoch, parser_data.epochs):
        utils.evaluate(model=model,
                       data_loader=test_data_loader,
                       device=device,
                       data_set=val_data,
                       mAP_list=test_val_map)
Example #8
def main(args):
    init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)

    results_file = "results{}.txt".format(
        datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))

    # Data loading code
    print("Loading data")

    data_transform = {
        "train":
        transform.Compose([
            transform.SSDCropping(),
            transform.Resize(),
            transform.ColorJitter(),
            transform.ToTensor(),
            transform.RandomHorizontalFlip(),
            transform.Normalization(),
            transform.AssignGTtoDefaultBox()
        ]),
        "val":
        transform.Compose([
            transform.Resize(),
            transform.ToTensor(),
            transform.Normalization()
        ])
    }

    VOC_root = args.data_path
    # check voc root
    if not os.path.exists(os.path.join(VOC_root, "VOCdevkit")):
        raise FileNotFoundError(
            "VOCdevkit does not exist in path: '{}'.".format(VOC_root))

    # load train data set
    train_data_set = VOC2012DataSet(VOC_root,
                                    data_transform["train"],
                                    train_set='train.txt')

    # load validation data set
    val_data_set = VOC2012DataSet(VOC_root,
                                  data_transform["val"],
                                  train_set='val.txt')

    print("Creating data loaders")
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_data_set)
        test_sampler = torch.utils.data.distributed.DistributedSampler(
            val_data_set)
    else:
        train_sampler = torch.utils.data.RandomSampler(train_data_set)
        test_sampler = torch.utils.data.SequentialSampler(val_data_set)

    if args.aspect_ratio_group_factor >= 0:
        # Compute, for each image, the index of the aspect-ratio bin it falls into
        group_ids = create_aspect_ratio_groups(
            train_data_set, k=args.aspect_ratio_group_factor)
        train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids,
                                                  args.batch_size)
    else:
        train_batch_sampler = torch.utils.data.BatchSampler(train_sampler,
                                                            args.batch_size,
                                                            drop_last=True)

    data_loader = torch.utils.data.DataLoader(
        train_data_set,
        batch_sampler=train_batch_sampler,
        num_workers=args.workers,
        collate_fn=train_data_set.collate_fn)

    data_loader_test = torch.utils.data.DataLoader(
        val_data_set,
        batch_size=1,
        sampler=test_sampler,
        num_workers=args.workers,
        collate_fn=train_data_set.collate_fn)

    print("Creating model")
    model = create_model(num_classes=args.num_classes + 1, device=device)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.gpu])
        model_without_ddp = model.module

    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=args.lr_step_size,
                                                   gamma=args.lr_gamma)
    # lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_steps, gamma=args.lr_gamma)

    # If a resume path (weights from a previous run) is given, continue training from that checkpoint
    if args.resume:
        # If map_location is missing, torch.load will first load the module to CPU
        # and then copy each parameter to where it was saved,
        # which would result in all processes on the same machine using the same set of devices.
        checkpoint = torch.load(
            args.resume, map_location='cpu')  # load the saved checkpoint (including optimizer and lr-scheduler state)
        model_without_ddp.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1

    if args.test_only:
        utils.evaluate(model, data_loader_test, device=device)
        return

    train_loss = []
    learning_rate = []
    val_map = []
    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)

        mean_loss, lr = utils.train_one_epoch(model,
                                              optimizer,
                                              data_loader,
                                              device,
                                              epoch,
                                              args.print_freq,
                                              warmup=True)
        # only the main process records the training info
        if args.rank in [-1, 0]:
            train_loss.append(mean_loss.item())
            learning_rate.append(lr)

        # update learning rate
        lr_scheduler.step()

        # evaluate after every epoch
        coco_info = utils.evaluate(model, data_loader_test, device=device)

        if args.rank in [-1, 0]:
            # write into txt
            with open(results_file, "a") as f:
                # the record includes the COCO metrics plus the loss and learning rate
                result_info = [
                    str(round(i, 4))
                    for i in coco_info + [mean_loss.item(), lr]
                ]
                txt = "epoch:{} {}".format(epoch, '  '.join(result_info))
                f.write(txt + "\n")

            val_map.append(coco_info[1])  # pascal mAP

        if args.output_dir:
            # save weights on the master node only
            save_on_master(
                {
                    'model': model_without_ddp.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict(),
                    'args': args,
                    'epoch': epoch
                }, os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))

    if args.rank in [-1, 0]:
        # plot loss and lr curve
        if len(train_loss) != 0 and len(learning_rate) != 0:
            from plot_curve import plot_loss_and_lr
            plot_loss_and_lr(train_loss, learning_rate)

        # plot mAP curve
        if len(val_map) != 0:
            from plot_curve import plot_map
            plot_map(val_map)
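save_on_master is imported from the project's utilities and not shown here. A sketch in the spirit of the torchvision references (an assumption about its implementation): only the main process writes the checkpoint, so ranks do not race on the same file.

import torch
import torch.distributed as dist

def is_main_process():
    # Without an initialized process group there is only one process.
    if not (dist.is_available() and dist.is_initialized()):
        return True
    return dist.get_rank() == 0

def save_on_master(*args, **kwargs):
    if is_main_process():
        torch.save(*args, **kwargs)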
Example #9
    args = get_args()

    n_gpu = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
    args.distributed = n_gpu > 1

    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='gloo',
                                             init_method='env://')
        synchronize()

    device = 'cuda'

    valid_trans = transform.Compose([
        transform.Resize(args.test_min_size, args.test_max_size),
        transform.ToTensor(),
        transform.Normalize(args.pixel_mean, args.pixel_std)
    ])

    valid_set = COCODataset("/data/COCO_17/", 'val', valid_trans)

    # backbone = vovnet39(pretrained=False)
    # backbone = resnet18(pretrained=False)
    backbone = resnet50(pretrained=False)
    model = ATSS(args, backbone)

    # load weight
    model_file = "./training_dir/epoch-12.pt"
    chkpt = torch.load(model_file, map_location='cpu')  # load checkpoint
    model.load_state_dict(chkpt['model'])
    print('load weights from ' + model_file)
    return 0


if __name__ == '__main__':
    # use the locally-aligned (AlignedReID) model
    model = ReIDNet(num_classes=751, loss={'softmax', 'metric'}, aligned=True)
    # load the best parameters of the aligned model
    model.load_state_dict(
        torch.load('./model/param/aligned_trihard_net_params_best.pth'))
    # select the dataset
    dataset = Market1501()
    # transforms for the query and gallery images
    transform = T.Compose([
        T.Resize((height, width)),  # resize to a common size
        T.ToTensor(),  # PIL image to tensor
        T.Normalize(mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]),  # normalize with fixed ImageNet statistics
    ])
    # data loader for the query set
    query_data_loader = DataLoader(
        ImageDataset(dataset.query, transform=transform),  # custom dataset wrapped with the transform above
        batch_size=batch_size,  # number of image tensors per batch
        drop_last=True,  # drop the last, incomplete batch
    )
    # data loader for the gallery set
    gallery_data_loader = DataLoader(
        ImageDataset(dataset.gallery, transform=transform),  # custom dataset wrapped with the transform above
        batch_size=batch_size,  # number of image tensors per batch
        drop_last=True,  # drop the last, incomplete batch
    )
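    # Hedged follow-up (not part of the original snippet): a typical next step
    # is to run every query/gallery batch through the network in eval mode and
    # collect the per-image features for the later distance matrix. The
    # (imgs, pids, camids) batch layout and the helper name are assumptions
    # about ImageDataset; when aligned=True the model may return a
    # (global, local) feature pair, in which case unpack accordingly.
    @torch.no_grad()
    def extract_features(net, loader):
        net.eval()
        feats = []
        for imgs, pids, camids in loader:
            feats.append(net(imgs).cpu())
        return torch.cat(feats, dim=0)

    query_feats = extract_features(model, query_data_loader)
    gallery_feats = extract_features(model, gallery_data_loader)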
Example #11
import engine as eng
import eval
import models
import transform
import utils
from datasets import CocoSingleKPS

IMAGE_SIZE = 256, 256

data_path, remaining_args = utils.get_args()
engine = eng.Engine.command_line_init(args=remaining_args)

data_transform = transform.Compose([
    transform.ResizeKPS(IMAGE_SIZE),
    transform.extract_keypoints,
    transform.ToTensor(),
    transform.ImageTargetWrapper(T.Normalize(CocoSingleKPS.MEAN, CocoSingleKPS.STD))
])

selected_kps = ['left_eye', 'right_eye']
coco_train = CocoSingleKPS.from_data_path(data_path, train=True, transforms=data_transform, keypoints=selected_kps)
coco_val = CocoSingleKPS.from_data_path(data_path, train=False, transforms=data_transform, keypoints=selected_kps)

num_instructions = len(selected_kps)
model = models.resnet50(td_outplanes=1, num_instructions=num_instructions)
if len(selected_kps) == 1:
    model.one_iteration()
model = models.SequentialInstructor(model, num_instructions)

train_eval = eval.Evaluator()
val_eval = eval.Evaluator()