示例#1
0
def main(num_classes, context_path, check_points,
         onnx_path='Bisenet_nearest.onnx', input_size=(1, 3, 480, 640)):
    """Export a trained BiSeNet checkpoint to ONNX.

    Args:
        num_classes: number of segmentation classes the model was trained on.
        context_path: backbone identifier passed through to BiSeNet
            (e.g. 'resnet18' / 'resnet101').
        check_points: path to the ``.pth`` state-dict to load.
        onnx_path: output file for the exported graph (default keeps the
            original hard-coded name for backward compatibility).
        input_size: (N, C, H, W) of the dummy input; fixes the exported
            graph's input shape (default keeps the original 1x3x480x640).
    """
    # Pin this process to a single GPU before any CUDA context is created.
    os.environ['CUDA_VISIBLE_DEVICES'] = '7'
    model = BiSeNet(num_classes, context_path).cuda()

    # Load pretrained weights.
    print('load model from %s ...' % check_points)
    model.load_state_dict(torch.load(check_points))
    print('load model success')

    # Export must run in eval mode so BatchNorm/Dropout are traced with
    # inference behavior, not training behavior.
    model.eval()

    # Dummy input only drives the trace; its values are irrelevant.
    input_var = torch.randn(*input_size, device='cuda')
    torch.onnx.export(model,
                      input_var,
                      onnx_path,
                      verbose=True,
                      input_names=["input1"],
                      output_names=["output1"])
示例#2
0
def main(params):
    """Parse CLI arguments, build the data pipeline and model, then train.

    Args:
        params: list of command-line tokens forwarded to argparse.
    """
    # Argument parsing (argument definitions live in add_arguments).
    args = add_arguments(argparse.ArgumentParser()).parse_args(params)
    print("Training with following arguments:")
    pprint(vars(args), indent=4, compact=True)
    target_device = device if args.use_gpu else torch.device('cpu')
    print("Running on: {}".format(target_device))

    # Dataset and dataloaders: both splits share batch size / worker count;
    # only the training loader drops the last partial batch.
    data_root = args.data
    aug_train, aug_val = get_transform(
        args.random_crop_size, args.further_data_aug)
    loader_kwargs = dict(batch_size=args.batch_size, shuffle=True,
                         num_workers=args.num_workers)
    dataloader_train = DataLoader(
        VOC(data_root, image_set="train", transform=aug_train),
        drop_last=True, **loader_kwargs)
    dataloader_val = DataLoader(
        VOC(data_root, image_set="val", transform=aug_val),
        **loader_kwargs)

    # Model, placed on the configured device.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda
    model = BiSeNet(args.num_classes, args.context_path)
    if args.use_gpu:
        model = model.to(device)

    # Optimizer selection is delegated to get_optim.
    optimizer = get_optim(args, model)

    # Optionally warm-start from pretrained weights.
    if args.pretrained_model_path is not None:
        print("load model from %s ..." % args.pretrained_model_path)
        model.load_state_dict(torch.load(args.pretrained_model_path))
        print("Done!")

    # Gradient scaler for mixed precision; None disables AMP in train().
    scaler = amp.GradScaler() if args.use_amp else None

    # Run the training loop.
    train(args, model, optimizer, dataloader_train, dataloader_val, scaler)
    print("Training completed.", datetime.now().strftime("%m/%d/%Y, %H:%M:%S"))
        # NOTE(review): tail of a bounding-box clamping helper; its `def`
        # line (and the matching row-overflow check) is outside this view.
        # Presumably shifts the window back inside the image, preserving
        # its size, when it overruns an edge -- confirm against the full
        # function.
        rmin -= delt
    # Same clamp for columns: if the window runs past the right edge,
    # slide [cmin, cmax] left by the overshoot.
    if cmax > img_length:
        delt = cmax - img_length
        cmax = img_length
        cmin -= delt
    return rmin, rmax, cmin, cmax


####################################################################################################
################################### load BiSeNet parameters ########################################
####################################################################################################
print('load BiseNet')
start_time = time.time()
bise_model = BiSeNet(opt.num_classes, opt.context_path)
bise_model = bise_model.cuda()
bise_model.load_state_dict(torch.load(opt.checkpoint_path))
global bise_model
print('Done!')
print("Load time : {}".format(time.time() - start_time))

#####################################################################################################
######################## load Densefusion Netwopy4thork, 3d model #############################
#####################################################################################################
print('load densefusion network')
start_time = time.time()
estimator = PoseNet(num_points=num_points, num_obj=num_obj)
estimator.cuda()
estimator.load_state_dict(torch.load(opt.model))
estimator.eval()
############################################################################
refiner = PoseRefineNet(num_points=num_points, num_obj=num_obj)
示例#4
0
# Class index -> label/colour information parsed from the dataset csv.
label_info = get_label_info(args.csv_path)

# (height, width) every input frame is resized to before inference.
scale = (args.crop_height, args.crop_width)

# build model
os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda
model = BiSeNet(args.num_classes, args.context_path)

# load pretrained model if exists
print('load model from %s ...' % args.checkpoint_path)

if torch.cuda.is_available() and args.use_gpu:
    # Wrap in DataParallel, then load into .module so the checkpoint's keys
    # (saved without the 'module.' prefix, presumably -- confirm) line up.
    model = torch.nn.DataParallel(model).cuda()
    model.module.load_state_dict(torch.load(args.checkpoint_path))  # GPU -> GPU
else:
    # map_location remaps CUDA-saved tensors onto CPU storage.
    model.load_state_dict(torch.load(args.checkpoint_path, map_location=lambda storage, loc: storage))  # GPU -> CPU

print('Done!')

# Shared preprocessing: bilinear resize for RGB, nearest-neighbour for depth
# so depth values are not blended across object boundaries.
resize_img = transforms.Resize(scale, Image.BILINEAR)
resize_depth = transforms.Resize(scale, Image.NEAREST)
to_tensor = transforms.ToTensor()

def predict_on_RGB(image):  # nd convenient both for img and video
    # pre-processing on image
    image = resize_img(image)
    image = transforms.ToTensor()(image).float().unsqueeze(0)

    # predict
    model.eval()
示例#5
0
def main(params):
    """Parse CLI parameters, load a trained BiSeNet and run prediction on
    an image and/or a video.

    Args:
        params: list of command-line tokens forwarded to argparse.
    """

    def str2bool(value):
        # argparse's `type=bool` is a trap: bool("False") is True because
        # any non-empty string is truthy. Parse the text explicitly so
        # `--use_gpu False` actually disables the GPU path.
        return str(value).lower() in ('true', '1', 'yes', 'y')

    # basic parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('--image',
                        action='store_true',
                        default=False,
                        help='predict on image')
    parser.add_argument('--video',
                        action='store_true',
                        default=False,
                        help='predict on video')
    parser.add_argument('--checkpoint_path',
                        type=str,
                        default=None,
                        help='The path to the pretrained weights of model')
    parser.add_argument('--context_path',
                        type=str,
                        default="resnet101",
                        help='The context path model you are using.')
    parser.add_argument('--num_classes',
                        type=int,
                        default=12,
                        help='num of object classes (with void)')
    parser.add_argument('--data',
                        type=str,
                        default=None,
                        help='Path to image or video for prediction')
    parser.add_argument('--crop_height',
                        type=int,
                        default=720,
                        help='Height of cropped/resized input image to network')
    parser.add_argument('--crop_width',
                        type=int,
                        default=960,
                        help='Width of cropped/resized input image to network')
    parser.add_argument('--cuda',
                        type=str,
                        default='0',
                        help='GPU ids used for training')
    parser.add_argument('--use_gpu',
                        type=str2bool,
                        default=True,
                        help='Whether to user gpu for training')
    parser.add_argument('--csv_path',
                        type=str,
                        default=None,
                        required=True,
                        help='Path to label info csv file')
    parser.add_argument('--save_path',
                        type=str,
                        default=None,
                        required=True,
                        help='Path to save predict image')

    args = parser.parse_args(params)

    # build model
    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda
    net = BiSeNet(args.num_classes, args.context_path)
    if torch.cuda.is_available() and args.use_gpu:
        net = torch.nn.DataParallel(net).cuda()

    # load pretrained model if exists
    print('load model from %s ...' % args.checkpoint_path)
    # NOTE(review): when net is wrapped in DataParallel, state-dict keys need
    # the 'module.' prefix -- confirm the checkpoint was saved from a
    # DataParallel model (otherwise load into net.module instead).
    net.load_state_dict(torch.load(args.checkpoint_path, map_location='cpu'))
    print('Done!')

    # predict on image
    if args.image:
        predict_on_image(net, args)

    # predict on video
    if args.video:
        pass  # video prediction not implemented yet
示例#6
0
def main(params):
    """Parse CLI parameters, build datasets/model/optimizer, train, and
    run a final validation pass.

    Args:
        params: list of command-line tokens forwarded to argparse.
    """

    def str2bool(value):
        # argparse's `type=bool` treats any non-empty string (including
        # "False") as True; parse the text explicitly instead.
        return str(value).lower() in ('true', '1', 'yes', 'y')

    # basic parameters
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_epochs',
                        type=int,
                        default=300,
                        help='Number of epochs to train for')
    parser.add_argument('--epoch_start_i',
                        type=int,
                        default=0,
                        help='Start counting epochs from this number')
    parser.add_argument('--checkpoint_step',
                        type=int,
                        default=10,
                        help='How often to save checkpoints (epochs)')
    parser.add_argument('--validation_step',
                        type=int,
                        default=10,
                        help='How often to perform validation (epochs)')
    parser.add_argument('--batch_size',
                        type=int,
                        default=1,
                        help='Number of images in each batch')
    parser.add_argument(
        '--context_path',
        type=str,
        default="resnet101",
        help='The context path model you are using, resnet18, resnet101.')
    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.01,
                        help='learning rate used for train')
    parser.add_argument('--data',
                        type=str,
                        default='data',
                        help='path of training data')
    parser.add_argument('--num_workers',
                        type=int,
                        default=4,
                        help='num of workers')
    parser.add_argument('--num_classes',
                        type=int,
                        default=32,
                        help='num of object classes (with void)')
    parser.add_argument('--cuda',
                        type=str,
                        default='0',
                        help='GPU ids used for training')
    parser.add_argument('--use_gpu',
                        type=str2bool,
                        default=True,
                        help='whether to user gpu for training')
    parser.add_argument('--pretrained_model_path',
                        type=str,
                        default=None,
                        help='path to pretrained model')
    parser.add_argument('--save_model_path',
                        type=str,
                        default="checkpoints",
                        help='path to save model')
    parser.add_argument('--optimizer',
                        type=str,
                        default='rmsprop',
                        help='optimizer, support rmsprop, sgd, adam')
    parser.add_argument('--loss',
                        type=str,
                        default='crossentropy',
                        help='loss function, dice or crossentropy')

    # parse the given parameters
    args = parser.parse_args(params)

    # create dataset and dataloader
    train_path = args.data
    train_transform, val_transform = get_transform()

    # VOC dataset/loader for the training split
    dataset_train = VOC(train_path,
                        image_set="train",
                        transform=train_transform)
    dataloader_train = DataLoader(dataset_train,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.num_workers,
                                  drop_last=True)

    # VOC dataset/loader for the validation split
    dataset_val = VOC(train_path, image_set="val", transform=val_transform)
    dataloader_val = DataLoader(dataset_val,
                                batch_size=args.batch_size,
                                shuffle=True,
                                num_workers=args.num_workers)

    # build model
    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda
    model = BiSeNet(args.num_classes, args.context_path)
    if torch.cuda.is_available() and args.use_gpu:
        model = model.cuda()

    # build optimizer
    if args.optimizer == 'rmsprop':
        optimizer = torch.optim.RMSprop(model.parameters(), args.learning_rate)
    elif args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    args.learning_rate,
                                    momentum=0.9,
                                    weight_decay=1e-4)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), args.learning_rate)
    else:
        # Unknown optimizer name: report and abort instead of guessing.
        print('not supported optimizer \n')
        return None

    # load pretrained model if exists
    if args.pretrained_model_path is not None:
        print('load model from %s ...' % args.pretrained_model_path)
        model.load_state_dict(torch.load(args.pretrained_model_path))
        print('Done!')

    # train (train/val are defined elsewhere in this file)
    train(args, model, optimizer, dataloader_train, dataloader_val)

    # final validation pass on the trained model
    val(args, model, dataloader_val)
示例#7
0
def main(params):
    """Run single-image BiSeNet inference, colourise the prediction,
    report model statistics and export the network to ONNX.

    Args:
        params: list of command-line tokens forwarded to argparse.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('--save_model_path',
                        type=str,
                        default=None,
                        help='path to save model')
    parser.add_argument('--num_classes',
                        type=int,
                        default=32,
                        help='num of object classes (with void)')
    parser.add_argument(
        '--context_path',
        type=str,
        default="resnet18",
        help='The context path model you are using, resnet18, resnet101.')
    args = parser.parse_args(params)

    # Restore the trained weights and switch to inference mode.
    model = BiSeNet(args.num_classes, args.context_path)
    model.load_state_dict(
        torch.load(os.path.join(args.save_model_path, 'best_dice_loss.pth')))
    model.eval()

    # NOTE(review): assumes the test PNG decodes to 3 channels; if it may be
    # RGBA/greyscale, add .convert('RGB') -- confirm against the dataset.
    img = Image.open('./CamVid/test/Seq05VD_f00660.png')
    transform = transforms.Compose([
        transforms.Resize([720, 960]),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    img = transform(img).unsqueeze(dim=0)

    # Colour palette indexed by class id (12 CamVid classes); replaces the
    # original twelve copy-pasted masked assignments.
    palette = [
        (255, 51, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255),
        (255, 255, 0), (255, 0, 255), (0, 255, 255), (10, 200, 128),
        (125, 18, 78), (205, 128, 8), (144, 208, 18), (5, 88, 198),
    ]
    imresult = np.zeros([img.shape[2], img.shape[3], 3], dtype=np.uint8)
    with torch.no_grad():
        predict = model(img).squeeze()
        predict = reverse_one_hot(predict)
        predict = np.array(predict)
        for class_id, colour in enumerate(palette):
            imresult[predict == class_id] = colour
    cv2.imwrite('result.png', imresult)
    print('inference done')

    # Per-layer summary and MACs/params count of the model.
    summary(model, (3, 720, 960), 1, "cpu")

    macs, params = profile(model, inputs=(img, ))
    print('macs', macs)
    print('params', params)

    # Export to ONNX; failures are logged to log.txt instead of aborting.
    # `with` guarantees the log file is closed (the original leaked it).
    EXPORTONNXNAME = 'nit-bisenet.onnx'
    with open('log.txt', 'w') as log:
        try:
            torch.onnx.export(
                model,
                img,
                EXPORTONNXNAME,
                export_params=True,
                do_constant_folding=True,
                input_names=['data'],
                output_names=['output'])
        except Exception:
            traceback.print_exc(file=log)

    print('export done')
示例#8
0
def main():
  """Adversarial domain-adaptation training: a BiSeNet generator is trained
  on a labelled source dataset (IDDA) while a discriminator pushes its
  predictions on the unlabelled target dataset (CamVid) to look source-like.

  Resumes from epoch-45 checkpoints and runs epochs 46..50 (see below).
  """
  # Call Python's garbage collector, and empty torch's CUDA cache. Just in case
  gc.collect()
  torch.cuda.empty_cache()

  # Enable cuDNN in benchmark mode. For more info see:
  # https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936
  torch.backends.cudnn.enabled = True
  torch.backends.cudnn.benchmark = True

  # Load Bisenet generator
  # NOTE(review): checkpoint paths hard-code beta=0.01 / epoch 45, matching
  # the range(46, 51) resume below -- confirm before reusing this script.
  generator = BiSeNet(NUM_CLASSES, CONTEXT_PATH).cuda()
  generator.load_state_dict(torch.load('./checkpointBeta01/0.01_45_Generator.pth'))
  generator.train()
  # Build discriminator
  discriminator = Discriminator(NUM_CLASSES).cuda()
  discriminator.load_state_dict(torch.load('./checkpointBeta01/0.01_45_Discriminator.pth'))
  discriminator.train()

  # Load source dataset (labelled; drives the segmentation loss)
  source_dataset = IDDA(
      image_path=IDDA_PATH,
      label_path=IDDA_LABEL_PATH,
      classes_info_path=JSON_IDDA_PATH,
      scale=(CROP_HEIGHT, CROP_WIDTH),
      loss=LOSS,
      mode='train'
  )
  source_dataloader = DataLoader(
        source_dataset,
        batch_size=BATCH_SIZE_IDDA,
        shuffle=True,
        num_workers=NUM_WORKERS,
        drop_last=True,
        pin_memory=True
    )

  # Load target dataset (unlabelled here; drives the adversarial loss)
  target_dataset = CamVid(
    image_path=CAMVID_PATH,
    label_path= CAMVID_LABEL_PATH,csv_path= CSV_CAMVID_PATH,
    scale=(CROP_HEIGHT,
    CROP_WIDTH),
    loss=LOSS,
    mode='adversarial_train'
  )
  target_dataloader = DataLoader(
        target_dataset,
        batch_size=BATCH_SIZE_CAMVID,
        shuffle=True,
        num_workers=NUM_WORKERS,
        drop_last=True,
        pin_memory=True
    )

  # Separate optimizers: SGD for the segmentation network, Adam for the
  # discriminator.
  optimizer_BiSeNet = torch.optim.SGD(generator.parameters(), lr = LEARNING_RATE_SEGMENTATION, momentum = MOMENTUM, weight_decay = WEIGHT_DECAY)
  optimizer_discriminator = torch.optim.Adam(discriminator.parameters(), lr = LEARNING_RATE_DISCRIMINATOR, betas = (0.9,0.99))

  # Loss for discriminator training
  # Sigmoid layer + BCELoss
  bce_loss = nn.BCEWithLogitsLoss()

  # Loss for segmentation loss
  # Log-softmax layer + 2D Cross Entropy
  cross_entropy_loss = CrossEntropy2d()

  # for epoch in range(NUM_STEPS):
  # Hard-coded resume window: continues a previous run from epoch 46 to 50.
  for epoch in range(46, 51):
    # Fresh iterators each epoch; minibatch() advances both in lockstep.
    source_dataloader_iter = iter(source_dataloader)
    target_dataloader_iter = iter(target_dataloader)

    print(f'begin epoch {epoch}')

    # Initialize gradients=0 for Generator and Discriminator
    optimizer_BiSeNet.zero_grad()
    optimizer_discriminator.zero_grad()

    # Setting losses equal to 0 (per-epoch accumulators for reporting)
    l_seg_to_print_acc, l_adv_to_print_acc, l_d_to_print_acc = 0, 0, 0

    # Compute learning rate for this epoch (polynomial decay schedule)
    adjust_learning_rate(optimizer_BiSeNet, LEARNING_RATE_SEGMENTATION, epoch, NUM_STEPS, POWER)
    adjust_learning_rate(optimizer_discriminator, LEARNING_RATE_DISCRIMINATOR, epoch, NUM_STEPS, POWER)

    # One epoch = one pass over the (smaller) target dataloader.
    for i in tqdm(range(len(target_dataloader))):
      optimizer_BiSeNet.zero_grad()
      optimizer_discriminator.zero_grad()
      # minibatch() runs the forward/backward passes and returns the three
      # scalar losses for this step.
      l_seg_to_print, l_adv_to_print, l_d_to_print = minibatch(source_dataloader_iter, target_dataloader_iter, generator, discriminator, cross_entropy_loss, bce_loss, source_dataloader, target_dataloader)
      l_seg_to_print_acc += l_seg_to_print
      l_adv_to_print_acc += l_adv_to_print
      l_d_to_print_acc += l_d_to_print
      # Run optimizers using the gradient obtained via backpropagations
      optimizer_BiSeNet.step()
      optimizer_discriminator.step()

    # Output at each epoch
    print(f'epoch = {epoch}/{NUM_STEPS}, loss_seg = {l_seg_to_print_acc:.3f}, loss_adv = {l_adv_to_print_acc:.3f}, loss_D = {l_d_to_print_acc:.3f}')

    # Save intermediate generator (checkpoint)
    if epoch % CHECKPOINT_STEP == 0 and epoch != 0:
      # If the directory does not exists create it
      if not os.path.isdir(CHECKPOINT_PATH):
        os.mkdir(CHECKPOINT_PATH)
      # Save the parameters of the generator (segmentation network) and discriminator
      generator_checkpoint_path = os.path.join(CHECKPOINT_PATH, f"{BETA}_{epoch}_Generator.pth")
      torch.save(generator.state_dict(), generator_checkpoint_path)
      discriminator_checkpoint_path = os.path.join(CHECKPOINT_PATH, f"{BETA}_{epoch}_Discriminator.pth")
      torch.save(discriminator.state_dict(), discriminator_checkpoint_path)
      print(f"saved:\n{generator_checkpoint_path}\n{discriminator_checkpoint_path}")