Example #1
def get_fcn_model(num_classes, use_gpu):
    vgg_model = VGGNet(requires_grad=True, remove_fc=True, batch_norm=True)
    fcn_model = FCN8sScaledBN(pretrained_net=vgg_model, n_class=num_classes)

    if use_gpu:
        ts = time.time()
        vgg_model = vgg_model.cuda()
        fcn_model = fcn_model.cuda()
        num_gpu = list(range(torch.cuda.device_count()))
        fcn_model = nn.DataParallel(fcn_model, device_ids=num_gpu)

        print("Finish cuda loading, time elapsed {}".format(time.time() - ts))

    return fcn_model
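A minimal call-site sketch (hypothetical; the num_classes value and any surrounding training code are assumptions, not part of the original example):

model = get_fcn_model(num_classes=5, use_gpu=torch.cuda.is_available())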
def create_model(path):
    n_class = 5
    python2_state_dict = pickle.load(open(path,"rb"))

    vgg_model = VGGNet(requires_grad=True, remove_fc=True)
    model = FCNs(pretrained_net=vgg_model, n_class=n_class)
    model = nn.DataParallel(model, device_ids=[0])
    model.load_state_dict(python2_state_dict)

    if torch.cuda.is_available():
        model = model.cuda()

    return model
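Note: nn.DataParallel prefixes every parameter key with "module.", so this checkpoint only loads cleanly into a model that is also wrapped in DataParallel. If the wrapper is not wanted at load time, a common alternative (a sketch, not part of the original example) is to strip the prefix before loading:

python2_state_dict = {k.replace('module.', '', 1): v for k, v in python2_state_dict.items()}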
Example #3
    def __init__(self):
        super(Model, self).__init__()

        resnet18 = models.resnet18()
        self.res18_conv = nn.Sequential(*list(resnet18.children())[:-3])
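        # keep ResNet-18 up to layer3 (drops layer4, avgpool and fc); layer3 outputs 256-channel features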
        self.FPN = fpn.FPN101()

        self.vgg_model = VGGNet(requires_grad=True)
        self.FCN = FCN8s(pretrained_net=self.vgg_model, n_class=3)
        self.fcn_conv = nn.Conv2d(256, 3, 1)
        self.fcn_bnorm = nn.BatchNorm2d(3)

        self.C_SEG = nn.ModuleList(
            [ComponentSeg(i) for i in [2, 2, 2, 2, 2, 4]])
        self.C_PRED = ComponentPred()
Example #4
def predict():
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    #model = resnet152()
    vgg_model = VGGNet()
    model = FCN16s(pretrained_net=vgg_model)
    model.load_state_dict(torch.load("models_fcn16s/fcn16s_294.pth"))
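    # note: passing map_location=device to torch.load above makes this robust when the checkpoint was saved on GPU but inference runs on CPU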
    model = model.to(device)
    #model = torch.load('checkpoints_v3p_v0316/deeplabv3plus_model_34.pt')
    save_dir = 'predict_test_fcn16s/'
    model.eval()
    with torch.no_grad():
        for index, (image_name_batch, image,
                    label) in enumerate(test_dataloader):
            #print(image_name_batch)
            image = image.to(device)
            #label = label.to(device)
            predict = model(image)  #(4,5,640,640)
            predict_index = torch.argmax(
                predict, dim=1, keepdim=False).cpu().numpy()  #(4, 640,640)
            seg_color = label2color(colors, 4, predict_index)
            save(save_dir, image_name_batch, seg_color)
Example #5
def check(args):
    #model = Unet(3, 3)
    vgg_model = VGGNet(requires_grad=True, remove_fc=True)
    model = FCN8s(pretrained_net=vgg_model, n_class=3)
    model.load_state_dict(torch.load(args.ckpt,map_location='cpu'))
    model.eval()
             
    import PIL.Image as Image
    img = Image.open('./data_road/training/image/um_000005.png')
    #img = Image.open('/home/cvlab04/Desktop/Code/Medical/u_net_liver/A001-23230277-27.jpeg').convert('RGB')
    img = x_transforms(img)
    #img = img.view(1,3,375,1242)
    img = img.view(1,3,352,1216)
    import matplotlib.pyplot as plt
    with torch.no_grad():
        output= model(img)
        #print(output.shape)
        output = torch.softmax(output,dim=1)
        N, _, h, w = output.shape
        #print(output)
        pred = output.transpose(0, 2).transpose(3, 1).reshape(-1, 3).argmax(axis=1).reshape(N, h, w) #class 3
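        # for the single-image batch used here, the line above is equivalent to pred = output.argmax(dim=1)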
        pred = pred.squeeze(0)
        print(pred)
        Decode_image(pred)
def train():
    torch.manual_seed(1280)
    torch.cuda.manual_seed(1280)
    np.random.seed(1280)

    # setup device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device = {}".format(device))
    # load data
    data_loader_train = Cityscapes_Loader(path_root=path_root,split="train",n_classes=n_class)
    data_loader_val = Cityscapes_Loader(path_root=path_root,split="val",n_classes=n_class)

    train_loader = data.DataLoader(data_loader_train,batch_size=batch_size,num_workers=16)
    val_loader = data.DataLoader(data_loader_val,batch_size=batch_size,num_workers=16)  # val batch_size=1

    # set model
    assert torch.cuda.is_available(), "CPU is not supported in this debug stage; remove the cuda() calls below first"
    # if torch.cuda.is_available():
    #     torch.backends.cudnn.benchmark = True

    pretrain_model = VGGNet(requires_grad=True, remove_fc=True)
    model = FCNs(pretrained_net=pretrain_model, n_class=n_class)

    print("model loading success...")

    # set model running devices
    model.to(device)
    model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
    print("usable gpu num: {}".format(torch.cuda.device_count()))

    # set optimizer, lr_scheduler, loss function
    optimizer = None
    if optimizer_name == "rmsprop":
        optimizer = torch.optim.RMSprop(model.parameters(),lr=learn_rate,momentum=momentum,weight_decay=weight_decay)
    elif optimizer_name == "sgd":
        optimizer = torch.optim.SGD(model.parameters(),lr=learn_rate,momentum=momentum,weight_decay=weight_decay)
    elif optimizer_name == "adam":
        optimizer = torch.optim.Adam(model.parameters(),lr=learn_rate)
        
    #criterion = mybasilLoss()
    criterion = torch.nn.BCEWithLogitsLoss()
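    # BCEWithLogitsLoss scores each class channel independently, so targets must be float one-hot masks with the same [batch, n_classes, H, W] shape as the model output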

    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)

    # load checkpoints
    last_best_iou = -100.
    load_ckpt_path = os.path.join("checkpoints",load_ckpt_name)
    if is_load_checkpoints:
        if torch.cuda.is_available():
            checkpoint = torch.load(load_ckpt_path)
        else:
            checkpoint = torch.load(load_ckpt_path, map_location='cpu')

        model.load_state_dict(checkpoint['model_state'])
        last_best_iou = checkpoint['best_iou']
        start_epoch = checkpoint['epoch']

        print('Checkpoint resume success... last iou:{:.4%}'.format(last_best_iou))

    # train epoch
    best_iou = last_best_iou
    time_epoch = time.time()
    i = 0
    for epoch in range(epochs):
        time_step = time.time()
        for step, batch in enumerate(train_loader):
            lr_scheduler.step(epoch=epoch)  # TODO: should this scheduler.step() sit in the outer epoch loop or in this batch loop?
            model.train()
            images = batch[0].to(device)
            #labels = batch[1].to(device)
            targets = batch[2].to(device)  # targets.shape=[batch, n_classes, H, W]

            optimizer.zero_grad()

            outputs = model(images)
            loss = None

            try:
                loss = criterion(input=outputs,target=targets)
            except:
                torch.cuda.empty_cache()
                loss = criterion(input=outputs, target=targets)

            loss.backward()
            optimizer.step()

            if(step%100 == 0):
                print("train after setp:{}, Loss:{:.4%}, time used:{:.4} s".format(step,loss.item(),time.time()-time_step))
                time_step = time.time()
                writer.add_scalars('ForTestOnly_record/Loss', {'train_loss': loss}, i + 1)
                i += 1

        # each epoch save the checkpoint
        model.eval()
        with torch.no_grad():
            total_iou = 0
            for step_val, batch_val in enumerate(val_loader):
                images_val = batch_val[0].to(device)
                labels_val = batch_val[1].to(device)
                targets_val = batch_val[2].to(device)

                outputs_val = model(images_val)
                loss_val = criterion(input=outputs_val,target=targets_val)

                pred = outputs_val.data.max(1)[1].cpu().numpy()  # convert logits to a class-index mask
                gt = labels_val.data.cpu().numpy()
                #break  # for only one val batch

                total_iou += calc_iou(pred,gt,n_class)

            mean_iou = total_iou / (step_val + 1)  # step_val is zero-based, so the batch count is step_val + 1
            print("epoch:{},loss_val:{:.4%},iou:{:.2%},total time used:{:.4}s".format(epoch + 1, loss_val, mean_iou,time.time()-time_epoch))
            writer.add_scalars('Train_record/Loss',{'train_loss':loss,'val_loss':loss_val}, epoch+1)
            writer.add_scalars('Train_record/iou',{"iou":mean_iou}, epoch+1)

            time_epoch = time.time()
            if mean_iou >= best_iou:
                best_iou = mean_iou
                state ={
                    "epoch":epoch + 1,
                    "model_state":model.state_dict(),
                    "best_iou":best_iou,
                }
                if not os.path.isdir('./checkpoints'):
                    os.mkdir('./checkpoints')
                save_path = os.path.join('./checkpoints', "eph_{}_iou_{:.2%}.ckpt.pth".format(epoch+1,best_iou))
                torch.save(state,save_path)
                print("checkpoint saved success")

    writer.close()
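calc_iou above is a project helper that is not shown here; a minimal stand-in with the assumed signature calc_iou(pred, gt, n_class), computing mean IoU from integer class masks, might look like this sketch:

def calc_iou(pred, gt, n_class):
    # pred, gt: integer class masks of shape (N, H, W); returns mean IoU over classes present
    ious = []
    for cls in range(n_class):
        pred_mask, gt_mask = (pred == cls), (gt == cls)
        union = np.logical_or(pred_mask, gt_mask).sum()
        if union == 0:
            continue  # class absent from both prediction and ground truth
        ious.append(np.logical_and(pred_mask, gt_mask).sum() / union)
    return float(np.mean(ious)) if ious else 0.0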
Example #7
def train_fcn(train_img_path, pths_path, batch_size, lr, num_workers,
              epoch_iter, interval):
    #file_num = 1056  # tentative
    if (not os.path.exists(pths_path)):
        os.makedirs(pths_path)

    trainset = fcn_dataset(train_img_path)
    train_loader = data.DataLoader(trainset, batch_size=batch_size, \
                                   shuffle=True, num_workers=num_workers, drop_last=False)
    #criterion = cross_entropy2d()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #device = torch.device("cpu")
    vgg_model = VGGNet(pretrained=False, requires_grad=True, remove_fc=True)
    fcn_model = FCN8s(pretrained_net=vgg_model, n_class=2)
    vgg_model.to(device)
    fcn_model.to(device)

    data_parallel = False
    #model.to(device)
    optimizer = torch.optim.Adam(fcn_model.parameters(), lr=lr)
    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=[epoch_iter // 2],
                                         gamma=0.1)
    #criterion = BinaryDiceLoss()

    for epoch in range(epoch_iter):
        fcn_model.train()

        epoch_loss = 0
        epoch_time = time.time()
        for i, (img, mask) in enumerate(train_loader):
            start_time = time.time()
            img, mask = img.to(device), mask.to(device)

            output = fcn_model(img)
            #loss = nn.BCEWithLogitsLoss(output, mask)
            loss = cross_entropy2d(output, mask)
            #loss = get_dice_loss(output, mask)
            #loss = criterion(output, mask)
            #loss /= len(img)
            epoch_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()
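            # note: MultiStepLR counts scheduler.step() calls, so stepping per batch reaches the epoch_iter // 2 milestone after that many batches rather than epochs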

            print('Epoch is [{}/{}], time consumption is {:.8f}, batch loss is {:.8f}'.format(\
                epoch+1, epoch_iter, time.time()-start_time, loss.item()))

        print('epoch_loss is {:.8f}, epoch_time is {:.8f}'.format(
            epoch_loss / len(train_loader),  # average batch loss over the epoch
            time.time() - epoch_time))
        print(time.asctime(time.localtime(time.time())))
        print('=' * 50)

        if (epoch + 1) % interval == 0:
            state_dict = fcn_model.module.state_dict(
            ) if data_parallel else fcn_model.state_dict()
            torch.save(
                state_dict,
                os.path.join(pths_path,
                             'model_epoch_{}.pth'.format(epoch + 1)))
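cross_entropy2d above comes from elsewhere in the project; assuming output holds (N, C, H, W) logits and mask is an (N, H, W) integer class map, a minimal stand-in with the same call pattern could be:

import torch.nn.functional as F

def cross_entropy2d(output, target):
    # F.cross_entropy accepts (N, C, H, W) logits with an (N, H, W) integer target directly
    return F.cross_entropy(output, target.long())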
def main():

    torch.backends.cudnn.benchmark = True
    os.environ["CUDA_VISIBLE_DEVICES"] = '0,1'
    device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")

    vgg_model = VGGNet(requires_grad=True, remove_fc=True)
    net = FCN8s(pretrained_net=vgg_model,
                n_class=cityscapes.num_classes,
                dropout_rate=0.4)
    print('load model ' + args['snapshot'])

    vgg_model = vgg_model.to(device)
    net = net.to(device)

    if torch.cuda.device_count() > 1:
        net = nn.DataParallel(net)
    net.load_state_dict(
        torch.load(os.path.join(ckpt_path, args['exp_name'],
                                args['snapshot'])))
    net.eval()

    mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    short_size = int(min(args['input_size']) / 0.875)
    val_joint_transform = joint_transforms.Compose([
        joint_transforms.Scale(short_size),
        joint_transforms.CenterCrop(args['input_size'])
    ])
    test_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(*mean_std)])
    target_transform = extended_transforms.MaskToTensor()
    restore_transform = transforms.Compose(
        [extended_transforms.DeNormalize(*mean_std),
         transforms.ToPILImage()])

    # test_set = cityscapes.CityScapes('test', transform=test_transform)

    test_set = cityscapes.CityScapes('test',
                                     joint_transform=val_joint_transform,
                                     transform=test_transform,
                                     target_transform=target_transform)

    test_loader = DataLoader(test_set,
                             batch_size=1,
                             num_workers=8,
                             shuffle=False)

    transform = transforms.ToPILImage()

    check_mkdir(os.path.join(ckpt_path, args['exp_name'], 'test'))

    gts_all, predictions_all = [], []
    count = 0
    for vi, data in enumerate(test_loader):
        # img_name, img = data
        img_name, img, gts = data

        img_name = img_name[0]
        # print(img_name)
        img_name = img_name.split('/')[-1]
        # img.save(os.path.join(ckpt_path, args['exp_name'], 'test', img_name))

        img_transform = restore_transform(img[0])
        # img_transform = img_transform.convert('RGB')
        img_transform.save(
            os.path.join(ckpt_path, args['exp_name'], 'test', img_name))
        img_name = img_name.split('_leftImg8bit.png')[0]

        # img = Variable(img, volatile=True).cuda()
        img, gts = img.to(device), gts.to(device)
        output = net(img)

        prediction = output.data.max(1)[1].squeeze_(1).squeeze_(
            0).cpu().numpy()
        prediction_img = cityscapes.colorize_mask(prediction)
        # print(type(prediction_img))
        prediction_img.save(
            os.path.join(ckpt_path, args['exp_name'], 'test',
                         img_name + '.png'))
        # print(ckpt_path, args['exp_name'], 'test', img_name + '.png')

        print('%d / %d' % (vi + 1, len(test_loader)))
        gts_all.append(gts.data.cpu().numpy())
        predictions_all.append(prediction)
        # break

        # if count == 1:
        #     break
        # count += 1
    gts_all = np.concatenate(gts_all)
    predictions_all = np.stack(predictions_all)  # stack the per-image (H, W) predictions to match gts_all
    acc, acc_cls, mean_iou, _ = evaluate(predictions_all, gts_all,
                                         cityscapes.num_classes)

    print(
        '-----------------------------------------------------------------------------------------------------------'
    )
    print('[acc %.5f], [acc_cls %.5f], [mean_iu %.5f]' %
          (acc, acc_cls, mean_iou))
Example #9
                          batch_size=args.batch_size,
                          shuffle=True,
                          num_workers=4)

test_dataset = ImageDataset(txt_file='testing.txt',
                            root_dir='data/SmithCVPR2013_dataset_resized',
                            warp_on_fly=True,
                            bg_indexs=set([0, 1, 2, 3, 4, 5, 7, 8, 9, 10]),
                            transform=transforms.Compose([ToTensor()]))

test_loader = DataLoader(test_dataset,
                         batch_size=args.batch_size,
                         shuffle=True,
                         num_workers=4)

vgg_model = VGGNet(requires_grad=True)
model = FCN8s(pretrained_net=vgg_model, n_class=2)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
model = model.to(device)

criterion3 = nn.CrossEntropyLoss().to(device)


def train2(epoch, model, train_loader, optimizer):
    loss_list3 = []

    model.train()

    for j, batch in enumerate(train_loader):
        optimizer.zero_grad()
        image, labels, rects = batch['image'].to(device), batch['labels'].to(
def val():
    # setup device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("device = {}".format(device))
    # load data

    demo_path = os.path.join("demo")
    if not os.path.isdir(demo_path):
        os.mkdir(demo_path)

    imgs_path_list = img_loader(demo_path)

    # set model
    pretrain_model = VGGNet(requires_grad=True, remove_fc=True)
    model = FCNs(pretrained_net=pretrain_model, n_class=n_class)
    print("model loading success...")

    # set model running devices
    model.to(device)
    model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
    print("usable gpu num: {}".format(torch.cuda.device_count()))

    # load checkpoints
    load_ckpt_path = os.path.join("checkpoints",load_ckpt_name)

    if torch.cuda.is_available():
        checkpoint = torch.load(load_ckpt_path)
    else:
        checkpoint = torch.load(load_ckpt_path, map_location='cpu')

    model.load_state_dict(checkpoint['model_state'])
    last_best_iou = checkpoint['best_iou']
    start_epoch = checkpoint['epoch']

    print('Checkpoint resume success... last iou:{:.4%}'.format(last_best_iou))
    time_s = time.time()
    model.eval()
    with torch.no_grad():

        images = []
        for i, img_path in zip(range(len(imgs_path_list)), imgs_path_list):
            #image = plt.imread(img_path)
            image = Image.open(img_path)
            image = image.resize((1024, 512), Image.ANTIALIAS)
            image = np.array(image) / 255.
            image = image[:,:,::-1]  # RGB => BGR
            images.append(image)

        images = np.array(images,dtype=np.float32)
        images = images.transpose([0,3,1,2])
        images_tensor = torch.tensor(images, dtype=torch.float32)
        images_tensor = images_tensor.to(device)  # Tensor.to is not in-place; reassign so inference runs on the chosen device
        outputs_val = model(images_tensor)
        pred = outputs_val.data.max(1)[1].cpu().numpy()
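        # pred: (N, H, W) class indices, argmax over the channel dimension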

        for i in range(len(imgs_path_list)):
            plt.figure(i)
            plt.subplot(2,2,1)
            plt.title("Origin image")
            plt.imshow(images[i].transpose([1,2,0])[:,:,::-1])

            rgb_img = index2color(pred[i, :, :])

            plt.subplot(2,2,2)
            plt.title("Semantic Segmentation Predict, mIoU:{:.2%}".format(last_best_iou))
            plt.imshow(rgb_img.astype(int))

            plt.subplot(2,2,3)
            # show color2class bar
            range_cmap = [[i for i in range(n_class)]]
            # custom colormap
            c_map = mpl.colors.LinearSegmentedColormap.from_list('cmap', np.array(get_color_index()[:n_class]) / 255., 256)
            plt.imshow(range_cmap, cmap=c_map)
            plt.xticks(range_cmap[0],
                       ['Void', 'Road', 'Construction', 'Traffic light', 'Nature', 'Sky', 'Person', 'Vehicle'],
                       rotation=50)
        print("time used per image:{:.3}s ".format((time.time() - time_s)/len(imgs_path_list)))
        plt.show()
def main():
    # args = parse_args()

    torch.backends.cudnn.benchmark = True
    os.environ["CUDA_VISIBLE_DEVICES"] = '0,1'
    device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")

    # # if args.seed:
    # random.seed(args.seed)
    # np.random.seed(args.seed)
    # torch.manual_seed(args.seed)
    # # if args.gpu:
    # torch.cuda.manual_seed_all(args.seed)
    seed = 63
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # if args.gpu:
    torch.cuda.manual_seed_all(seed)

    mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    # train_transforms = transforms.Compose([
    # 	transforms.RandomCrop(args['crop_size']),
    # 	transforms.RandomRotation(90),
    # 	transforms.RandomHorizontalFlip(p=0.5),
    # 	transforms.RandomVerticalFlip(p=0.5),

    # 	])
    short_size = int(min(args['input_size']) / 0.875)
    # val_transforms = transforms.Compose([
    # 	transforms.Scale(short_size, interpolation=Image.NEAREST),
    # 	# joint_transforms.Scale(short_size),
    # 	transforms.CenterCrop(args['input_size'])
    # 	])
    train_joint_transform = joint_transforms.Compose([
        # joint_transforms.Scale(short_size),
        joint_transforms.RandomCrop(args['crop_size']),
        joint_transforms.RandomHorizontallyFlip(),
        joint_transforms.RandomRotate(90)
    ])
    val_joint_transform = joint_transforms.Compose([
        joint_transforms.Scale(short_size),
        joint_transforms.CenterCrop(args['input_size'])
    ])
    input_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(*mean_std)])
    target_transform = extended_transforms.MaskToTensor()
    restore_transform = transforms.Compose(
        [extended_transforms.DeNormalize(*mean_std),
         transforms.ToPILImage()])
    visualize = transforms.ToTensor()

    train_set = cityscapes.CityScapes('train',
                                      joint_transform=train_joint_transform,
                                      transform=input_transform,
                                      target_transform=target_transform)
    # train_set = cityscapes.CityScapes('train', transform=train_transforms)
    train_loader = DataLoader(train_set,
                              batch_size=args['train_batch_size'],
                              num_workers=8,
                              shuffle=True)
    val_set = cityscapes.CityScapes('val',
                                    joint_transform=val_joint_transform,
                                    transform=input_transform,
                                    target_transform=target_transform)
    # val_set = cityscapes.CityScapes('val', transform=val_transforms)
    val_loader = DataLoader(val_set,
                            batch_size=args['val_batch_size'],
                            num_workers=8,
                            shuffle=True)

    print(len(train_loader), len(val_loader))

    # sdf

    vgg_model = VGGNet(requires_grad=True, remove_fc=True)
    net = FCN8s(pretrained_net=vgg_model,
                n_class=cityscapes.num_classes,
                dropout_rate=0.4)
    # net.apply(init_weights)
    criterion = nn.CrossEntropyLoss(ignore_index=cityscapes.ignore_label)
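    # pixels labelled with ignore_label are excluded from the loss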

    optimizer = optim.Adam(net.parameters(), lr=1e-4)

    check_mkdir(ckpt_path)
    check_mkdir(os.path.join(ckpt_path, exp_name))
    open(
        os.path.join(ckpt_path, exp_name,
                     str(datetime.datetime.now()) + '.txt'),
        'w').write(str(args) + '\n\n')

    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, 'min', patience=args['lr_patience'], min_lr=1e-10)

    vgg_model = vgg_model.to(device)
    net = net.to(device)

    if torch.cuda.device_count() > 1:
        net = nn.DataParallel(net)

    if len(args['snapshot']) == 0:
        curr_epoch = 1
        args['best_record'] = {
            'epoch': 0,
            'val_loss': 1e10,
            'acc': 0,
            'acc_cls': 0,
            'mean_iu': 0
        }
    else:
        print('training resumes from ' + args['snapshot'])
        net.load_state_dict(
            torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'])))
        split_snapshot = args['snapshot'].split('_')
        curr_epoch = int(split_snapshot[1]) + 1
        args['best_record'] = {
            'epoch': int(split_snapshot[1]),
            'val_loss': float(split_snapshot[3]),
            'acc': float(split_snapshot[5]),
            'acc_cls': float(split_snapshot[7]),
            'mean_iu': float(split_snapshot[9][:-4])
        }

    criterion.to(device)

    for epoch in range(curr_epoch, args['epoch_num'] + 1):
        train(train_loader, net, device, criterion, optimizer, epoch, args)
        val_loss = validate(val_loader, net, device, criterion, optimizer,
                            epoch, args, restore_transform, visualize)
        scheduler.step(val_loss)
Example #12
print('there are {} devices'.format(device_amount))
train_loader = DataLoader(train_set,
                          batch_size=batch_size,
                          shuffle=False,
                          num_workers=multi_thread_loader)
val_loader = DataLoader(val_set,
                        batch_size=device_amount,
                        shuffle=False,
                        num_workers=multi_thread_loader)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

if args.model:
    fcn_model = torch.load(args.model).to(device)
else:
    vgg_model = VGGNet(requires_grad=True, model=vgg_config,
                       remove_fc=True).to(device)
    fcn_model = nn.DataParallel(FCNs(pretrained_net=vgg_model,
                                     n_class=n_class)).to(device)

criterion = nn.NLLLoss()

params = list()
for name, param in fcn_model.named_parameters():
    if 'pretrained_net' in name:  # use a smaller learning rate for the pretrained VGG layers
        params += [{'params': param, 'lr': lr_pretrain}]
    else:
        params += [{'params': param, 'lr': lr}]

optimizer = optim.RMSprop(params, weight_decay=w_decay)
optimizer = nn.DataParallel(optimizer)
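# note: the optimizer itself is wrapped in DataParallel here, which is why the scheduler below receives optimizer.module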
scheduler = lr_scheduler.StepLR(optimizer.module,
Example #13
CamVid_train=camvid(data_dir,label_idx_dir,cam_train,cam_val,train=True,transform=tsfm1)
CamVid_val=camvid(data_dir,label_idx_dir,cam_train,cam_val,train=False,transform=tsfm2)


# In[20]:


train_loader=DataLoader(CamVid_train,batch_size=batch_size,shuffle=True)
eval_loader=DataLoader(CamVid_val,batch_size=batch_size,shuffle=True)


# In[21]:


vgg_model=VGGNet(pretrained=True,model='vgg16',requires_grad=True,remove_fc=True)
fcn_model=FCNs(n_classes=n_classes,pretrained_net=vgg_model)


# In[22]:


criterion=nn.BCEWithLogitsLoss()
optimizer=optim.RMSprop(fcn_model.parameters(),lr=lr,momentum=0,weight_decay=weight_decay)
scheduler=lr_scheduler.StepLR(optimizer,step_size=step_size,gamma=gamma)


# In[23]:


def train():
def main():
    args = parse_args()

    torch.backends.cudnn.benchmark = True
    os.environ["CUDA_VISIBLE_DEVICES"] = '0,1'
    device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")

    # Random seed for reproducibility
    if args.seed:
        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
        if args.gpu:
            torch.cuda.manual_seed_all(args.seed)

    # seed = 63
    # random.seed(seed)
    # np.random.seed(seed)
    # torch.manual_seed(seed)
    # # if args.gpu:
    # torch.cuda.manual_seed_all(seed)

    denormalize_argument = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

    train_transforms = transforms.Compose([
        transforms.RandomCrop((args.crop_size, args.crop_size)),
        transforms.RandomRotation(90),
        transforms.RandomHorizontalFlip(p=0.5),
    ])

    # train_joint_transform = joint_transforms.Compose([
    # 	# joint_transforms.Scale(img_resize_shape),
    # 	joint_transforms.RandomCrop(args['crop_size']),
    # 	joint_transforms.RandomHorizontallyFlip(),
    # 	joint_transforms.RandomRotate(90)
    # ])

    img_resize_shape = int(min(args.input_size) / 0.8)
    # val_transforms = transforms.Compose([
    # 	transforms.Scale(img_resize_shape, interpolation=Image.NEAREST),
    # 	transforms.CenterCrop(args['input_size'])
    # 	])

    val_joint_transform = joint_transforms.Compose([
        joint_transforms.Scale(img_resize_shape),
        joint_transforms.CenterCrop(args.input_size)
    ])
    input_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    target_transform = extended_transforms.MaskToTensor()
    restore_transform = transforms.Compose([
        extended_transforms.DeNormalize(*denormalize_argument),
        transforms.ToPILImage()
    ])
    visualize = transforms.ToTensor()

    # train_set = games_data.CityScapes('train', joint_transform=train_joint_transform,
    # 								  transform=input_transform, target_transform=target_transform)
    train_set = games_data.CityScapes('train', transform=train_transforms)
    # train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=8, shuffle=True)
    train_loader = DataLoader(train_set,
                              batch_size=args.training_batch_size,
                              num_workers=8,
                              shuffle=True)
    val_set = games_data.CityScapes('val',
                                    joint_transform=val_joint_transform,
                                    transform=input_transform,
                                    target_transform=target_transform)
    val_loader = DataLoader(val_set,
                            batch_size=args.val_batch_size,
                            num_workers=8,
                            shuffle=True)

    print(len(train_loader), len(val_loader))
    # sdf

    # Load pretrained VGG model
    vgg_model = VGGNet(requires_grad=True, remove_fc=True)

    # FCN architecture load
    model = FCN8s(pretrained_net=vgg_model,
                  n_class=games_data.num_classes,
                  dropout_rate=0.4)

    # Loss function
    criterion = nn.CrossEntropyLoss(ignore_index=games_data.ignore_label)

    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=1e-4)

    # Create directory for checkpoints
    exist_directory(ckpt_path)
    exist_directory(os.path.join(ckpt_path, exp_name))
    open(
        os.path.join(ckpt_path, exp_name,
                     str(datetime.datetime.now()) + '.txt'),
        'w').write(str(args) + '\n\n')

    # Learning rate scheduler
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     'min',
                                                     patience=args.lr_patience,
                                                     min_lr=1e-10)

    # Send model to CUDA device
    vgg_model = vgg_model.to(device)
    model = model.to(device)

    # Use if more than 1 GPU
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)

    if len(args.snapshot) == 0:
        curr_epoch = 1
        best_args['best_record'] = {
            'epoch': 0,
            'val_loss': 1e10,
            'acc': 0,
            'acc_cls': 0,
            'mean_iu': 0
        }
    else:
        print('training resumes from ' + args.snapshot)
        model.load_state_dict(
            torch.load(os.path.join(ckpt_path, exp_name, args.snapshot)))
        split_snapshot = args.snapshot.split('_')
        curr_epoch = int(split_snapshot[1]) + 1
        best_args['best_record'] = {
            'epoch': int(split_snapshot[1]),
            'val_loss': float(split_snapshot[3]),
            'acc': float(split_snapshot[5]),
            'acc_cls': float(split_snapshot[7]),
            'mean_iu': float(split_snapshot[9][:-4])
        }

    criterion.to(device)

    for epoch in range(curr_epoch, args.epochs + 1):
        train(train_loader, model, device, criterion, optimizer, epoch, args)
        val_loss = validate(val_loader, model, device, criterion, optimizer,
                            epoch, args, restore_transform, visualize)
        scheduler.step(val_loss)
Example #15
#val_data = CityscapesDataset( phase='val', flip_rate=0)

root_dir = "/shuju/huhao/synthia-cityscapes/RAND_CITYSCAPES/"
train_file = os.path.join(root_dir, "train.txt")
val_file = os.path.join(root_dir, "val.txt")
val_data = SynthiaCityscapesDataset(phase='val', flip_rate=0)
val_loader = DataLoader(val_data, batch_size=1, num_workers=8)
# create dir for model
model_dir = "da-models/"

model_path = os.path.join(model_dir, "test.pth")

use_gpu = torch.cuda.is_available()
num_gpu = list(range(torch.cuda.device_count()))

vgg_model = VGGNet(model='vgg19', requires_grad=True,
                   remove_fc=True)  #####change to 19
deconv_model = Deconv(n_class=n_class)  ####change to torch.load

if use_gpu:
    ts = time.time()
    vgg_model = vgg_model.cuda()
    vgg_model = nn.DataParallel(vgg_model, device_ids=num_gpu)
    deconv_model = deconv_model.cuda()
    deconv_model = nn.DataParallel(deconv_model, device_ids=num_gpu)
    #gan_model = gan_model.cuda()
    #gan_model = nn.DataParallel(gan_model, device_ids=num_gpu)

    print("Finish cuda loading, time elapsed {}".format(time.time() - ts))

vgg_model.load_state_dict(
    torch.load(model_dir + "vgg_model_epoch10.pth").state_dict())
Example #16
    x = random.uniform(0, 1)
    cumulative_probability = 0.0
    for item, item_probability in zip(some_list, probabilities):
        cumulative_probability += item_probability
        if x < cumulative_probability:
            break
    return item


if __name__ == '__main__':
    # load model from

    # net = unet_model.UNet(1, 11)
    # net = unet_model.UNet_twoPart(1, (6, 5))
    # net = unet_model.UNet_double()
    vgg_model = VGGNet(requires_grad=True, pretrained=False)
    net = FCN8s(pretrained_net=vgg_model, n_class=11)

    net.float().cuda()
    net.eval()
    # torch.distributed.init_process_group(backend='nccl', init_method='tcp://localhost:8000', rank=0, world_size=1)
    # net = nn.parallel.DistributedDataParallel(net.float().cuda())
    if config_test['checkout'] != "":
        net.load_state_dict(torch.load(config_test['checkout']))
    else:
        print("choice the mode file please!")

    testDataset = myTestAdata(config_test['test_jsondir'], config_test['test_img_dir'], transforms=submitTestTransform)
    testDataLoader = DataLoader(testDataset, config_test['batch_size'], False)
    Loader_num = len(testDataLoader)
    all_pred_point = []
Example #17
#
# train_data = Cityscapes(root, split='train', mode='fine',
#       target_type='semantic', transform=trans, target_transform=trans)
# train_loader = DataLoader(train_data, batch_size=batch_size,
#                           shuffle=True, num_workers=0)
#
# val_data = Cityscapes(root, split='val', mode='fine',
#     target_type='semantic', transform=trans, target_transform=trans)
# val_loader = DataLoader(val_data, batch_size=batch_size,
#     shuffle=True, num_workers=0)

# img, show = train_data[0]
# print(show)
#############################################

vgg_model = VGGNet(requires_grad=True, remove_fc=True)
fcn_model = FCNs(pretrained_net=vgg_model, n_class=n_class)

if use_gpu:
    ts = time.time()
    vgg_model = vgg_model.cuda()
    fcn_model = fcn_model.cuda()
    fcn_model = nn.DataParallel(fcn_model, device_ids=num_gpu)
    print("Finish cuda loading, time elapsed {}".format(time.time() - ts))

criterion = nn.BCEWithLogitsLoss()
optimizer = optim.RMSprop(fcn_model.parameters(),
                          lr=lr,
                          momentum=momentum,
                          weight_decay=w_decay)
scheduler = lr_scheduler.StepLR(
Example #18
model_path = os.path.join(model_dir, configs)

use_gpu = torch.cuda.is_available()
num_gpu = list(range(torch.cuda.device_count()))

train_data = GTAVDataset(phase='train')
train_loader = DataLoader(train_data,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=8)

val_data = GTAVDataset(phase='val', flip_rate=0)
val_loader = DataLoader(val_data, batch_size=1, num_workers=8)

vgg_model = VGGNet(model='vgg19',
                   requires_grad=True,
                   remove_fc=True,
                   dataset="gtav")  #####change to 19
fcn_model = FCNs(pretrained_net=vgg_model, n_class=n_class)

if use_gpu:
    ts = time.time()
    vgg_model = vgg_model.cuda()
    fcn_model = fcn_model.cuda()
    fcn_model = nn.DataParallel(fcn_model, device_ids=num_gpu)
    print("Finish cuda loading, time elapsed {}".format(time.time() - ts))

criterion = nn.CrossEntropyLoss()
optimizer = optim.RMSprop(fcn_model.parameters(),
                          lr=lr,
                          momentum=momentum,
                          weight_decay=w_decay)