Ejemplo n.º 1
0
 def __init__(self):
     """Load the pretrained segmentation and matching networks in eval mode."""
     # Instance-segmentation network: 2 output classes, float weights,
     # moved to the module-level `device` before the checkpoint is loaded.
     seg_net = get_model_instance_segmentation(2).to(
         device, dtype=torch.float)
     seg_net.load_state_dict(torch.load(os.path.join('19.pth')))
     seg_net.eval()
     self.sdmrcnn_model = seg_net

     # Siamese matching network always runs on the GPU.
     matcher = SiameseNetwork().cuda()
     matcher.load_state_dict(torch.load('siamese.pt'))
     matcher.eval()
     self.siamese_model = matcher
Ejemplo n.º 2
0
# Inference configuration for a Mask R-CNN trained on ClothCoParse.
opt.path2model = 'C:/MyPrograms/saved_models/ClothCoParse/mask_rcnn-Apr-4-at-15-45/'  # keep background

opt.person_detection = False

opt.HPC_run = 0
opt.remove_background = True
opt.train_percentage = 0.5
opt.batch_size = 1
opt.train_shuffle = 0
opt.n_cpu = 0
opt.cuda = True  # this will definitely work on the CPU if it is False

opt.load_via_GUI = True

# Build the model for the dataset's class count and load the saved weights.
# map_location lets a GPU-trained checkpoint be loaded on a CPU-only machine.
device = torch.device('cuda' if opt.cuda else 'cpu')
model = get_model_instance_segmentation(number_of_classes(opt))
print("loading model", opt.model_name)
model.load_state_dict(
    torch.load(opt.path2model + opt.model_name, map_location=device))
model.to(device)
model.eval()
data_loader, data_loader_test = get_dataloaders(opt)

# Let the user pick an image through a file dialog, then segment it.
# NOTE(review): this call is truncated in this excerpt — the remaining
# keyword arguments and the closing parenthesis continue past this view.
if opt.load_via_GUI:
    image_name = get_any_image()
    instance_segmentation_api(model,
                              image_name,
                              device,
                              threshold=0.7,
                              rect_th=1,
                              text_size=1,
Ejemplo n.º 3
0
def main():
    """Fine-tune Mask R-CNN on the PennFudan pedestrian dataset.

    Trains for 10 epochs on the GPU when available, evaluating on a
    held-out split of 50 images after every epoch.
    """
    # Prefer the GPU; fall back to the CPU when CUDA is unavailable.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Two classes only: background and person.
    num_classes = 2

    # Same root for both splits; only the transforms differ
    # (augmentation on for training, off for evaluation).
    train_data = PennFudanDatasets('E:\\zflPro\\PennFudanPed\\',
                                   get_transform(train=True))
    eval_data = PennFudanDatasets('E:\\zflPro\\PennFudanPed\\',
                                  get_transform(train=False))

    # Shuffle once, then hold out the last 50 samples for evaluation.
    order = torch.randperm(len(train_data)).tolist()
    train_subset = torch.utils.data.Subset(train_data, order[:-50])
    eval_subset = torch.utils.data.Subset(eval_data, order[-50:])

    # Training and validation loaders share workers and collation.
    loader_kwargs = dict(num_workers=4, collate_fn=utils.collate_fn)
    train_loader = torch.utils.data.DataLoader(train_subset,
                                               batch_size=2,
                                               shuffle=True,
                                               **loader_kwargs)
    eval_loader = torch.utils.data.DataLoader(eval_subset,
                                              batch_size=1,
                                              shuffle=False,
                                              **loader_kwargs)

    # Build the model via the helper and place it on the chosen device.
    model = get_model_instance_segmentation(num_classes)
    model.to(device)

    # SGD over the trainable parameters only.
    trainable = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(trainable,
                                lr=0.005,
                                momentum=0.9,
                                weight_decay=0.0005)
    # Decay the learning rate by 10x every 3 epochs.
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=3,
                                                   gamma=0.1)

    for epoch in range(10):
        # One training pass, logging every 10 iterations.
        train_one_epoch(model,
                        optimizer,
                        train_loader,
                        device,
                        epoch,
                        print_freq=10)
        lr_scheduler.step()
        # Evaluate on the held-out split.
        evaluate(model, eval_loader, device=device)

    print("That's it!")
Ejemplo n.º 4
0
def main():
    """Train Mask R-CNN on the malaria or PennFudan dataset.

    Configuration comes from the module-level ``hparams`` and ``args``
    globals; ``args.dataset`` selects the dataset. Logs to TensorBoard
    and checkpoints model/optimizer state after every epoch.
    """
    global hparams, args
    # train on the GPU or on the CPU, if a GPU is not available
    # NOTE(review): when CUDA is unavailable this silently ignores
    # hparams.device and falls back to the CPU.
    device = torch.device(
        hparams.device) if torch.cuda.is_available() else torch.device('cpu')

    if (args.dataset == 'malaria'):
        hparams.dataset_root = 'malaria'
        hparams.exp_name = f'maskrcnn-{hparams.dataset_root}'
        dataset = MalariaDataset(hparams.train_dir, hparams.train_csv,
                                 get_transform(train=True))
        dataset_test = MalariaDataset(hparams.test_dir, hparams.test_csv,
                                      get_transform(False))
    else:
        # Default: PennFudan pedestrian dataset (background + person).
        dataset = PennFudanDataset('PennFudanPed', get_transform(train=True))
        dataset_test = PennFudanDataset('PennFudanPed',
                                        get_transform(train=False))
        hparams.num_classes = 2

    # One TensorBoard run directory per experiment/timestamp.
    writer = SummaryWriter(f'runs/{hparams.exp_name}_{hparams.timestamp}')

    # define training and validation data loaders
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=2,
                                              shuffle=True,
                                              num_workers=4,
                                              collate_fn=utils.collate_fn)

    data_loader_test = torch.utils.data.DataLoader(dataset_test,
                                                   batch_size=1,
                                                   shuffle=False,
                                                   num_workers=4,
                                                   collate_fn=utils.collate_fn)

    # get the model using our helper function
    model = get_model_instance_segmentation(hparams)

    # move model to the right device
    model.to(device)

    # Keep a handle to the unwrapped model so checkpoints are saved
    # without the DistributedDataParallel wrapper's key prefixes.
    model_without_ddp = model
    if hparams.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[hparams.device_ids])
        model_without_ddp = model.module

    # construct an optimizer over the trainable parameters only
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=0.005,
                                momentum=0.9,
                                weight_decay=0.0005)
    # learning rate scheduler: decay LR by 10x every 3 epochs
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=3,
                                                   gamma=0.1)

    # number of epochs is configured via hparams
    num_epochs = hparams.num_epochs

    for epoch in range(num_epochs):
        # train for one epoch, printing every 10 iterations
        train_one_epoch(model,
                        optimizer,
                        data_loader,
                        device,
                        epoch,
                        print_freq=10,
                        writer=writer)
        # update the learning rate
        lr_scheduler.step()
        # evaluate on the test dataset
        evaluate(model, data_loader_test, device=device)

        # checkpoint model/optimizer state after every epoch
        torch.save(
            {
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch
            }, os.path.join(hparams.model_dir, 'model_{}.pth'.format(epoch)))

    print("That's it!")
Ejemplo n.º 5
0
# 1-D clustering method used downstream; one of {'MeanSift', 'Diff', '2nd_fcm'}.
cnf.clsuter_1D_method='Diff'# 'MeanSift'  # {'MeanSift', 'Diff', '2nd_fcm'}

dataset_name = '' # 'ClothCoParse'
cnf.model = 'ModaNet' # {'ModaNet', 'ClothCoParse'}

# Dataset whose labels follow the class set of the chosen model.
chanel_data = ChenelDataset(transforms_=None, model_nm = cnf.model)

# Pick the checkpoint that matches the selected model/dataset.
if cnf.model == 'ModaNet':
    cnf.model_name='maskrcnn_50.pth'
    cnf.path2model = 'C:/MyPrograms/saved_models/Modanet/mask_rcnn-Apr-26-at-15-58/'
else:    
    cnf.model_name='maskrcnn_700.pth'
    cnf.path2model = 'C:/MyPrograms/saved_models/ClothCoParse/mask_rcnn-Apr-8-at-2-8/' # keep background

# Build the model and load the trained weights
# (map_location lets a GPU checkpoint load on a CPU-only machine).
device = torch.device('cuda' if cnf.cuda else 'cpu')
model = get_model_instance_segmentation(chanel_data.number_of_classes()) 
print("loading model", cnf.model_name )        
model.load_state_dict(torch.load(cnf.path2model+cnf.model_name,  map_location=device ))  
model.to(device)
model.eval()


cnf.num_tsts= 1000 # upper bound on how many test images are processed


# Segment each image in turn, up to cnf.num_tsts images.
# NOTE(review): the loop body is truncated in this excerpt — it continues
# past this view.
for i, item in enumerate(chanel_data):
    if i >= cnf.num_tsts: break
    image = item[0]    
    masks, labels = instance_segmentation_api(model, image, device, chanel_data.class_names, 
                                              threshold=0.7, rect_th=1, text_size=1, text_th=3)    
    masked_img = []
Ejemplo n.º 6
0
# Optionally redirect all stdout to a per-experiment log file.
# NOTE(review): the file handle is never explicitly closed; it relies on
# interpreter shutdown to flush.
if  opt.redirect_std_to_file:    
    out_file_name = "saved_models/%s" % opt.experiment_name
    print('Output sent to ', out_file_name)
    sys.stdout = open(out_file_name+'.txt',  'w')

print(opt)


# train on the GPU or on the CPU, if a GPU is not available
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

# use our dataset and defined transformations
data_loader, data_loader_test, num_classes = get_dataloaders(opt)

model = get_model_instance_segmentation( num_classes, pretrained_model=opt.pretrained_model )    
if opt.epoch != 0:
    # Resume from an earlier checkpoint of this experiment.
    print("loading model %s maskrcnn_%d.pth" % (opt.experiment_name, opt.epoch) )        
    model.load_state_dict(torch.load("saved_models/%s/maskrcnn_%d.pth" % (opt.experiment_name, opt.epoch)))
   
model.to(device)

# construct an optimizer over the trainable parameters only
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=opt.lr, momentum=0.9, weight_decay=0.0005)
# and a learning rate scheduler
# NOTE(review): any opt.lr_scheduler value other than 'StepLR'/'CyclicLR'
# leaves lr_scheduler undefined — later code using it would raise NameError.
if opt.lr_scheduler=='StepLR':
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)
elif opt.lr_scheduler=='CyclicLR':
    lr_scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.001, max_lr=0.05)
Ejemplo n.º 7
0
from my_dataset import PennFudanDatasets
from models import get_model_instance_segmentation, get_transform
from PIL import Image

# Inference demo: run Mask R-CNN on a single PennFudan test image.
dataset_test_real = PennFudanDatasets('PennFudanPed',
                                      get_transform(train=False))
# Fixed seed so the permutation (and hence the chosen image) is reproducible.
torch.manual_seed(1)
indices = torch.randperm(len(dataset_test_real)).tolist()
dataset_test = torch.utils.data.Subset(dataset_test_real, indices[:])
# pick one image from the test set
img, _ = dataset_test[0]

device = torch.device('cuda') if torch.cuda.is_available() else torch.device(
    'cpu')

# Two classes only: background and person.
num_classes = 2
# get the model using the helper function
# NOTE(review): no checkpoint is loaded here, so the prediction uses the
# model's initial/pretrained weights — confirm this is intended.
model = get_model_instance_segmentation(num_classes)
# move model to the right device
model.to(device)

# put the model in evaluation mode
model.eval()
with torch.no_grad():
    prediction = model([img.to(device)])
    print(prediction)
    # info: print the raw prediction output
    # vis: visualization snippets, kept for reference
    # Image.fromarray(img.mul(255).permute(1, 2, 0).byte().numpy())
    # Image.fromarray(prediction[0]['masks'][0, 0].mul(255).byte().cpu().numpy())