Ejemplo n.º 1
0
def remove_background(img):
    """Zero out every pixel outside the highest-scoring detected person.

    Loads the people-segmentation checkpoint, runs one inference pass on
    ``img``, thresholds the top mask, and blanks the background.

    Args:
        img: a PIL image (anything ``transforms.ToTensor`` accepts).

    Returns:
        HxWxC uint8 numpy array with the background zeroed, or the
        unmodified converted image when the top detection score is
        below 0.8.
    """
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    segmentation_model_path = './models/people_segmentation.pth'

    # Two classes: background + person.
    num_classes = 2

    model = get_instance_segmentation_model(num_classes)
    # map_location keeps loading working on CPU-only machines even when
    # the checkpoint was saved from a GPU run.
    model.load_state_dict(torch.load(segmentation_model_path,
                                     map_location=device))
    model.to(device)
    trans = transforms.ToTensor()
    img = trans(img)

    model.eval()
    with torch.no_grad():
        prediction = model([img.to(device)])

    # Back to HxWxC uint8 for numpy post-processing.
    img = img.mul(255).permute(1, 2, 0).byte().numpy()

    # Bail out early when the best detection is not confident enough.
    if prediction[0]['scores'][0] < 0.8:
        return img

    pred_mask = prediction[0]['masks'][0, 0].mul(255).byte().cpu().numpy()
    # Threshold the soft mask; True marks person (foreground) pixels.
    # (The original's misleading name `background` and its dead
    # cv2.split/merge channel swap have been removed.)
    foreground = np.squeeze(pred_mask) > 0.6 * 255
    foreground = np.expand_dims(foreground, 2)

    # Broadcasting zeroes every background pixel across all channels.
    return foreground * img
Ejemplo n.º 2
0
        rle = rle_encode(m)
        lines.append((rle, labels[o]))
    return lines



# 46 + 1 — presumably 46 garment categories plus one background class
# for the fashion dataset below; TODO confirm against FashionDataset.
num_classes = 46 + 1


# Inference-only view of the test set driven by the sample submission;
# no CV folds and no augmentation transforms.
dataset_test = FashionDataset("../input/test/", "../input/sample_submission.csv", 1024, 1024,
                               folds=[], transforms=None)

sample_df = pd.read_csv("../input/sample_submission.csv")


# Rebuild the model and restore the trained weights from disk.
model_ft = get_instance_segmentation_model(num_classes)
model_ft.load_state_dict(torch.load("model.bin"))
model_ft = model_ft.to(device)

# Freeze all parameters: this model is used for inference only.
for param in model_ft.parameters():
    param.requires_grad = False

model_ft.eval()


# Accumulators for the submission rows produced by the (unseen) loop below.
sub_list = []
missing_count = 0
submission = []
ctr = 0

# Progress bar over 3200 items — presumably the test-set size; verify.
tk0 = tqdm(range(3200))
Ejemplo n.º 3
0
# define training and validation data loaders
data_loader = torch.utils.data.DataLoader(
    dataset, batch_size=4, shuffle=True, num_workers=0,
    collate_fn=utils.collate_fn)
# NOTE(review): shuffle=True on the test loader is unusual — evaluation
# order is normally fixed; confirm this is intentional.
data_loader_test = torch.utils.data.DataLoader(
    dataset_test, batch_size=1, shuffle=True, num_workers=0,
    collate_fn=utils.collate_fn)

# the dataset has two classes only - background and person
num_classes = 2
# NOTE(review): num_classes is never passed to the model builder below —
# confirm the helper's default matches.

# get the model using the helper function
#bone: 'resnet50'/'mobilenet_v2'/'googlenet'/'densenet121'/'resnet50'/'shufflenet_v2_x1_0'/'inception_v3'/'squeezenet1_0'/
#attention:True/False
model = get_instance_segmentation_model(bone='resnet50',attention=True)
model.cuda()
# Wrap for multi-GPU data parallelism.
model = nn.DataParallel(model)

# move model to the right device
# NOTE(review): the model is already on GPU via .cuda(); this .to(device)
# is redundant unless `device` differs — verify what `device` holds.
#model.load_state_dict(torch.load('/disk2/yzy_cell/cell_model_densenet'))
model.to(device)

# construct an optimizer
# Only parameters that still require gradients are optimized.
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.0005,
                            momentum=0.9, weight_decay=0.0005)

# the learning rate scheduler decreases the learning rate by 10x every 3 epochs
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                               step_size=20,
Ejemplo n.º 4
0
    counts = {}
    for i in range(sequence.shape[0]):
        for x in sequence[i]:
            if x in counts:
                counts[x] += 1
            else:
                counts[x] = 1
    return counts


# the dataset has two classes only - background and person
num_classes = 2
# NOTE(review): num_classes is defined but not passed to the model
# builder below — confirm the densenet helper defaults to 2 classes.

# get the model using the helper function
#bone: 'resnet50'/'mobilenet_v2'/'googlenet'/'densenet121'/'resnet50'/'shufflenet_v2_x1_0'/'inception_v3'/'squeezenet1_0'/
model = get_instance_segmentation_model(bone='densenet121', attention=False)
model.cuda()
# Wrap for multi-GPU data parallelism.
model = nn.DataParallel(model)

# move model to the right device
# NOTE(review): .cuda() above already placed the model on GPU; this is
# redundant unless `device` is something other than the default GPU.
model.to(device)


def test(img, m):
    with torch.no_grad():
        prediction = model([img.to(device)])

    Image.fromarray(img.mul(255).permute(
        1, 2, 0).byte().numpy()).save('./eval/image/' + str(m) + '.png')

    mask = prediction[0]['masks'].mul(255).byte().cpu().numpy()
Ejemplo n.º 5
0
                                          collate_fn=utils.collate_fn)

data_loader_test = torch.utils.data.DataLoader(dataset_test,
                                               batch_size=2,
                                               shuffle=False,
                                               num_workers=2,
                                               collate_fn=utils.collate_fn)

# Use the GPU selected on the command line when available, else CPU.
device = torch.device(
    f'cuda:{args.GPU}') if torch.cuda.is_available() else torch.device('cpu')

# 21 output classes — presumably 20 foreground categories plus
# background (the previous "background and person" comment was stale
# and did not match this value).
num_classes = 21

# get the model using our helper function
model = get_instance_segmentation_model(num_classes, args.backbone,
                                        args.dropout)
# move model to the right device
model.to(device)

# construct an optimizer
# Only trainable (requires_grad) parameters are handed to SGD.
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params,
                            lr=0.005,
                            momentum=0.9,
                            weight_decay=0.0005)

# and a learning rate scheduler which decreases the learning rate by
# 10x every 3 epochs
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                               step_size=3,
                                               gamma=0.1)
Ejemplo n.º 6
0
def main():
    """Train a pedestrian instance-segmentation model and save its weights.

    Parses command-line options, downloads the PennFudan dataset, trains
    for ``--n_epochs`` epochs (evaluating after each), and writes the
    final ``state_dict`` to ``--output_dir/--model_name``.
    """
    print("Torch version:", torch.__version__)

    # Command-line options.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name',
                        type=str,
                        default="pytorch-peds.pt",
                        help='name with which to register your model')
    parser.add_argument('--output_dir',
                        default="local-outputs",
                        type=str,
                        help='output directory')
    parser.add_argument('--n_epochs',
                        type=int,
                        default=10,
                        help='number of epochs')
    opts = parser.parse_args()

    # Create the (possibly nested) output directory up front.
    os.makedirs(name=opts.output_dir, exist_ok=True)

    # Fetch the data and build train/test views with their transforms.
    root_dir = download_data()
    train_ds = PennFudanDataset(root=root_dir,
                                transforms=get_transform(train=True))
    test_ds = PennFudanDataset(root=root_dir,
                               transforms=get_transform(train=False))

    # Deterministic shuffle, then hold out the last 50 samples for test.
    torch.manual_seed(1)
    order = torch.randperm(len(train_ds)).tolist()
    train_ds = torch.utils.data.Subset(train_ds, order[:-50])
    test_ds = torch.utils.data.Subset(test_ds, order[-50:])

    # Training and validation data loaders.
    train_loader = torch.utils.data.DataLoader(train_ds,
                                               batch_size=2,
                                               shuffle=True,
                                               num_workers=4,
                                               collate_fn=utils.collate_fn)
    test_loader = torch.utils.data.DataLoader(test_ds,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=4,
                                              collate_fn=utils.collate_fn)

    if torch.cuda.is_available():
        print('Using GPU')
        device = torch.device('cuda')
    else:
        print('Using CPU')
        device = torch.device('cpu')

    # Class count comes from the module-level NUM_CLASSES constant.
    model = get_instance_segmentation_model(NUM_CLASSES)
    model.to(device)

    # SGD over the trainable parameters only.
    trainable = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(trainable,
                                lr=0.005,
                                momentum=0.9,
                                weight_decay=0.0005)
    # Decay the learning rate 10x every 3 epochs.
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=3,
                                                   gamma=0.1)

    for epoch in range(opts.n_epochs):
        # One training pass, logging every 10 iterations.
        train_one_epoch(model,
                        optimizer,
                        train_loader,
                        device,
                        epoch,
                        print_freq=10)
        lr_scheduler.step()
        # Measure test-set performance after every epoch.
        evaluate(model, test_loader, device=device)

    # Persist only the state dict — the recommended method, per
    # https://pytorch.org/tutorials/beginner/saving_loading_models.html
    torch.save(model.state_dict(),
               os.path.join(opts.output_dir, opts.model_name))