Example 1
    def prepare(self):
        self.train_set = FashionDataset(self.train_csv,
                                        transform=self.transform)
        self.test_set = FashionDataset(self.test_csv, transform=self.transform)

        self.train_loader = DataLoader(self.train_set,
                                       batch_size=100,
                                       num_workers=self.num_workers)
        self.test_loader = DataLoader(self.test_set,
                                      batch_size=100,
                                      num_workers=self.num_workers)

        self.model = FashionCNN()
        self.model.to(self.device)
Example 2
def train():
    set_seeds()
    logging.info('Loading masks...')
    with open(args.json_file, 'r') as f:
        masks = json.load(f)

    # for this example, use only the first 200 images
    filename = list(masks.keys())[:200]

    global devices, num_steps_per_epoch

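    # Collect the XLA devices to train on (at most args.num_cores of them)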
    devices = (xm.get_xla_supported_devices(
        max_devices=args.num_cores) if args.num_cores != 0 else [])

    logging.info('Start training model')
    if args.model_name == 'deeplabv3_resnet50':
        m = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=False)
    else:
        m = torchvision.models.segmentation.fcn_resnet50(pretrained=False)

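    # Replace the final classifier layer so the head outputs 46 classes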
    m.classifier[-1] = torch.nn.Conv2d(m.classifier[-1].in_channels, 46, 1)
    # wrapped for parallel training
    model = dp.DataParallel(m, device_ids=devices)

    ds = FashionDataset(filename,
                        masks,
                        path=args.data_path,
                        transform=train_transform,
                        size=(256, 256))
    loader = D.DataLoader(ds,
                          batch_size=args.batch_size,
                          shuffle=True,
                          num_workers=args.num_worker)

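    # Each XLA device processes an equal share of the loader's batches per epoch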
    num_steps_per_epoch = len(loader) // len(devices)

    for epoch in range(1, args.epochs + 1):

        train_loss = model(train_loop_fn, loader)
        train_loss = np.array(train_loss).mean()

        logging.info('[Epoch {:3d}] Train loss: {:.3f}'.format(
            epoch, train_loss))

    # Save weights
    state_dict = model.models[0].to('cpu').state_dict()
    torch.save(state_dict, args.save_file)
    logging.info('')
    logging.info('Model saved\n')
Example 3
    lines = []
    for o in order:
        m = np.where(mask == o, 1, 0)
        # Skip if empty
        if m.sum() == 0.0:
            continue
        rle = rle_encode(m)
        lines.append((rle, labels[o]))
    return lines



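# 46 apparel categories plus one background class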
num_classes = 46 + 1


dataset_test = FashionDataset("../input/test/", "../input/sample_submission.csv", 1024, 1024,
                               folds=[], transforms=None)

sample_df = pd.read_csv("../input/sample_submission.csv")


model_ft = get_instance_segmentation_model(num_classes)
model_ft.load_state_dict(torch.load("model.bin"))
model_ft = model_ft.to(device)

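# Freeze all parameters; the model is used for inference only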
for param in model_ft.parameters():
    param.requires_grad = False

model_ft.eval()


sub_list = []
Example 4
    transforms = []
    # convert the PIL image into a PyTorch tensor
    transforms.append(T.ToTensor())
    if train:
        # during training, randomly flip the training images
        # and ground-truth for data augmentation
        transforms.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(transforms)


num_classes = 46 + 1
device = torch.device('cuda:0')

dataset_train = FashionDataset("../input/train/",
                               "../input/train_kfolds.csv",
                               1024,
                               1024,
                               folds=[0, 1, 2, 3, 4],
                               transforms=get_transform(train=True))
#dataset_val = FashionDataset("../input/train/", "../input/train_kfolds.csv", 512, 512,
#                             folds=[0], transforms=get_transform(train=False))

model_ft = get_instance_segmentation_model(num_classes)
model_ft.to(device)

# define training and validation data loaders
data_loader = torch.utils.data.DataLoader(dataset_train,
                                          batch_size=4,
                                          shuffle=True,
                                          num_workers=8,
                                          collate_fn=utils.collate_fn)
Example 5
# prepare dataset
trans = transforms.Compose(
    [transforms.Resize((64, 64)),
     transforms.ToTensor()])


class TargetTrans():
    def __init__(self):
        pass

    def __call__(self, y):
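        # One-hot encode the integer label as a length-7 vector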
        return np.eye(7)[y]


train_dataset = FashionDataset(args.data_dir,
                               transform=trans,
                               target_transform=TargetTrans())
train_loader = data_utils.DataLoader(train_dataset,
                                     args.batch_size,
                                     shuffle=True,
                                     num_workers=1)
test_dataset = FashionDataset(args.data_dir,
                              transform=trans,
                              target_transform=TargetTrans(),
                              train=False)
test_loader = data_utils.DataLoader(test_dataset,
                                    args.batch_size,
                                    shuffle=False,
                                    num_workers=1)

Example 6
        transforms.RandomAffine(degrees=20,
                                translate=(0.1, 0.1),
                                scale=(0.8, 1.2),
                                shear=None,
                                resample=False,
                                fillcolor=(255, 255, 255)),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])

    # during validation we use only tensor and normalization transforms
    val_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(mean, std)])

    train_dataset = FashionDataset('./fashion-product-images/train.csv',
                                   attributes, train_transform)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=num_workers)

    val_dataset = FashionDataset('./fashion-product-images/val.csv',
                                 attributes, val_transform)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=batch_size,
                                shuffle=False,
                                num_workers=num_workers)

    model = MultiOutputModel(n_color_classes=attributes.num_colors,
                             n_gender_classes=attributes.num_genders,
                             n_article_classes=attributes.num_articles)\
Example 7
                        type=str,
                        default='cuda',
                        help="Device: 'cuda' or 'cpu'")
    args = parser.parse_args()

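    # Use CUDA only when it is both requested and available; otherwise fall back to CPU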
    device = torch.device("cuda" if torch.cuda.is_available()
                          and args.device == 'cuda' else "cpu")
    # attributes contains the labels for the categories in the dataset and the mapping between string names and IDs
    attributes = AttributesDataset(args.attributes_file)

    # during validation we use only tensor and normalization transforms
    val_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(mean, std)])

    test_dataset = FashionDataset('./val.csv', attributes, val_transform)
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=64,
                                 shuffle=False,
                                 num_workers=8)

    model = MultiOutputModel(
        n_color_classes=attributes.num_colors,
        n_gender_classes=attributes.num_genders,
        n_article_classes=attributes.num_articles).to(device)

    # Visualization of the trained model
    visualize_grid(model,
                   test_dataloader,
                   attributes,
                   device,
Example 8
        transforms.RandomAffine(degrees=20,
                                translate=(0.1, 0.1),
                                scale=(0.8, 1.2),
                                shear=None,
                                resample=False,
                                fillcolor=(255, 255, 255)),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])

    # during validation we use only tensor and normalization transforms
    val_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(mean, std)])

    train_dataset = FashionDataset('./train.csv', attributes, train_transform)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=num_workers)

    val_dataset = FashionDataset('./val.csv', attributes, val_transform)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=batch_size,
                                shuffle=False,
                                num_workers=num_workers)

    model = MultiOutputModel(
        n_color_classes=attributes.num_colors,
        n_gender_classes=attributes.num_genders,
        n_article_classes=attributes.num_articles).to(device)
Example 9
    train_transform = transforms.Compose([
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0),
        transforms.RandomAffine(degrees=20, translate=(0.1, 0.1), scale=(0.8, 1.2),
                                shear=None, resample=False, fillcolor=(255, 255, 255)),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])

    # during validation we use only tensor and normalization transforms
    val_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])

    train_dataset = FashionDataset('/home/vrushali/Image Reco/trainn.csv', attributes, train_transform)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)

    #val_dataset = FashionDataset('/home/vrushali/Image Reco/val1.csv', attributes, val_transform)
    #val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

    model = MultiOutputModel(n_color_classes=attributes.num_colors,
                             n_gender_classes=attributes.num_genders,
                             n_article_classes=attributes.num_articles).to(device)

    optimizer = torch.optim.Adam(model.parameters())

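    # Create time-stamped directories for logs and checkpoints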
    logdir = os.path.join('./logs/', get_cur_time())
    savedir = os.path.join('./checkpoints/', get_cur_time())
    os.makedirs(logdir, exist_ok=True)
    os.makedirs(savedir, exist_ok=True)
Example 10
    def test_dataset(self):
        # Test that load_data raises when the dataset directory doesn't exist
        dataset = FashionDataset()
        self.assertRaises(ValueError, dataset.load_data, "test.json",
                          "datasets")
Example 11
    return accuracy_color, accuracy_gender, accuracy_article


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Inference pipeline')
    parser.add_argument('--checkpoint', type=str, required=True,
                        default='/home/vrushali/Image Reco/checkpoints/2020-04-25_21-54/checkpoint-000048.pth',
                        help="Path to the checkpoint")
    parser.add_argument('--attributes_file', type=str, default='/home/vrushali/Image Reco/fashion-product-images-small/styles.csv',
                        help="Path to the file with attributes")
    parser.add_argument('--device', type=str, default='cpu',
                        help="Device: 'cuda' or 'cpu'")
    args = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() and args.device == 'cuda' else "cpu")
    # attributes contains the labels for the categories in the dataset and the mapping between string names and IDs
    attributes = AttributesDataset(args.attributes_file)

    # during validation we use only tensor and normalization transforms
    val_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])

    test_dataset = FashionDataset('/home/vrushali/Image Reco/val.csv', attributes, val_transform)
    test_dataloader = DataLoader(test_dataset, batch_size=64, shuffle=False, num_workers=8)

    model = MultiOutputModel(n_color_classes=attributes.num_colors, n_gender_classes=attributes.num_genders,
                             n_article_classes=attributes.num_articles).to(device)

    # Visualization of the trained model
    visualize_grid(model, test_dataloader, attributes, device, checkpoint=args.checkpoint)