Example #1
def main(config):
    # Set device based on user defined configuration.
    if config.verbose >= 2:
        print(config)

    device = torch.device('cpu') if config.gpu_id < 0 else torch.device(
        'cuda:%d' % config.gpu_id)

    model = get_model(config)
    model = model.to(device)

    train_loader, valid_loader, test_loader = get_loaders(config)

    print("Train:", len(train_loader.dataset))
    print("Valid:", len(valid_loader.dataset))
    print("Test:", len(test_loader.dataset))

    optimizer = optim.Adam(model.parameters())
    crit = nn.MSELoss()

    if config.verbose >= 2:
        print(model)
        print(optimizer)
        print(crit)

    trainer = Trainer(config)
    trainer.train(model, crit, optimizer, train_loader, valid_loader)
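Example #1 (like most snippets below) calls a repository-specific get_loaders(config) and expects (train, valid, test) DataLoaders back. As a point of reference, here is a minimal sketch of a compatible helper, assuming torchvision's MNIST and a config exposing batch_size; both are assumptions, not the original implementation.

from torch.utils.data import DataLoader, random_split
from torchvision import datasets, transforms

def get_loaders(config):
    # Hypothetical sketch only; each repository ships its own get_loaders.
    to_tensor = transforms.ToTensor()
    full = datasets.MNIST('./data', train=True, download=True, transform=to_tensor)
    test = datasets.MNIST('./data', train=False, download=True, transform=to_tensor)
    n_train = int(len(full) * 0.8)  # 80/20 train/valid split (assumed ratio)
    train_set, valid_set = random_split(full, [n_train, len(full) - n_train])
    return (DataLoader(train_set, batch_size=config.batch_size, shuffle=True),
            DataLoader(valid_set, batch_size=config.batch_size),
            DataLoader(test, batch_size=config.batch_size))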
Example #2
def main(config):
    # For fast training
    cudnn.benchmark = True

    # create logging folders
    if not os.path.exists(config.output_path):
        os.makedirs(config.output_path)
    subfolders = ['logs', 'samples', 'models', 'results']
    for subfolder in subfolders:
        subfolder_path = os.path.join(config.output_path, subfolder,
                                      config.output_name)
        if not os.path.exists(subfolder_path):
            os.makedirs(subfolder_path)

    print_logger = create_logger(
        os.path.join(
            config.output_path, 'logs', config.output_name,
            'train{}.log'.format(datetime.now().strftime("%Y%m%d-%H%M%S"))))
    print_logger.info('============ Initialized logger ============')
    print_logger.info('\n'.join(
        '%s: %s' % (k, str(v)) for k, v in sorted(dict(vars(config)).items())))

    # Data loader
    data_loaders = get_loaders(config.root, config.attrs, config.categories,
                               config.image_size, config.batch_size)

    # Solver
    solver = Solver(data_loaders, config)

    if config.mode == 'train':
        solver.train()
    elif config.mode == 'test':
        solver.test()
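Example #2's create_logger is repository code; below is a minimal stand-in built on Python's standard logging module, assuming the logger should write to both the log file and stdout (an assumption). Incidentally, os.makedirs(path, exist_ok=True) would fold the exists-checks above into single calls.

import logging

def create_logger(log_path):
    # Hypothetical stand-in for the repository's create_logger.
    logger = logging.getLogger('train')
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(message)s')
    for handler in (logging.FileHandler(log_path), logging.StreamHandler()):
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger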
Example #3
def main(config):
    device = torch.device('cpu') if config.gpu_id < 0 else torch.device('cuda:%d' % config.gpu_id)

    train_loader, valid_loader, test_loader = get_loaders(config)

    print("Train:", len(train_loader.dataset))
    print("Valid:", len(valid_loader.dataset))
    print("Test:", len(test_loader.dataset))

    model = ImageClassifier(28**2, 10).to(device)
    optimizer = optim.Adam(model.parameters())
    crit = nn.CrossEntropyLoss()

    trainer = Trainer(config)
    trainer.train(model, crit, optimizer, train_loader, valid_loader)
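Examples #1 and #3-#5 hand the loop to a Trainer class (a comment in Example #5 says at least one source repository builds it on Ignite). A plain-PyTorch sketch of a compatible interface follows; the n_epochs config field and the loop body are assumptions, not the original class.

import torch

class Trainer:
    # Hypothetical sketch; the real Trainer lives in the source repository.
    def __init__(self, config):
        self.config = config

    def train(self, model, crit, optimizer, train_loader, valid_loader):
        for epoch in range(self.config.n_epochs):  # n_epochs: assumed config field
            model.train()
            for x, y in train_loader:  # assumes loaders yield model-ready tensors
                optimizer.zero_grad()
                crit(model(x), y).backward()
                optimizer.step()
            model.eval()
            with torch.no_grad():
                loss = sum(float(crit(model(x), y)) for x, y in valid_loader)
            print('epoch %d: valid loss %.4f' % (epoch + 1, loss / len(valid_loader)))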
Example #4
def main(config):
    device = torch.device('cpu') if config.gpu_id < 0 else torch.device(
        'cuda:%d' % config.gpu_id)

    train_loader, valid_loader, test_loader = get_loaders(config)
    # fetch the data via get_loaders

    print('Train', len(train_loader.dataset))
    print('Valid', len(valid_loader.dataset))
    print('Test', len(test_loader.dataset))

    model = ImageClassifier(28**2, 10).to(device)  # input size is 784; classify into 10 classes
    optimizer = optim.Adam(model.parameters())
    crit = nn.CrossEntropyLoss()  # Loss Function

    trainer = Trainer(config)
    trainer.train(model, crit, optimizer, train_loader, valid_loader)
Example #5
def main(config):
    # Set device based on user defined configuration.
    device = torch.device('cpu') if config.gpu_id < 0 else torch.device(
        'cuda:%d' % config.gpu_id)

    train_loader, valid_loader, test_loader = get_loaders(config)

    print("Train:", len(train_loader.dataset))
    print("Valid:", len(valid_loader.dataset))
    print("Test:", len(test_loader.dataset))

    model = ImageClassifier(28**2, 10).to(device)  # input is 784-dimensional, 10 output classes; moved to the GPU when one is used
    optimizer = optim.Adam(model.parameters())  # model.parameters() iterates over the model's weight parameters
    crit = nn.CrossEntropyLoss()  # cross-entropy since this is classification; it is why the model ends in a softmax

    trainer = Trainer(config)  # implemented using Ignite
    trainer.train(model, crit, optimizer, train_loader, valid_loader)
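One caveat on the comment carried over above: PyTorch's nn.CrossEntropyLoss applies log-softmax internally, so the network it trains should emit raw logits (a (log-)softmax output pairs with nn.NLLLoss instead). For reference, here is a hypothetical ImageClassifier consistent with the ImageClassifier(28**2, 10) signature used in Examples #3-#5; the real architecture is in the source repository.

import torch.nn as nn

class ImageClassifier(nn.Module):
    # Hypothetical sketch, not the original model.
    def __init__(self, input_size, output_size):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(input_size, 256),
            nn.ReLU(),
            nn.Linear(256, output_size),  # raw logits for nn.CrossEntropyLoss
        )

    def forward(self, x):
        # x: (batch_size, input_size) flattened 28x28 images
        return self.layers(x)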
Example #6
def run(dataset="CIFAR10", epochs=30):
    trainloader, testloader, num_classes = data_loader.get_loaders(dataset)

    net = network.AlexNet(num_classes)
    net.to(device)

    accuracies, losses = trainer.train(net,
                                       trainloader,
                                       testloader,
                                       epochs=epochs)

    x = np.linspace(0, 1, len(losses))

    mlp.plot(x, losses)
    mlp.show()

    x = np.linspace(0, 1, len(accuracies))

    mlp.plot(x, accuracies)
    mlp.show()
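Example #6 relies on module-level imports and a device object it never defines. A plausible preamble is sketched below; the mlp alias is inferred from the plotting calls, and data_loader, network, and trainer are assumed to be the repository's own modules.

import numpy as np
import torch
import matplotlib.pyplot as mlp  # alias inferred from mlp.plot / mlp.show above

import data_loader  # repository-local modules (assumed)
import network
import trainer

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')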
Example #7
def main():
    parser = get_parser()
    args = parser.parse_args()

    # Setup DataLoader
    if args.local:
        ROOT_PATH = os.path.join(args.local_dataset_path, 'train') # Root directory for the dataset
    else:
        ROOT_PATH = os.path.join(DATASET_PATH, 'train') # Root directory for the dataset
    loaders = get_loaders(root_path=ROOT_PATH,
                          batch_size=args.batch_size,
                          shuffle=args.shuffle,
                          c_mode=args.c_mode,
                          s_mode=args.s_mode)

    # Run training
    model = DeepSteg(batch_size=args.batch_size, im_dim=(255, 255),
                     c=args.c_mode, s=args.s_mode, skip=args.skip)  # TODO fix (255,255) when data is replaced

    train(model=model, train_loader=loaders['train'], args=args)
    print("all successfully completed")
Example #8
    arg = parser.add_argument
    add_args(parser)
    args = parser.parse_args()
    model_name = args.model

    Path(args.root).mkdir(exist_ok=True, parents=True)

    batch_size = args.batch_size

    train_df = get_df('train')
    val_df = get_df('val')

    print(train_df.shape, val_df.shape)

    train_loader, valid_loader = data_loader.get_loaders(batch_size, args, train_df=train_df, valid_df=val_df,
                                                         train_transform=train_transform, val_transform=val_transform)

    num_classes = data_loader.num_classes

    # model = models.ResNetFinetune(num_classes, net_cls=models.M.resnet34, dropout=True)
    model = models.DenseNetFinetune(num_classes, net_cls=models.M.densenet201, two_layer=True)
    model = utils.cuda(model)

    if utils.cuda_is_available:
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None

        model = nn.DataParallel(model, device_ids=device_ids).cuda()
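Examples #8, #11, #13, and #14 lean on a small utils module; a sketch consistent with the two attributes they use (utils.cuda and utils.cuda_is_available) follows, again an assumption rather than the original code.

import torch

cuda_is_available = torch.cuda.is_available()

def cuda(x):
    # Move a model or tensor to the GPU when one is present; no-op otherwise.
    return x.cuda() if cuda_is_available else x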
Example #9
    parser.add_argument('--data_dir', type=pathlib.Path, help='The directory where image and embedding pickle files can be found')
    parser.add_argument('--output_dir', type=pathlib.Path, help='Output')
    args = parser.parse_args()

    if not os.path.exists(args.data_dir):
        raise Exception(f'Not a valid path to find image and embedding pickle files: {args.data_dir}')
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Create parameters and model
    model_path = os.path.join(args.output_dir, 'best_cross_domain_adaptation.pth')
    batch_size = 128
    num_workers = 2
    model = models.alexnet(pretrained=True)

    # Training
    print('Cross-domain adaptation: training')
    coco_data_loaders = get_loaders(args.data_dir, "coco", "glove", batch_size, num_workers)
    news_data_loaders = get_loaders(args.data_dir, "news", "glove", batch_size, num_workers)
    train(args.epochs, model, model_path, coco_data_loaders, news_data_loaders, batch_size)
    model.load_state_dict(torch.load(model_path))

    # Testing COCO
    print('Testing COCO')
    image_to_text, text_to_image = get_test_results(model, coco_data_loaders['test'])
    print(f'COCO Model accuracy: image-to-text {image_to_text}; text-to-image {text_to_image}')

    # Testing Good News
    print('Testing Good News')
    image_to_text, text_to_image = get_test_results(model, news_data_loaders['test'])
    print(f'Good News Model accuracy: image-to-text {image_to_text}; text-to-image {text_to_image}')
Example #10
    if args.image_data_set not in ('coco', 'news'):  # guard reconstructed from the message below
        raise Exception(f'Expected "coco" or "news" as image data set, found {args.image_data_set}')

    if not os.path.exists(args.data_dir):
        raise Exception(f'Not a valid path to find image and embedding pickle files: {args.data_dir}')
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Create parameters and model
    model_path = os.path.join(args.output_dir, f'{args.model}_{args.embedding}_{args.epochs}_{args.image_data_set}.pth')
    batch_size = 128
    num_workers = 2
    model = models.alexnet(pretrained=True) if args.model == "alex" else models.resnet18(pretrained=True)

    # Training
    print(f'Training with model {args.model}, embedding {args.embedding}, image data set {args.image_data_set}')
    data_loaders = get_loaders(args.data_dir, args.image_data_set, args.embedding, batch_size, num_workers)
    train(args.epochs, model, data_loaders, model_path)

    # Testing
    print(f'Testing with model {args.model}, embedding {args.embedding}, image data set {args.image_data_set}')
    model.load_state_dict(torch.load(model_path))
    image_to_text, text_to_image = get_test_results(model, data_loaders['test'])
    print(f'Model accuracy: image-to-text {image_to_text}; text-to-image {text_to_image}')

    # Cross-Domain Testing
    if args.cross_domain_eval:
        img_data_set = "news" if args.image_data_set == "coco" else "coco"
        print(f'Testing with model {args.model}, embedding {args.embedding}, image data set {img_data_set}')
        cd_data_loader = get_loaders(args.data_dir, img_data_set, args.embedding, batch_size, num_workers)['train']
        image_to_text, text_to_image = get_test_results(model, cd_data_loader)
        print(f'Model accuracy: image-to-text {image_to_text}; text-to-image {text_to_image}')
Example #11
    model_name = args.model

    batch_size = args.batch_size

    train_transform = transforms.Compose([
        transforms.RandomSizedCrop(224),
        augmentations.D4(),
        # augmentations.Rotate(),
        # augmentations.GaussianBlur(),
        augmentations.Add(-5, 5, per_channel=True),
        augmentations.ContrastNormalization(0.8, 1.2, per_channel=True),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    train_loader, valid_loader = data_loader.get_loaders(batch_size, train_transform=train_transform, fold=args.fold)

    num_classes = 17

    # model = get_model(num_classes, model_name)

    # model = getattr(models, args.model)(num_classes=num_classes)
    model = get_model(num_classes, model_name)
    model = utils.cuda(model)

    if utils.cuda_is_available:
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
Example #12
width, height, depth = (28, 28, 1)
img_size = width*height # canvas
batch_size = 100 
epochs = 10000
lr = 1e-3
eps = 1e-8

glimpses = 64
read_glimpse = 2 # table 3 
read_glimpse_classification = 12 # table 3
write_glimpse = 5 # table 3
z_size = 100 # table 3
num_units_lstm = 256 # table 3

# load data 
train_loader, test_loader = data_loader.get_loaders(64, 4)

# Model definition
class LSTMVAENETWORK(nn.Module):
    def __init__(self, input_size, num_units_lstm):
        super(LSTMVAENETWORK, self).__init__()

        # take the input
        self.encoderrnn = nn.LSTM(input_size, num_units_lstm)

        # a few linear layers needed for reparameterize trick
        self.mu = nn.Linear(num_units_lstm, z_size)
        self.logvar = nn.Linear(num_units_lstm, z_size)

        # take the sampled output and regenerate the image
        self.decoderrnn = nn.LSTM(z_size, input_size)
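    # Added sketch, not from the source: the class stops at __init__, so a
    # forward pass using the reparameterization trick that the mu/logvar
    # layers exist for might look like this.
    def forward(self, x):
        # x: (seq_len, batch, input_size)
        h, _ = self.encoderrnn(x)
        mu, logvar = self.mu(h), self.logvar(h)
        z = mu + torch.exp(0.5 * logvar) * torch.randn_like(mu)  # z = mu + sigma * eps
        out, _ = self.decoderrnn(z)
        return out, mu, logvar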
Example #13
    model_name = args.model

    batch_size = args.batch_size

    train_transform = transforms.Compose([
        transforms.RandomSizedCrop(224),
        augmentations.D4(),
        # augmentations.Rotate(),
        # augmentations.GaussianBlur(),
        augmentations.Add(-5, 5, per_channel=True),
        augmentations.ContrastNormalization(0.8, 1.2, per_channel=True),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    train_loader, valid_loader = data_loader.get_loaders(
        batch_size, train_transform=train_transform, fold=args.fold)

    num_classes = 17

    # model = get_model(num_classes, model_name)

    # model = getattr(models, args.model)(num_classes=num_classes)
    model = get_model(num_classes, model_name)
    model = utils.cuda(model)

    if utils.cuda_is_available:
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
Example #14
    batch_size = args.batch_size

    train_transform = transforms.Compose([
        transforms.RandomCrop(160),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    val_transform = transforms.Compose([
        transforms.CenterCrop(160),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    train_loader, valid_loader = data_loader.get_loaders(batch_size, args, train_transform=train_transform,
                                                         valid_transform=val_transform)

    num_classes = 5270

    model = models.ResNetFinetune(num_classes, net_cls=models.M.resnet50)
    # model = models.DenseNetFinetune(models.densenet121)
    model = utils.cuda(model)

    if utils.cuda_is_available:
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None

        model = nn.DataParallel(model, device_ids=device_ids).cuda()
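A closing note on the DataParallel block that recurs in Examples #8 and #14: with device_ids=None, nn.DataParallel replicates the model across every visible GPU, while an explicit list pins it to specific devices. A self-contained sketch:

import torch
import torch.nn as nn

net = nn.Linear(8, 2)  # any nn.Module stands in for the models above
if torch.cuda.device_count() > 1:
    # device_ids=None (the fallback in the examples) uses all visible GPUs;
    # a list such as [0, 1] restricts replication to those devices.
    net = nn.DataParallel(net, device_ids=[0, 1]).cuda()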