Example no. 1
0
def main(argv):
    """Entry point: parse @argv, build loaders and model, then train.

    Runs one train/test cycle per epoch and writes a checkpoint after
    every epoch.
    """
    args = parse_args(argv)
    utils.general_setup(args.save, args.gpus)

    logging.info("Arguments parsed.\n{}".format(pprint.pformat(vars(args))))

    # Data loaders for the training and validation splits.
    train_loader = imagenet.get_train_loader(
        args.imagenet, args.batch_size, args.num_workers, args.image_size)
    val_loader = imagenet.get_val_loader(
        args.imagenet, args.batch_size, args.num_workers, args.image_size)

    # Student model plus optional teacher; the trailing False flag is
    # forwarded unchanged to model_factory.create_model.
    model, loss = model_factory.create_model(
        args.model, args.student_state_file, args.gpus, args.teacher_model,
        args.teacher_state_file, False)
    logging.info("Model:\n{}".format(model))

    discriminator_loss, update_parameters = create_discriminator_criterion(args)

    # Fall back to the model's built-in LR schedule when none was given.
    regime = LearningRateRegime(
        model.LR_REGIME if args.lr_regime is None else args.lr_regime)
    optimizer = create_optimizer(
        model, update_parameters, args.momentum, args.weight_decay)

    # Train, evaluate and checkpoint once per epoch.
    for epoch in range(args.start_epoch, args.epochs):
        _set_learning_rate(optimizer, regime.get_lr(epoch))
        train_for_one_epoch(
            model, loss, discriminator_loss, train_loader, optimizer, epoch,
            args)
        test.test_for_one_epoch(model, loss, val_loader, epoch)
        save_checkpoint(args.save, model, optimizer, epoch)
Example no. 2
0
def main(argv):
    """Entry point for FKD training: parse @argv, set up loaders, train."""
    args = parse_args(argv)
    utils.general_setup(args.save, args.gpus)

    logging.info("Arguments parsed.\n{}".format(pprint.pformat(vars(args))))

    # Each minibatch contains num_crops crops of every image, so fewer
    # distinct images need to be loaded per batch.
    args.batch_size = math.ceil(args.batch_size / args.num_crops)

    # Data loaders (the FKD train loader also streams the soft labels).
    train_loader = imagenet.get_train_loader_FKD(
        args.imagenet, args.batch_size, args.num_workers, args.image_size,
        args.num_crops, args.softlabel_path)
    val_loader = imagenet.get_val_loader(
        args.imagenet, args.batch_size, args.num_workers, args.image_size)

    # Student model plus optional teacher; the trailing True flag is
    # forwarded unchanged to model_factory.create_model.
    model, loss = model_factory.create_model(
        args.model, args.student_state_file, args.gpus, args.teacher_model,
        args.teacher_state_file, True)
    logging.info("Model:\n{}".format(model))

    discriminator_loss, update_parameters = create_discriminator_criterion(args)

    optimizer = create_optimizer(
        model, update_parameters, args.momentum, args.weight_decay)

    # Epochs advance in steps of num_crops, matching the multi-crop batches.
    for epoch in range(args.start_epoch, args.epochs, args.num_crops):
        adjust_learning_rate(optimizer, epoch, args)
        train_for_one_epoch(
            model, loss, discriminator_loss, train_loader, optimizer, epoch,
            args)
        test.test_for_one_epoch(model, loss, val_loader, epoch)
        save_checkpoint(args.save, model, optimizer, epoch)
Example no. 3
0
def main(argv):
    """Run the training script with command line arguments @argv.

    Trains on the AliProducts dataset (hard-coded data paths below) with an
    optional teacher model, and checkpoints after every epoch.
    """
    args = parse_args(argv)
    utils.general_setup(args.save, args.gpus)

    logging.info("Arguments parsed.\n{}".format(pprint.pformat(vars(args))))

    # NOTE(review): dataset locations are hard-coded; consider promoting
    # them to command-line arguments.
    data_dir = '/media/School/Datasets/AliProducts/'
    train_list = '/media/School/Datasets/AliProducts/lists/train_list_250.txt'
    val_list = '/media/School/Datasets/AliProducts/lists/new_valid_list.txt'

    # Create the train and the validation data loaders.
    train_loader = AliProductDataLoader(
        data_dir=data_dir,
        data_list=train_list,
        batch_size=args.batch_size,
        image_size=args.image_size,
        sample='balance',  # class-balanced sampling, training split only
        num_workers=args.num_workers
    )
    val_loader = AliProductDataLoader(
        data_dir=data_dir,
        data_list=val_list,
        batch_size=args.batch_size,
        image_size=args.image_size,
        num_workers=args.num_workers
    )

    # Create model with optional teachers.
    model, loss = model_factory.create_model(
        args.model, args.student_state_file, args.gpus, args.teacher_model,
        args.teacher_state_file, load_from_file=True)
    logging.info("Model:\n{}".format(model))

    discriminator_loss, update_parameters = create_discriminator_criterion(args)

    # Fall back to the model's built-in LR schedule when none was given.
    if args.lr_regime is None:
        lr_regime = model.LR_REGIME
    else:
        lr_regime = args.lr_regime
    regime = LearningRateRegime(lr_regime)
    # Train and test for the needed number of epochs.
    optimizer = create_optimizer(model, update_parameters, args.momentum,
                                 args.weight_decay)

    for epoch in range(args.start_epoch, args.epochs):
        lr = regime.get_lr(epoch)
        _set_learning_rate(optimizer, lr)
        train_for_one_epoch(model, loss, discriminator_loss, train_loader,
                            optimizer, epoch)
        test.test_for_one_epoch(model, loss, val_loader, epoch)
        save_checkpoint(args.save, model, optimizer, epoch)
Example no. 4
0
def main2(argv):
    """Run the training script with command line arguments @argv.

    Trains on LMDB-backed data, resuming the optimizer state from the fixed
    path 'models/optim_state_03.pytar'.
    """
    args = parse_args(argv)
    utils.general_setup(args.save, args.gpus)

    logging.info("Arguments parsed.\n{}".format(pprint.pformat(vars(args))))

    # Train/validation loaders over the two LMDB databases.
    train_data = DataLmdb("/kaggle/working/Low_Test/Train-Low_lmdb",
                          db_size=1464004, crop_size=128, flip=True,
                          scale=0.00390625)
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=256,
                                               shuffle=True)
    val_data = DataLmdb("/kaggle/working/Low_Test/Valid-Low_lmdb",
                        db_size=6831, crop_size=128, flip=False,
                        scale=0.00390625, random=False)
    val_loader = torch.utils.data.DataLoader(val_data, batch_size=256,
                                             shuffle=False)

    # Student model plus optional teacher.
    model, loss = model_factory.create_model(
        args.model, args.student_state_file, args.gpus, args.teacher_model,
        args.teacher_state_file)

    discriminator_loss, update_parameters = create_discriminator_criterion(args)

    # Fall back to the model's built-in LR schedule when none was given.
    lr_regime = args.lr_regime if args.lr_regime is not None else model.LR_REGIME
    regime = LearningRateRegime(lr_regime)
    optimizer = create_optimizer(model, update_parameters, args.momentum,
                                 args.weight_decay)

    # Resume optimizer state from a fixed checkpoint path.
    optimizer.load_state_dict(torch.load('models/optim_state_03.pytar'))

    # Train, evaluate and checkpoint once per epoch.
    for epoch in range(args.start_epoch, args.epochs):
        _set_learning_rate(optimizer, regime.get_lr(epoch))
        train_for_one_epoch(model, loss, discriminator_loss, train_loader,
                            optimizer, epoch)
        test.test_for_one_epoch(model, loss, val_loader, epoch)
        save_checkpoint(args.save, model, optimizer, epoch)
Example no. 5
0
def main(argv):
    """Run the training script with command line arguments @argv.

    The checkpoint directory name encodes the model, whether a cosine
    classifier is used ('cos'), whether a label refinery is used ('rfn'),
    and the module-level @name_time timestamp.
    """
    args = parse_args(argv)

    # Compose the save-directory name from the enabled options; this
    # replaces the original four-way duplicated if/else with one builder.
    tag = args.model
    if args.coslinear:
        tag += '_cos'
    if args.label_refinery_model is not None:
        tag += '_rfn'
    save_dir = args.save + '/' + tag + '_' + name_time
    # makedirs with exist_ok is race-free and also creates missing parents
    # (the original os.mkdir failed when args.save did not exist yet).
    os.makedirs(save_dir, exist_ok=True)
    utils.general_setup(save_dir, args.gpus)

    logging.info("Arguments parsed.\n{}".format(pprint.pformat(vars(args))))

    # Multi-crop CIFAR-100 loaders. NOTE(review): the validation loader also
    # shuffles (shuffle=True), preserved from the original — confirm intended.
    train_loader = mul_cifar100.mul_CIFAR100DataLoader(
        root=args.data_dir, image_size=32, train=True,
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.num_workers)
    val_loader = mul_cifar100.mul_CIFAR100DataLoader(
        root=args.data_dir, image_size=32, train=False,
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.num_workers)

    # Create model with optional label refinery.
    model, loss = model_factory.create_model(
        args.model, args.model_state_file, args.gpus, args.label_refinery_model,
        args.label_refinery_state_file, args.coslinear, args.s)

    # Fall back to the model's built-in LR schedule when none was given.
    if args.lr_regime is None:
        lr_regime = model.LR_REGIME
    else:
        lr_regime = args.lr_regime
    regime = LearningRateRegime(lr_regime)
    # Train and test for needed number of epochs.
    optimizer = create_optimizer(model, args.momentum, args.weight_decay)
    for epoch in range(1, regime.num_epochs + 1):
        lr = regime.get_lr(epoch)
        _set_learning_rate(optimizer, lr)
        train_for_one_epoch(model, loss, train_loader, optimizer, epoch)
        test.test_for_one_epoch(model, loss, val_loader, epoch)
        save_checkpoint(save_dir, model, optimizer, epoch)
Example no. 6
0
def main(argv):
    """Run the training script with command line arguments @argv.

    The checkpoint directory name encodes the model, whether a cosine
    classifier is used ('cos'), whether a label refinery is used ('rfn'),
    and the module-level @name_time timestamp.
    """
    args = parse_args(argv)

    # Compose the save-directory name from the enabled options; this
    # replaces the original four-way duplicated if/else with one builder.
    tag = args.model
    if args.coslinear:
        tag += '_cos'
    if args.label_refinery_model is not None:
        tag += '_rfn'
    save_dir = args.save + '/' + tag + '_' + name_time
    # makedirs with exist_ok is race-free and also creates missing parents
    # (the original os.mkdir failed when args.save did not exist yet).
    os.makedirs(save_dir, exist_ok=True)
    utils.general_setup(save_dir, args.gpus)

    logging.info("Arguments parsed.\n{}".format(pprint.pformat(vars(args))))

    # Create the train and the validation data loaders. NOTE(review): the
    # validation loader also shuffles (shuffle=True), preserved from the
    # original — confirm intended.
    train_loader = mul_cifar100.mul_CIFAR100DataLoader(
        root=args.data_dir,
        image_size=32,
        train=True,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers)
    val_loader = mul_cifar100.mul_CIFAR100DataLoader(
        root=args.data_dir,
        image_size=32,
        train=False,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers)

    # Create model with optional label refinery.
    model, loss = model_factory.create_model(args.model, args.model_state_file,
                                             args.gpus,
                                             args.label_refinery_model,
                                             args.label_refinery_state_file,
                                             args.coslinear, args.s)

    # Fall back to the model's built-in LR schedule when none was given.
    if args.lr_regime is None:
        lr_regime = model.LR_REGIME
    else:
        lr_regime = args.lr_regime
    regime = LearningRateRegime(lr_regime)
    # Train and test for the needed number of epochs. (A commented-out
    # per-epoch zero-channel reporting block was removed as dead code.)
    optimizer = create_optimizer(model, args.momentum, args.weight_decay)

    for epoch in range(1, int(regime.num_epochs) + 1):
        lr = regime.get_lr(epoch)
        _set_learning_rate(optimizer, lr)
        train_for_one_epoch(model, loss, train_loader, optimizer, epoch)
        test.test_for_one_epoch(model, loss, val_loader, epoch)
        save_checkpoint(save_dir, model, optimizer, epoch)