Example #1
def main():
    set_seed()
    args = get_arguments()
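    # Scale the base learning rate linearly with the global batch size
    # (the linear scaling rule: lr = base_lr * batch_size / 256).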
    if args.task == 'pretext':
        if args.dataset == 'imagenet':
            args.lr = 0.5 * float(args.batch_size / 256)
        elif args.dataset == 'cifar10':
            args.lr = 0.03 * float(args.batch_size / 256)
    else:
        if args.dataset == 'imagenet' and args.freeze:
            args.lr = 30. * float(args.batch_size / 256)
        else:  # cifar10, or imagenet without args.freeze
            args.lr = 1.8 * float(args.batch_size / 256)

    args, initial_epoch = search_same(args)
    if initial_epoch == -1:
        # training was already finished!
        return

    elif initial_epoch == 0:
        # first training or training with snapshot
        args.stamp = create_stamp()

    get_session(args)
    logger = get_logger("MyLogger")
    for k, v in vars(args).items():
        logger.info("{} : {}".format(k, v))

    ##########################
    # Strategy
    ##########################
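    # MirroredStrategy does synchronous data parallelism across all visible GPUs;
    # a single-GPU run gets OneDeviceStrategy pinned to /gpu:0.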
    if len(args.gpus.split(',')) > 1:
        # strategy = tf.distribute.experimental.CentralStorageStrategy()
        strategy = tf.distribute.MirroredStrategy()
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    num_workers = strategy.num_replicas_in_sync
    assert args.batch_size % num_workers == 0

    logger.info('{} : {}'.format(strategy.__class__.__name__, num_workers))
    logger.info("BATCH SIZE PER REPLICA : {}".format(args.batch_size //
                                                     num_workers))

    ##########################
    # Training
    ##########################
    if args.task == 'pretext':
        train_pretext(args, logger, initial_epoch, strategy, num_workers)
    else:
        train_lincls(args, logger, initial_epoch, strategy, num_workers)
Example #2
def main():
    set_seed()
    args = get_arguments()
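    # default to the linear scaling rule (base LR 1.0 at batch size 256)
    # whenever no learning rate was passed explicitly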
    args.lr = args.lr or 1. * args.batch_size / 256
    args, initial_epoch = search_same(args)
    if initial_epoch == -1:
        # training was already finished!
        return

    elif initial_epoch == 0:
        # first training or training with snapshot
        args.stamp = create_stamp()

    get_session(args)
    logger = get_logger("MyLogger")
    for k, v in vars(args).items():
        logger.info("{} : {}".format(k, v))

    ##########################
    # Strategy
    ##########################
    if len(args.gpus.split(',')) > 1:
        strategy = tf.distribute.MirroredStrategy()
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    num_workers = strategy.num_replicas_in_sync
    assert args.batch_size % num_workers == 0

    logger.info('{} : {}'.format(strategy.__class__.__name__, num_workers))
    logger.info("BATCH SIZE PER WORKER : {}".format(args.batch_size //
                                                    num_workers))

    ##########################
    # Training
    ##########################
    if args.task == 'pretext':
        train_pixpro(args, logger, initial_epoch, strategy, num_workers)
    else:
        raise NotImplementedError()
Example #3
def main():
    set_seed()
    args = get_arguments()
    args, initial_epoch = search_same(args)
    if initial_epoch == -1:
        # training was already finished!
        return

    elif initial_epoch == 0:
        # first training or training with snapshot
        args.stamp = create_stamp()

    get_session(args)
    logger = get_logger("MyLogger")
    for k, v in vars(args).items():
        logger.info("{} : {}".format(k, v))

    ##########################
    # Strategy
    ##########################
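    # CentralStorageStrategy keeps the variables on one device (typically the CPU)
    # and replicates computation across the visible GPUs.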
    if len(args.gpus.split(',')) > 1:
        strategy = tf.distribute.experimental.CentralStorageStrategy()
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    num_workers = strategy.num_replicas_in_sync
    assert args.batch_size % num_workers == 0

    logger.info('{} : {}'.format(strategy.__class__.__name__, num_workers))
    logger.info("GLOBAL BATCH SIZE : {}".format(args.batch_size))

    ##########################
    # Training
    ##########################
    if args.task in ['v1', 'v2']:
        train_moco(args, logger, initial_epoch, strategy, num_workers)
    else:
        train_lincls(args, logger, initial_epoch, strategy, num_workers)
Example #4
import torch, torchvision
import numpy as np
import matplotlib.pyplot as plt
import pyro
import tqdm
import os
import common
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
import torch.nn as nn
from torch.optim import Adam, lr_scheduler

import re, pickle
from torch.utils.data import DataLoader, random_split

common.set_seed(1)

# Settings
HOME_DIR = "/home/fcbeylun/adv-bnn/"
HOME_DIR = "./"
EPS = 0.05
COMB = "champ"

# for GPU
# CUDA settings
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Configuration parameters
layer_type = 'lrt'  # 'bbb' or 'lrt'
activation_type = 'softplus'  # 'softplus' or 'relu'
priors = {
    'prior_mu': 0,
    'prior_sigma': 0.1,
    'posterior_mu_initial': (0, 0.1),  # (mean, std) normal_
    'posterior_rho_initial': (-5, 0.1),  # (mean, std) normal_
}
Example #5
import torch
import common
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
import torch.nn as nn
from torch.optim import Adam, lr_scheduler
import re

# Settings
train = False
HOME_DIR = "/home/fcbeylun/adv-bnn/"
# HOME_DIR = "./"

# device = 'cuda' if torch.cuda.is_available() else 'cpu'  # overridden below
device = "cpu"  # forced to CPU

# Reproducibility
common.set_seed(156)

layer_type = 'lrt'  # 'bbb' or 'lrt'
activation_type = 'softplus'  # 'softplus' or 'relu'
priors = {
    'prior_mu': 0,
    'prior_sigma': 0.1,
    'posterior_mu_initial': (0, 0.1),  # (mean, std) normal_
    'posterior_rho_initial': (-5, 0.1),  # (mean, std) normal_
}

lr_start = 0.001   # initial learning rate
num_workers = 1    # DataLoader worker processes
valid_size = 0.2   # fraction of the training data held out for validation
batch_size = 256
train_ens = 1      # ensemble size: forward passes per training step
Example #6
    # (truncated in the source: the enclosing plt.legend(...) call and its first
    # handle are reconstructed here from the labels list further down)
    plt.legend(handles=[
        Line2D([0], [0],
               marker='o',
               color='w',
               label='Cloudy (label = 1)',
               markerfacecolor=color[1],
               markersize=10),
        Line2D([0], [0],
               marker='o',
               color='w',
               label='Cloud-Free (label = -1)',
               markerfacecolor=color[-1],
               markersize=10),
        Line2D([0], [0],
               marker='o',
               color='w',
               label='Unlabeled (label = 0)',
               markerfacecolor=color[0],
               markersize=10)
    ],
               labels=[
                   'Cloudy (label = 1)', 'Cloud-Free (label = -1)',
                   'Unlabeled (label = 0)'
               ],
               loc='lower center',
               bbox_to_anchor=(0.52, 0.01),
               ncol=3)


if __name__ == '__main__':
    set_seed(0)

    data = load_data()
    label_stats(data)
    label_plot(data)
    feature_plot(data)
    plt.show()
Example #7
def main():
    temp_args = get_arguments()
    assert temp_args.snapshot is not None, 'snapshot must be selected!'
    set_seed()

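    # rebuild the full training configuration from the model_desc.yml saved next
    # to the snapshot, then re-apply the run-specific paths from the CLI below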
    args = argparse.ArgumentParser().parse_args(args=[])
    tmp = yaml.full_load(
        open(
            f'{temp_args.result_path}/'
            f'{temp_args.dataset}/'
            f'{temp_args.stamp}/'
            'model_desc.yml', 'r'))

    for k, v in tmp.items():
        setattr(args, k, v)

    args.snapshot = temp_args.snapshot
    args.src_path = temp_args.src_path
    args.data_path = temp_args.data_path
    args.result_path = temp_args.result_path
    args.gpus = temp_args.gpus
    args.batch_size = 1

    get_session(args)
    logger = get_logger("MyLogger")
    for k, v in vars(args).items():
        logger.info(f"{k} : {v}")

    ##########################
    # Dataset
    ##########################
    _, valset = set_dataset(args.dataset, args.classes, args.data_path)
    validation_steps = len(valset)

    logger.info("TOTAL STEPS OF DATASET FOR EVALUATION")
    logger.info("=========== VALSET ===========")
    logger.info(f"    --> {validation_steps}")

    ##########################
    # Model & Generator
    ##########################
    model = set_model(args.backbone, args.dataset, args.classes)
    model.load_weights(args.snapshot)
    logger.info(f"Load weights at {args.snapshot}")

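    # the non-standard compile kwargs (batch_size, xe_loss, cls_loss, ...) suggest
    # a subclassed model with a custom compile() that consumes them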
    model.compile(loss=args.loss,
                  batch_size=args.batch_size,
                  optimizer=tf.keras.optimizers.SGD(args.lr, momentum=.9),
                  metrics=[
                      tf.keras.metrics.TopKCategoricalAccuracy(k=1,
                                                               name='acc1'),
                      tf.keras.metrics.TopKCategoricalAccuracy(k=5,
                                                               name='acc5')
                  ],
                  xe_loss=tf.keras.losses.categorical_crossentropy,
                  cls_loss=tf.keras.losses.KLD,
                  cls_lambda=args.loss_weight,
                  temperature=args.temperature)

    val_generator = DataLoader(loss='crossentropy',
                               mode='val',
                               datalist=valset,
                               dataset=args.dataset,
                               classes=args.classes,
                               batch_size=args.batch_size,
                               shuffle=False).dataloader()

    ##########################
    # Evaluation
    ##########################
    print(
        model.evaluate(val_generator, steps=validation_steps,
                       return_dict=True))
Example #8
def main():
    set_seed()
    args = get_arguments()
    assert args.model_name is not None, 'model_name must be set.'

    logger = get_logger("MyLogger")
    args, initial_epoch = set_cfg(args, logger)
    if initial_epoch == -1:
        # training was already finished!
        return

    get_session(args)
    for k, v in vars(args).items():
        logger.info("{} : {}".format(k, v))


    ##########################
    # Strategy
    ##########################
    # strategy = tf.distribute.MirroredStrategy()
    strategy = tf.distribute.experimental.CentralStorageStrategy()
    num_workers = strategy.num_replicas_in_sync
    assert args.batch_size % num_workers == 0

    logger.info('{} : {}'.format(strategy.__class__.__name__, num_workers))
    logger.info("GLOBAL BATCH SIZE : {}".format(args.batch_size))


    ##########################
    # Generator
    ##########################
    trainset, valset = set_dataset(args)
    train_generator = dataloader(args, trainset, 'train')
    val_generator = dataloader(args, valset, 'val', shuffle=False)
    
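    # --steps, when given, overrides the steps-per-epoch derived from the dataset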
    steps_per_epoch = args.steps or len(trainset) // args.batch_size
    validation_steps = len(valset) // args.batch_size
    
    logger.info("TOTAL STEPS OF DATASET FOR TRAINING")
    logger.info("========== trainset ==========")
    logger.info("    --> {}".format(len(trainset)))
    logger.info("    --> {}".format(steps_per_epoch))

    logger.info("=========== valset ===========")
    logger.info("    --> {}".format(len(valset)))
    logger.info("    --> {}".format(validation_steps))


    ##########################
    # Model
    ##########################
    with strategy.scope():
        model = create_model(args, logger)
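        # with --summary, render an architecture diagram and a text summary, then exit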
        if args.summary:
            from tensorflow.keras.utils import plot_model
            plot_model(model, to_file=os.path.join(args.src_path, 'model.png'), show_shapes=True)
            model.summary(line_length=130)
            return

        # optimizer
        scheduler = OptionalLearningRateSchedule(args, steps_per_epoch, initial_epoch)
        optimizer = tf.keras.optimizers.SGD(scheduler, momentum=.9, decay=.00005)

        model.compile(
            optimizer=optimizer,
            loss=tf.keras.losses.categorical_crossentropy,
            metrics=['acc']
        )


    ##########################
    # Callbacks
    ##########################
    callbacks = create_callbacks(
        args, 
        path=os.path.join(args.result_path, args.dataset, args.model_name, str(args.stamp)))
    logger.info("Build callbacks!")


    ##########################
    # Train
    ##########################
    model.fit(
        x=train_generator,
        epochs=args.epochs,
        callbacks=callbacks,
        validation_data=val_generator,
        steps_per_epoch=steps_per_epoch,
        validation_steps=validation_steps,
        initial_epoch=initial_epoch,
        verbose=1,
    )
Example #9
def main(args=None):
    set_seed()
    args, initial_epoch = search_same(args)
    if initial_epoch == -1:
        # training was already finished!
        return

    elif initial_epoch == 0:
        # first training or training with snapshot
        args.stamp = create_stamp()

    get_session(args)
    logger = get_logger("MyLogger")
    for k, v in vars(args).items():
        logger.info("{} : {}".format(k, v))

    ##########################
    # Strategy
    ##########################
    strategy = tf.distribute.MirroredStrategy()
    num_workers = strategy.num_replicas_in_sync
    assert args.batch_size % num_workers == 0

    logger.info('{} : {}'.format(strategy.__class__.__name__, num_workers))
    logger.info("GLOBAL BATCH SIZE : {}".format(args.batch_size))


    ##########################
    # Dataset
    ##########################
    trainset, valset = set_dataset(args.data_path, args.dataset)
    # hard-coded split sizes: CIFAR-10 (50,000/10,000), SVHN (73,257/26,032)
    if args.dataset == 'cifar10':
        steps_per_epoch = 50000 // args.batch_size
        validation_steps = 10000 // args.batch_size
    elif args.dataset == 'svhn':
        steps_per_epoch = 73257 // args.batch_size
        validation_steps = 26032 // args.batch_size
    else:  # imagenet
        steps_per_epoch = len(trainset) // args.batch_size
        validation_steps = len(valset) // args.batch_size
    if args.steps is not None:
        # an explicit --steps overrides only the derived steps_per_epoch;
        # validation_steps keeps the value computed above
        steps_per_epoch = args.steps

    logger.info("TOTAL STEPS OF DATASET FOR TRAINING")
    logger.info("========== trainset ==========")
    logger.info("    --> {}".format(len(trainset)))
    logger.info("    --> {}".format(steps_per_epoch))

    logger.info("=========== valset ===========")
    logger.info("    --> {}".format(len(valset)))
    logger.info("    --> {}".format(validation_steps))


    ##########################
    # Model & Metric & Generator
    ##########################
    metrics = {
        'acc'       :   tf.keras.metrics.CategoricalAccuracy('acc', dtype=tf.float32),
        'val_acc'   :   tf.keras.metrics.CategoricalAccuracy('val_acc', dtype=tf.float32),
        'loss'      :   tf.keras.metrics.Mean('loss', dtype=tf.float32),
        'val_loss'  :   tf.keras.metrics.Mean('val_loss', dtype=tf.float32),
        'total_loss':   tf.keras.metrics.Mean('total_loss', dtype=tf.float32),
        'unsup_loss':   tf.keras.metrics.Mean('unsup_loss', dtype=tf.float32)}
    
    with strategy.scope():
        # truncated in the source mid-assignment; completing it with the
        # set_model(...) call used by the sibling examples (an assumption)
        model = set_model(args.backbone, args.dataset, args.classes)
Example #10
def main():
    set_seed()
    get_session('2')
    logger = get_logger("MyLogger")

    ##########################
    # Prepare the dataset
    ##########################
    logger.info('##### Build Dataset #####')
    batch_size = 64  # unused below: the fit() calls fall back to Keras' default
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    x_train = x_train.astype("float32") / 255.0
    x_train = np.reshape(x_train, (-1, 28, 28, 1))

    x_test = x_test.astype("float32") / 255.0
    x_test = np.reshape(x_test, (-1, 28, 28, 1))

    ##########################
    # Build models
    ##########################
    logger.info('##### Build Models  #####')
    teacher, student, student_scratch = set_model()

    ##########################
    # Train the teacher
    ##########################
    teacher.compile(
        optimizer=tf.keras.optimizers.Adam(),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

    logger.info('##### Train Teacher #####')
    teacher.fit(x_train, y_train, epochs=5)

    logger.info('##### Evaluate Teacher #####')
    teacher.evaluate(x_test, y_test)

    ##########################
    # Distill teacher to student
    ##########################
    distiller = Distiller(teacher=teacher, student=student)
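    # alpha weighs the student's own cross-entropy against the KL distillation
    # term, and temperature softens both logit distributions before comparison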
    distiller.compile(
        optimizer=tf.keras.optimizers.Adam(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
        student_loss=tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True),
        distillation_loss=tf.keras.losses.KLDivergence(),
        alpha=.1,
        temperature=10)

    logger.info('##### Distillation #####')
    distiller.fit(x_train, y_train, epochs=3)

    logger.info('##### Evaluate Distillation #####')
    distiller.evaluate(x_test, y_test)

    ##########################
    # Train student from scratch for comparison
    ##########################
    student_scratch.compile(
        optimizer=tf.keras.optimizers.Adam(),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

    logger.info('##### Student Scratch #####')
    student_scratch.fit(x_train, y_train, epochs=3)

    logger.info('##### Evaluate Student Scratch #####')
    student_scratch.evaluate(x_test, y_test)
Example #11
def main():
    args = get_arguments()
    set_seed(args.seed)
    args.classes = CLASS_DICT[args.dataset]
    args, initial_epoch = search_same(args)
    if initial_epoch == -1:
        # training was already finished!
        return

    elif initial_epoch == 0:
        # first training or training with snapshot
        args.stamp = create_stamp()

    get_session(args)
    logger = get_logger("MyLogger")
    for k, v in vars(args).items():
        logger.info(f"{k} : {v}")


    ##########################
    # Strategy
    ##########################
    if len(args.gpus.split(',')) > 1:
        strategy = tf.distribute.experimental.CentralStorageStrategy()
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    
    num_workers = strategy.num_replicas_in_sync
    assert args.batch_size % num_workers == 0

    logger.info(f"{strategy.__class__.__name__} : {num_workers}")
    logger.info(f"GLOBAL BATCH SIZE : {args.batch_size}")


    ##########################
    # Dataset
    ##########################
    trainset, valset = set_dataset(args.dataset, args.classes, args.data_path)
    steps_per_epoch = args.steps or len(trainset) // args.batch_size
    validation_steps = len(valset) // args.batch_size

    logger.info("TOTAL STEPS OF DATASET FOR TRAINING")
    logger.info("========== TRAINSET ==========")
    logger.info(f"    --> {len(trainset)}")
    logger.info(f"    --> {steps_per_epoch}")

    logger.info("=========== VALSET ===========")
    logger.info(f"    --> {len(valset)}")
    logger.info(f"    --> {validation_steps}")


    ##########################
    # Model
    ##########################
    with strategy.scope():
        model = set_model(args.backbone, args.dataset, args.classes)
        if args.snapshot:
            model.load_weights(args.snapshot)
            logger.info(f"Load weights at {args.snapshot}")

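        # run_eagerly=True skips graph tracing so the custom training step can be
        # stepped through in a debugger (slower, but easier to inspect)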
        model.compile(
            loss=args.loss,
            optimizer=tf.keras.optimizers.SGD(args.lr, momentum=.9),
            metrics=[
                tf.keras.metrics.TopKCategoricalAccuracy(k=1, name='acc1'),
                tf.keras.metrics.TopKCategoricalAccuracy(k=5, name='acc5')],
            xe_loss=tf.keras.losses.categorical_crossentropy,
            cls_loss=tf.keras.losses.KLD,
            cls_lambda=args.loss_weight,
            temperature=args.temperature,
            num_workers=num_workers,
            run_eagerly=True)


    ##########################
    # Generator
    ##########################
    train_generator = DataLoader(
        loss=args.loss,
        mode='train', 
        datalist=trainset, 
        dataset=args.dataset, 
        classes=args.classes,
        batch_size=args.batch_size, 
        shuffle=True).dataloader()

    val_generator = DataLoader(
        loss='crossentropy',
        mode='val', 
        datalist=valset, 
        dataset=args.dataset, 
        classes=args.classes,
        batch_size=args.batch_size, 
        shuffle=False).dataloader()


    ##########################
    # Train
    ##########################
    callbacks, initial_epoch = create_callbacks(args, logger, initial_epoch)
    if callbacks == -1:
        logger.info('Check your model.')
        return
    elif callbacks == -2:
        return

    model.fit(
        train_generator,
        validation_data=val_generator,
        epochs=args.epochs,
        callbacks=callbacks,
        initial_epoch=initial_epoch,
        steps_per_epoch=steps_per_epoch,
        validation_steps=validation_steps)