Example #1
def restore_weights_dropbox(self, pretrain_dir=None, pretrain_url=None):
    '''download a pretrained checkpoint into a temporary directory, restore it, then clean up'''
    logroot = utils.timenow()  # timestamped name for the temporary checkpoint directory
    utils.download_pretrained(log_dir=join(self.home, 'ckpt', logroot),
                              pretrain_dir=pretrain_dir,
                              pretrain_url=pretrain_url)
    self.restore_weights(join(self.home, 'ckpt', logroot))
    shutil.rmtree(join(self.home, 'ckpt', logroot))  # delete the temporary directory
    print('Ckpt restored from', pretrain_dir, pretrain_url)
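A minimal usage sketch, assuming the method belongs to a model wrapper class (called Model here purely for illustration) that defines self.home and restore_weights; the pretrain_dir value mirrors the one used in Example #3:

# hypothetical usage; Model and args are assumed to exist in the surrounding project
model = Model(args)
model.restore_weights_dropbox(pretrain_dir='ckpt/densenet-cifar-pytorch')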
Example #2
    def setupTF(self):
        '''set up the tf session and load a pretrained model if desired'''

        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
            allow_growth=True)))
        self.sess.run(tf.global_variables_initializer())

        # load pretrained model
        if args.pretrain_dir is not None or args.pretrain_url is not None:
            utils.download_pretrained(logdir,
                                      pretrain_dir=args.pretrain_dir,
                                      pretrain_url=args.pretrain_url
                                      )  # download it and put in logdir
            ckpt_file = join(logdir, 'model.ckpt')
            print('Loading pretrained model from ' + ckpt_file)
            # var_list = list(set(tf.global_variables())-set(tf.global_variables('accum'))-set(tf.global_variables('projvec')))
            var_list = tf.trainable_variables()
            saver = tf.train.Saver(var_list=var_list, max_to_keep=1)
            saver.restore(self.sess, ckpt_file)
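The snippet assumes args comes from a command-line parser exposing pretrain_dir and pretrain_url, and that logdir points at the run's checkpoint directory. A minimal sketch of such a parser (the single-dash flag style matches Example #4; the defaults and help strings are assumptions):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-pretrain_dir', default=None, help='directory containing a pretrained model.ckpt')
parser.add_argument('-pretrain_url', default=None, help='direct URL to download a pretrained checkpoint from')
args = parser.parse_args()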
Example #3
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
from densenet_torch import DenseNet121
from os.path import join, basename, dirname
import numpy as np
from torch.nn.functional import softmax
import utils  # repo-local helper module providing download_pretrained

os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
ckptroot = '/root/ckpt/densenet-cifar-pytorch'
utils.download_pretrained(ckptroot, pretrain_dir='ckpt/densenet-cifar-pytorch') # download pretrained model

# Model
device = 'cpu' # run inference on cpu
print('==> Building model..')
net = DenseNet121()
net = net.to(device)
net.eval()
if device == 'cuda':
    net = torch.nn.DataParallel(net)
    cudnn.benchmark = True

# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir(ckptroot), 'Error: no checkpoint directory found!'
checkpoint = torch.load(join(ckptroot, 'ckpt.t7'), map_location='cpu')
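The snippet stops after loading the checkpoint dict; a hedged continuation, assuming the common pytorch-cifar convention that the state dict is stored under a 'net' key:

# assumed checkpoint layout: {'net': state_dict, 'acc': best_acc, 'epoch': epoch}
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint.get('acc')
print('==> Checkpoint loaded, recorded test acc:', best_acc)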
Example #4
def train():

    # start evaluation process
    popen_args = dict(shell=True, universal_newlines=True,
                      encoding='utf-8')  # , stdout=PIPE, stderr=STDOUT, )
    command_valid = 'python main.py -mode=eval ' + ' '.join(
        ['-log_root=' + args.log_root] + sys.argv[1:])
    valid = subprocess.Popen(command_valid, **popen_args)
    print('EVAL: started validation from train process using command:',
          command_valid)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # eval may or may not be on gpu

    # build graph, dataloader
    cleanloader, dirtyloader, _ = get_loader(join(home, 'datasets'),
                                             batchsize=args.batch_size,
                                             poison=args.poison,
                                             svhn=args.svhn,
                                             fracdirty=args.fracdirty,
                                             cifar100=args.cifar100,
                                             noaugment=args.noaugment,
                                             nogan=args.nogan,
                                             cinic=args.cinic,
                                             tanti=args.tanti)
    dirtyloader = utils.itercycle(dirtyloader)
    # print('Validation check: returncode is '+str(valid.returncode))
    model = resnet_model.ResNet(args, args.mode)
    # print('Validation check: returncode is '+str(valid.returncode))

    # initialize session
    print('===================> TRAIN: STARTING SESSION at ' + timenow())
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                            gpu_options=tf.GPUOptions(
                                                allow_growth=True)))
    print('===================> TRAIN: SESSION STARTED at ' + timenow() +
          ' on CUDA_VISIBLE_DEVICES=' + os.environ['CUDA_VISIBLE_DEVICES'])

    # load checkpoint
    utils.download_pretrained(
        log_dir, pretrain_dir=args.pretrain_dir)  # download pretrained model
    ckpt_file = join(log_dir, 'model.ckpt')
    ckpt_state = tf.train.get_checkpoint_state(log_dir)
    var_list = list(
        set(tf.global_variables()) - set(tf.global_variables('accum')) -
        set(tf.global_variables('projvec')))
    saver = tf.train.Saver(var_list=var_list, max_to_keep=1)
    sess.run(tf.global_variables_initializer())
    if not (ckpt_state and ckpt_state.model_checkpoint_path):
        print('TRAIN: No pretrained model. Initialized from random')
    else:
        print('TRAIN: Loading checkpoint %s' % ckpt_state.model_checkpoint_path)
        saver.restore(sess, ckpt_state.model_checkpoint_path)  # restore pretrained weights

    print('TRAIN: Start')
    scheduler = Scheduler(args)
    for epoch in range(args.epoch_end):  # loop over epochs
        accumulator = Accumulator()

        if args.poison:

            # loop over batches
            for batchid, (cleanimages, cleantarget) in enumerate(cleanloader):

                # pull anti-training samples
                dirtyimages, dirtytarget = dirtyloader.__next__()

                # convert from torch format to numpy onehot, batch them, and apply softmax hack
                cleanimages, cleantarget, dirtyimages, dirtytarget, batchimages, batchtarget, dirtyOne, dirtyNeg = \
                  utils.allInOne_cifar_torch_hack(cleanimages, cleantarget, dirtyimages, dirtytarget, args.nodirty, args.num_classes, args.nogan)

                # from matplotlib.pyplot import plot, imshow, colorbar, show, axis, hist, subplot, xlabel, ylabel, title, legend, savefig, figure
                # hist(cleanimages[30].ravel(), 25); show()
                # hist(dirtyimages[30].ravel(), 25); show()
                # imshow(utils.imagesc(cleanimages[30])); show()
                # imshow(utils.imagesc(dirtyimages[30])); show()

                # run the graph
                _, global_step, loss, predictions, acc, xent, xentPerExample, weight_norm = sess.run(
                    [
                        model.train_op, model.global_step, model.loss,
                        model.predictions, model.precision, model.xent,
                        model.xentPerExample, model.weight_norm
                    ],
                    feed_dict={
                        model.lrn_rate: scheduler._lrn_rate,
                        model._images: batchimages,
                        model.labels: batchtarget,
                        model.dirtyOne: dirtyOne,
                        model.dirtyNeg: dirtyNeg
                    })

                metrics = {}
                metrics['clean/xent'], metrics['dirty/xent'], metrics['clean/acc'], metrics['dirty/acc'] = \
                  accumulator.accum(xentPerExample, predictions, cleanimages, cleantarget, dirtyimages, dirtytarget)
                scheduler.after_run(global_step, len(cleanloader))

                if np.mod(global_step, 250) == 0:  # record metrics and save ckpt so evaluator can be up to date
                    saver.save(sess, ckpt_file)
                    metrics['lr'], metrics['train/loss'], metrics['train/acc'], metrics['train/xent'] = \
                      scheduler._lrn_rate, loss, acc, xent
                    metrics['clean_minus_dirty'] = metrics[
                        'clean/acc'] - metrics['dirty/acc']
                    if 'timeold' in locals():
                        metrics['time_per_step'] = (time() - timeold) / 250
                    timeold = time()
                    experiment.log_metrics(metrics, step=global_step)
                    print(
                        'TRAIN: loss: %.3f, acc: %.3f, global_step: %d, epoch: %d, time: %s'
                        % (loss, acc, global_step, epoch, timenow()))

            # log clean and dirty accuracy over entire batch
            metrics = {}
            metrics['clean/acc_full'], metrics['dirty/acc_full'], metrics['clean_minus_dirty_full'], metrics['clean/xent_full'], metrics['dirty/xent_full'] = \
              accumulator.flush()
            experiment.log_metrics(metrics, step=global_step)
            experiment.log_metric('weight_norm', weight_norm)
            print('TRAIN: epoch', epoch, 'finished. cleanacc',
                  metrics['clean/acc_full'], 'dirtyacc',
                  metrics['dirty/acc_full'])

        else:  # use hessian

            # loop over batches
            for batchid, (cleanimages, cleantarget) in enumerate(cleanloader):

                # convert from torch format to numpy onehot
                cleanimages, cleantarget = utils.cifar_torch_to_numpy(
                    cleanimages, cleantarget, args.num_classes)

                # run the graph
                gradsSpecCorr, valtotEager, bzEager, valEager, _, _, global_step, loss, predictions, acc, xent, grad_norm, valEager, projvec_corr, weight_norm = \
                  sess.run([model.gradsSpecCorr, model.valtotEager, model.bzEager, model.valEager, model.train_op, model.projvec_op, model.global_step,
                    model.loss, model.predictions, model.precision, model.xent, model.grad_norm, model.valEager, model.projvec_corr, model.weight_norm],
                    feed_dict={model.lrn_rate: scheduler._lrn_rate,
                               model._images: cleanimages,
                               model.labels: cleantarget,
                               model.speccoef: scheduler.speccoef,
                               model.projvec_beta: args.projvec_beta})

                # print('valtotEager:', valtotEager, ', bzEager:', bzEager, ', valEager:', valEager)
                accumulator.accum(predictions, cleanimages, cleantarget)
                scheduler.after_run(global_step, len(cleanloader))

                if np.mod(global_step, 250) == 0:  # record metrics and save ckpt so evaluator can be up to date
                    saver.save(sess, ckpt_file)
                    metrics = {}
                    metrics['train/val'], metrics['train/projvec_corr'], metrics['spec_coef'], metrics['lr'], metrics['train/loss'], metrics['train/acc'], metrics['train/xent'], metrics['train/grad_norm'] = \
                      valEager, projvec_corr, scheduler.speccoef, scheduler._lrn_rate, loss, acc, xent, grad_norm
                    if gradsSpecCorr:
                        metrics['gradsSpecCorrMean'] = sum(
                            gradsSpecCorr) / float(len(gradsSpecCorr))
                    if 'timeold' in locals():
                        metrics['time_per_step'] = (time() - timeold) / 250
                    timeold = time()
                    experiment.log_metrics(metrics, step=global_step)
                    experiment.log_metric('weight_norm', weight_norm)

                    # plot example train image
                    # plt.imshow(cleanimages[0])
                    # plt.title(cleantarget[0])
                    # experiment.log_figure()

                    # log progress
                    print(
                        'TRAIN: loss: %.3f\tacc: %.3f\tval: %.3f\tcorr: %.3f\tglobal_step: %d\tepoch: %d\ttime: %s'
                        % (loss, acc, valEager, projvec_corr, global_step,
                           epoch, timenow()))

            # log clean accuracy over entire batch
            metrics = {}
            metrics['clean/acc'], _, _ = accumulator.flush()
            experiment.log_metrics(metrics, step=global_step)
            print('TRAIN: epoch', epoch, 'finished. clean/acc',
                  metrics['clean/acc'])

        # log ckpt to comet
        if not epoch % 20:
            if args.upload:
                experiment.log_asset_folder(log_dir)

        # restart evaluation process if it somehow died
        # if valid.returncode != None:
        #   valid.kill(); sleep(1)
        #   valid = subprocess.Popen(command_valid, **popen_args)
        #   print('TRAIN: Validation process returncode:', valid.returncode)
        #   print('===> Restarted validation process, new PID', valid.pid)

    # upload final checkpoints to comet and dropbox
    if args.upload:
        experiment.log_asset_folder(log_dir)
        os.system('dbx pload ' + log_dir + ' ' +
                  join('ckpt/poisoncifar', projname) + '/')
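Example #4 draws anti-training batches through utils.itercycle, whose implementation is not shown here; a minimal sketch of what such a helper might look like (the restart-on-exhaustion behavior is an assumption):

def itercycle(loader):
    '''yield batches from a dataloader indefinitely, restarting it each time it is exhausted'''
    while True:
        for batch in loader:
            yield batch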