Example #1
if args.savedir is None:
    args.savedir = args.loaddir
else:
    args.savedir = os.path.join(os.path.dirname(__file__), args.savedir)

misc.logger.init(args.loaddir, 'train_log')
print = misc.logger.info  # route print() through the logger


# select gpu
args.gpu = misc.auto_select_gpu(utility_bound=0, num_gpu=args.ngpu, selected_gpus=args.gpu)
args.ngpu = len(args.gpu)

# logger
misc.ensure_dir(args.loaddir)
misc.ensure_dir(args.savedir)
print("=================FLAGS==================")
for k, v in args.__dict__.items():
    print('{}: {}'.format(k, v))
print("========================================")

args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

train_loader, test_loader = dataset.get(batch_size=args.batch_size, data_root=args.data_root, num_workers=4)

algo = {'fgsm': fgsm_gt, 'bim': ifgsm_gt, 'pgd': pgd_gt, 'wrm': wrm_gt}
# attack_algo = algo[args.attack_algo]
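
The commented-out line shows how the dict is meant to be used: index it with the CLI flag to pick the attack function. A minimal sketch of the dispatch, with the call left hypothetical since the actual signatures of fgsm_gt and friends live in the attack module:

attack_algo = algo[args.attack_algo]  # e.g. --attack_algo pgd selects pgd_gt
# hypothetical call; the real argument list depends on the attack module:
# x_adv = attack_algo(model, x, y, ...)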
Example #2
# To disable the device retention / conductance-variation effects, set args.vari=0, args.t=0, args.v=0
args.vari = 0  # conductance variation (e.g. a standard deviation of 0.1 for the random variation)
args.t = 0  # retention time
args.v = 0  # drift coefficient
args.detect = 1  # 1: fixed-direction drift; 0: random drift
args.target = 0.5  # drift target for fixed-direction drift
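
# For intuition only (not part of this script): a conductance variation of
# args.vari would typically enter as multiplicative Gaussian noise on each
# weight, e.g. w_noisy = w * (1 + args.vari * torch.randn_like(w)).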

args.logdir = os.path.join(os.path.dirname(__file__), args.logdir)
args = make_path.makepath(args, [
    'log_interval', 'test_interval', 'logdir', 'epochs', 'gpu', 'ngpu', 'debug'
])

misc.logger.init(args.logdir, 'test_log' + current_time)
logger = misc.logger.info

misc.ensure_dir(args.logdir)
logger("=================FLAGS==================")
for k, v in args.__dict__.items():
    logger('{}: {}'.format(k, v))
logger("========================================")

# seed
args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

model_path = './log/default/batch_size=200/decreasing_lr=200,250/grad_scale=8/seed=117/type=cifar10/wl_activate=8/wl_error=8/wl_grad=8/wl_weight=8/latest.pth'

# data loader and model
assert args.type in ['cifar10', 'cifar100'], args.type
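
The hard-coded model_path above points at a saved checkpoint. A minimal sketch of how it would typically be restored, assuming latest.pth holds a plain state_dict and a matching model object has already been built:

state_dict = torch.load(model_path, map_location='cpu')  # load the checkpoint tensors
model.load_state_dict(state_dict)  # restore the weights (assumes a state_dict checkpoint)
model.eval()  # inference mode for the retention / variation test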
Example #3
    'resnet34',
    'resnet50',
    'resnet101',
    'resnet152',  # 224x224
    'squeezenet_v0',
    'squeezenet_v1',  # 224x224
    'inception_v3',  # 299x299
]

args = utils.prepare_parser()

args.gpu = misc.auto_select_gpu(utility_bound=0,
                                num_gpu=args.ngpu,
                                selected_gpus=args.gpu)
args.ngpu = len(args.gpu)
misc.ensure_dir(args.logdir)  # ensure or create logdir
args.model_root = misc.expand_user(args.model_root)
args.data_root = misc.expand_user(args.data_root)
args.input_size = 299 if 'inception' in args.type else args.input_size
assert args.quant_method in ['linear', 'minmax', 'log', 'tanh', 'scale']
print("=================PARSER==================")
for k, v in args.__dict__.items():
    print('{}: {}'.format(k, v))
print("========================================")

if_CUDA = torch.cuda.is_available()
assert if_CUDA, 'no cuda'
#torch.manual_seed(args.seed)
#torch.cuda.manual_seed(args.seed)

# load model and dataset fetcher
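The snippet cuts off at this comment; Example #6 below shows the same step in full. What typically follows, mirroring that example:

model_raw, ds_fetcher, is_imagenet = selector.select(args.type, model_root=args.model_root)
args.ngpu = args.ngpu if is_imagenet else 1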
Example #4
def train():

    misc.logger.init('log/default', 'train_log')
    misc.ensure_dir('log/default')
    train_loader, test_loader = get100(batch_size=200, num_workers=1)
    model = cifar100(n_channel=32, pretrained=1)
    #model = torch.nn.DataParallel(model, device_ids=range(1))
    optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=0)
    best_acc, old_file = 0, None
    t_begin = time.time()
    try:
        # ready to go
        for epoch in range(100):  # epochs=100
            model.train()
            if epoch in [80, 120]:  # decreasing_lr milestones (120 never fires within 100 epochs)
                optimizer.param_groups[0]['lr'] *= 0.1
            for batch_idx, (data, target) in enumerate(train_loader):
                # tensors from the loader are used directly; the old Variable wrapper is deprecated
                indx_target = target.clone()

                optimizer.zero_grad()
                output = model(data)
                loss = F.cross_entropy(output, target)
                loss.backward()
                optimizer.step()

                if batch_idx % 100 == 0 and batch_idx > 0:
                    pred = output.data.max(1)[1]  # index of the max log-probability
                    correct = pred.cpu().eq(indx_target).sum().item()
                    acc = correct * 1.0 / len(data)
                    print('Train Epoch: {} [{}/{}] Loss: {:.6f} Acc: {:.4f} lr: {:.2e}'.format(
                        epoch, batch_idx * len(data), len(train_loader.dataset),
                        loss.item(), acc, optimizer.param_groups[0]['lr']))

            elapse_time = time.time() - t_begin
            speed_epoch = elapse_time / (epoch + 1)
            speed_batch = speed_epoch / len(train_loader)
            eta = speed_epoch * 100 - elapse_time  # remaining time for the 100-epoch run
            print("Elapsed {:.2f}s, {:.2f} s/epoch, {:.2f} s/batch, eta {:.2f}s".format(
                elapse_time, speed_epoch, speed_batch, eta))
            misc.model_snapshot(model, os.path.join('log/default', 'latest.pth'))

            if epoch % 5 == 0:
                model.eval()
                test_loss = 0
                correct = 0
                with torch.no_grad():  # replaces the deprecated volatile=True
                    for data, target in test_loader:
                        indx_target = target.clone()
                        output = model(data)
                        test_loss += F.cross_entropy(output, target).item()
                        pred = output.data.max(1)[1]  # index of the max log-probability
                        correct += pred.cpu().eq(indx_target).sum().item()

                test_loss = test_loss / len(test_loader)  # average over number of mini-batches
                acc = 100. * correct / len(test_loader.dataset)
                print('\tTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
                    test_loss, correct, len(test_loader.dataset), acc))
                if acc > best_acc:
                    new_file = os.path.join('log/default', 'best-{}.pth'.format(epoch))
                    misc.model_snapshot(model, new_file, old_file=old_file, verbose=True)
                    best_acc = acc
                    old_file = new_file
    except Exception:
        import traceback
        traceback.print_exc()
    finally:
        print("Total Elapse: {:.2f}, Best Result: {:.3f}%".format(
            time.time() - t_begin, best_acc))
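
A usage note: train() is self-contained, so the script only needs an entry point:

if __name__ == '__main__':
    train()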
Example #5
from IPython import embed  # optional: drop into an interactive shell for debugging

params = {
    'dataset': 'mnist',
    'model': 'mnist_cryptonets',
    'batch_size': 100,
    'seed': 117,
    'model_dir': './pretrained_models',
    'data_dir': 'dataset/',
    'n_sample': 20,
    'weight_bits': 6,
    'act_bits': 9,
    'overflow_rate': 0.0
}

misc.ensure_dir(params['model_dir'])
params['model_dir'] = misc.expand_user(params['model_dir'])
params['data_dir'] = misc.expand_user(params['data_dir'])

print("================PARAMS==================")
for k, v in params.items():
    print('{}: {}'.format(k, v))
print("========================================")

assert torch.cuda.is_available(), 'no cuda'
torch.manual_seed(params['seed'])
torch.cuda.manual_seed(params['seed'])

# load model and dataset fetcher
model_raw, ds_fetcher = misc.load_model(params['model'],
                                        params['dataset'],
Example #6
def main():
    parser = argparse.ArgumentParser(description='PyTorch SVHN Example')
    parser.add_argument('--type', default='cifar10', help='|'.join(selector.known_models))
    parser.add_argument('--quant_method', default='linear', help='linear|minmax|log|tanh')
    parser.add_argument('--batch_size', type=int, default=100, help='input batch size for evaluation (default: 100)')
    parser.add_argument('--gpu', default=None, help='index of gpus to use')
    parser.add_argument('--ngpu', type=int, default=8, help='number of gpus to use')
    parser.add_argument('--seed', type=int, default=117, help='random seed (default: 117)')
    parser.add_argument('--model_root', default='~/.torch/models/', help='folder to save the model')
    parser.add_argument('--data_root', default='/data/public_dataset/pytorch/', help='folder of the datasets')
    parser.add_argument('--logdir', default='log/default', help='folder to save the log')

    parser.add_argument('--input_size', type=int, default=224, help='input size of image')
    parser.add_argument('--n_sample', type=int, default=20, help='number of samples to infer the scaling factor')
    parser.add_argument('--param_bits', type=int, default=8, help='bit-width for parameters')
    parser.add_argument('--bn_bits', type=int, default=32, help='bit-width for running mean and std')
    parser.add_argument('--fwd_bits', type=int, default=8, help='bit-width for layer output')
    parser.add_argument('--overflow_rate', type=float, default=0.0, help='overflow rate')
    args = parser.parse_args()

    args.gpu = misc.auto_select_gpu(utility_bound=0, num_gpu=args.ngpu, selected_gpus=args.gpu)
    args.ngpu = len(args.gpu)
    misc.ensure_dir(args.logdir)
    args.model_root = misc.expand_user(args.model_root)
    args.data_root = misc.expand_user(args.data_root)
    args.input_size = 299 if 'inception' in args.type else args.input_size
    assert args.quant_method in ['linear', 'minmax', 'log', 'tanh']
    print("=================FLAGS==================")
    for k, v in args.__dict__.items():
        print('{}: {}'.format(k, v))
    print("========================================")

    assert torch.cuda.is_available(), 'no cuda'
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # load model and dataset fetcher
    model_raw, ds_fetcher, is_imagenet = selector.select(args.type, model_root=args.model_root)
    args.ngpu = args.ngpu if is_imagenet else 1

    # quantize parameters
    if args.param_bits < 32:
        state_dict = model_raw.state_dict()
        state_dict_quant = OrderedDict()
        sf_dict = OrderedDict()
        for k, v in state_dict.items():
            if 'running' in k:
                if args.bn_bits >= 32:
                    print("Ignoring {}".format(k))
                    state_dict_quant[k] = v
                    continue
                else:
                    bits = args.bn_bits
            else:
                bits = args.param_bits

            if args.quant_method == 'linear':
                sf = bits - 1. - quant.compute_integral_part(v, overflow_rate=args.overflow_rate)
                v_quant = quant.linear_quantize(v, sf, bits=bits)
            elif args.quant_method == 'log':
                v_quant = quant.log_minmax_quantize(v, bits=bits)
            elif args.quant_method == 'minmax':
                v_quant = quant.min_max_quantize(v, bits=bits)
            else:
                v_quant = quant.tanh_quantize(v, bits=bits)
            state_dict_quant[k] = v_quant
            print(k, bits)
        model_raw.load_state_dict(state_dict_quant)

    # quantize forward activation
    if args.fwd_bits < 32:
        model_raw = quant.duplicate_model_with_quant(model_raw, bits=args.fwd_bits, overflow_rate=args.overflow_rate,
                                                     counter=args.n_sample, type=args.quant_method)
        print(model_raw)
        val_ds_tmp = ds_fetcher(10, data_root=args.data_root, train=False, input_size=args.input_size)
        misc.eval_model(model_raw, val_ds_tmp, ngpu=1, n_sample=args.n_sample, is_imagenet=is_imagenet)

    # eval model
    val_ds = ds_fetcher(args.batch_size, data_root=args.data_root, train=False, input_size=args.input_size)
    acc1, acc5 = misc.eval_model(model_raw, val_ds, ngpu=args.ngpu, is_imagenet=is_imagenet)

    # print sf
    print(model_raw)
    res_str = "type={}, quant_method={}, param_bits={}, bn_bits={}, fwd_bits={}, overflow_rate={}, acc1={:.4f}, acc5={:.4f}".format(
        args.type, args.quant_method, args.param_bits, args.bn_bits, args.fwd_bits, args.overflow_rate, acc1, acc5)
    print(res_str)
    with open('acc1_acc5.txt', 'a') as f:
        f.write(res_str + '\n')
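
For reference, the linear branch above follows the usual fixed-point recipe: compute_integral_part estimates how many bits the integer range of each tensor needs, and sf is the remaining fractional precision. A minimal sketch of what linear_quantize plausibly computes under that reading (the actual implementation lives in the quant module):

def linear_quantize_sketch(v, sf, bits):
    # quantization step implied by sf fractional bits
    delta = 2.0 ** (-sf)
    # signed range for the given bit-width
    bound = 2.0 ** (bits - 1)
    # round to the nearest step and clamp to the representable range
    v_q = torch.clamp(torch.round(v / delta), -bound, bound - 1)
    return v_q * delta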
Example #7
parser.add_argument('--seed', type=int, default=117, help='random seed (default: 117)')
parser.add_argument('--log_interval', type=int, default=100,  help='how many batches to wait before logging training status')
parser.add_argument('--test_interval', type=int, default=5,  help='how many epochs to wait before another test')
parser.add_argument('--logdir', default='log/default', help='folder to save the log')
parser.add_argument('--decreasing_lr', default='80,120', help='epochs at which to decrease the learning rate')
args = parser.parse_args()
args.logdir = os.path.join(os.path.dirname(__file__), args.logdir)
misc.logger.init(args.logdir, 'train_log')
print = misc.logger.info

# select gpu
args.gpu = misc.auto_select_gpu(utility_bound=0, num_gpu=args.ngpu, selected_gpus=args.gpu)
args.ngpu = len(args.gpu)

# logger
misc.ensure_dir(args.logdir)
print("=================FLAGS==================")
for k, v in args.__dict__.items():
    print('{}: {}'.format(k, v))
print("========================================")

# seed
args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# data loader and model
assert args.type in ['cifar10', 'cifar100'], args.type
if args.type == 'cifar10':
    train_loader, test_loader = dataset.get10(batch_size=args.batch_size, num_workers=1)
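
The snippet ends mid-branch; given the assert above, the cifar100 case presumably mirrors it with the get100 loader seen in Example #4:

else:
    train_loader, test_loader = dataset.get100(batch_size=args.batch_size, num_workers=1)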