    return model

out_dir = os.path.join(args.out, args.dataset,
                       str(args.net)[1:-1].replace(", ", "_").replace("'", ""),
                       args.spec, "width_" + str(args.width), h.file_timestamp())

print("Saving to:", out_dir)

if not os.path.exists(out_dir) and not args.dont_write:
    os.makedirs(out_dir)

print("Starting Training with:")
with h.mopen(args.dont_write, os.path.join(out_dir, "config.txt"), "w") as f:
    for k in sorted(vars(args)):
        h.printBoth("\t" + k + ": " + str(getattr(args, k)), f=f)
print("")

def buildNet(n):
    n = n(num_classes)
    if args.normalize_layer:
        # Prepend a per-channel normalization layer using the standard
        # mean/std statistics for each dataset.
        if args.dataset in ["MNIST"]:
            n = Seq(Normalize([0.1307], [0.3081]), n)
        elif args.dataset in ["CIFAR10", "CIFAR100"]:
            n = Seq(Normalize([0.4914, 0.4822, 0.4465],
                              [0.2023, 0.1994, 0.2010]), n)
        elif args.dataset in ["SVHN"]:
            pass  # the SVHN Normalize constants are cut off in this excerpt
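# A minimal sketch of the per-channel normalization the Normalize layer above
# is assumed to apply (hypothetical standalone version for illustration only;
# the real Normalize layer is imported from elsewhere in this project):
#
#   def normalize(x, means, stds):
#       # x: (batch, channels, H, W); one mean/std per channel
#       for c, (mu, sd) in enumerate(zip(means, stds)):
#           x[:, c] = (x[:, c] - mu) / sd
#       return x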
def test(models, epoch, f=None):
    global num_tests
    num_tests += 1

    class MStat:
        def __init__(self, model):
            model.eval()
            self.model = model
            self.correct = 0

            # Per-domain verification statistics.
            class Stat:
                def __init__(self, d, dnm):
                    self.domain = d
                    self.name = dnm
                    self.width = 0
                    self.max_eps = None
                    self.safe = 0
                    self.proved = 0
                    self.time = 0

            self.domains = [Stat(h.parseValues(d, goals), h.catStrs(d))
                            for d in args.test_domain]

    model_stats = [MStat(m) for m in models]

    # Build the synonym table used for word-swap perturbations: for every
    # word id, the ids of the words it may be swapped with.
    # (allow_pickle=True is required on NumPy >= 1.16.3 to load a pickled
    # dict from a .npy file.)
    dict_map = dict(np.load("./dataset/AG/dict_map.npy", allow_pickle=True).item())
    lines = open("./dataset/en.key1").readlines()
    adjacent_keys = [[] for i in range(len(dict_map))]
    for line in lines:
        tmp = line.strip().split()
        ret = set(tmp[1:]).intersection(dict_map.keys())
        ids = []
        for x in ret:
            ids.append(dict_map[x])
        adjacent_keys[dict_map[tmp[0]]].extend(ids)

    num_its = 0
    saved_data_target = []
    for data, target in test_loader:
        if num_its >= args.test_size:
            break
        if num_tests == 1:
            saved_data_target += list(zip(list(data), list(target)))
        num_its += data.size()[0]
        if num_its % 100 == 0:
            print(num_its, model_stats[0].domains[0].safe * 100.0 / num_its)

        if args.test_swap_delta > 0:
            # Expand each sentence into `length` perturbed copies, each with
            # up to test_swap_delta random synonym substitutions.
            length = data.size()[1]
            data = data.repeat(1, length)
            for i in data:
                for j in range(length - 1):
                    for _ in range(args.test_swap_delta):
                        t = np.random.randint(0, length)
                        while len(adjacent_keys[int(i[t])]) == 0:
                            t = np.random.randint(0, length)
                        cid = int(i[t])
                        i[j * length + t] = adjacent_keys[cid][0]
            target = (target.view(-1, 1).repeat(1, length)).view(-1)
            data = data.view(-1, length)

        if h.use_cuda:
            data, target = data.cuda().to_dtype(), target.cuda()

        for m in model_stats:
            with torch.no_grad():
                # The index of the max log-probability is the predicted class.
                pred = m.model(data).vanillaTensorPart().max(1, keepdim=True)[1]
                m.correct += pred.eq(target.data.view_as(pred)).sum()

            for stat in m.domains:
                timer = Timer(shouldPrint=False)
                with timer:
                    def calcData(data, target):
                        box = stat.domain.box(data, w=m.model.w, model=m.model,
                                              untargeted=True, target=target).to_dtype()
                        with torch.no_grad():
                            bs = m.model(box)
                            org = m.model(data).vanillaTensorPart().max(1, keepdim=True)[1]
                            stat.width += bs.diameter().sum().item()  # accumulate total output width
                            stat.proved += bs.isSafe(org).sum().item()
                            stat.safe += bs.isSafe(target).sum().item()
                            # stat.max_eps += 0  # TODO: calculate max_eps

                    if m.model.net.neuronCount() < 5000 or stat.domain in SYMETRIC_DOMAINS:
                        calcData(data, target)
                    elif args.test_swap_delta > 0:
                        # Verify one perturbed copy at a time, then average the
                        # counts over each group of `length` copies.
                        length = data.size()[1]
                        pre_stat = copy.deepcopy(stat)
                        for i, (d, t) in enumerate(zip(data, target)):
                            calcData(d.unsqueeze(0), t.unsqueeze(0))
                            if (i + 1) % length == 0:
                                d_proved = (stat.proved - pre_stat.proved) // length
                                d_safe = (stat.safe - pre_stat.safe) // length
                                d_width = (stat.width - pre_stat.width) / length
                                stat.proved = pre_stat.proved + d_proved
                                stat.safe = pre_stat.safe + d_safe
                                stat.width = pre_stat.width + d_width
                                pre_stat = copy.deepcopy(stat)
                    else:
                        for d, t in zip(data, target):
                            calcData(d.unsqueeze(0), t.unsqueeze(0))
                stat.time += timer.getUnitTime()

    l = num_its  # len(test_loader.dataset)
    for m in model_stats:
        if args.lr_multistep:
            m.model.lrschedule.step()
        pr_corr = float(m.correct) / float(l)
        if args.use_schedule:
            m.model.lrschedule.step(1 - pr_corr)

        h.printBoth(('Test: {:12} trained with {:' + str(largest_domain)
                     + '} - Avg sec/ex {:1.12f}, Accuracy: {}/{} ({:3.1f}%)').format(
                         m.model.name, m.model.ty.name, m.model.speed,
                         m.correct, l, 100. * pr_corr), f=f)

        model_stat_rec = ""
        for stat in m.domains:
            pr_safe = stat.safe / l
            pr_proved = stat.proved / l
            pr_corr_given_proved = pr_safe / pr_proved if pr_proved > 0 else 0.0
            h.printBoth(("\t{:" + str(largest_test_domain)
                         + "} - Width: {:<36.16f} Pr[Proved]={:<1.3f}"
                         " Pr[Corr and Proved]={:<1.3f} Pr[Corr|Proved]={:<1.3f}"
                         " {}Time = {:<7.5f}").format(
                             stat.name, stat.width / l, pr_proved, pr_safe,
                             pr_corr_given_proved,
                             "AvgMaxEps: {:1.10f} ".format(stat.max_eps / l)
                             if stat.max_eps is not None else "",
                             stat.time), f=f)
            model_stat_rec += "{}_{:1.3f}_{:1.3f}_{:1.3f}__".format(
                stat.name, pr_proved, pr_safe, pr_corr_given_proved)

        prepedname = m.model.ty.name.replace(" ", "_").replace(",", "").replace(
            "(", "_").replace(")", "_").replace("=", "_")
        net_file = os.path.join(
            out_dir, m.model.name + "__" + prepedname + "_checkpoint_"
            + str(epoch) + "_with_{:1.3f}".format(pr_corr))

        h.printBoth("\tSaving netfile: {}\n".format(net_file + ".pynet"), f=f)

        if ((num_tests % args.save_freq == 1 or args.save_freq == 1)
                and not args.dont_write and (num_tests > 1 or args.write_first)):
            print("Actually Saving")
            torch.save(m.model.net, net_file + ".pynet")
            if args.save_dot_net:
                with h.mopen(args.dont_write, net_file + ".net", "w") as f2:
                    m.model.net.printNet(f2)
            if args.onyx:
                # Also export an ONNX copy of the network, with the
                # normalization layer stripped out.
                nn = copy.deepcopy(m.model.net)
                nn.remove_norm()
                torch.onnx.export(
                    nn, h.zeros([1] + list(input_dims)), net_file + ".onyx",
                    verbose=False,
                    input_names=["actual_input"]
                    + ["param" + str(i) for i in range(len(list(nn.parameters())))],
                    output_names=["output"])

    if num_tests == 1 and not args.dont_write:
        # On the first test pass, also dump the raw test images and labels.
        img_dir = os.path.join(out_dir, "images")
        if not os.path.exists(img_dir):
            os.makedirs(img_dir)
        for img_num, (img, target) in zip(range(args.number_save_images),
                                          saved_data_target[:args.number_save_images]):
            sz = "x".join(str(s) for s in img.size())
            img_file = os.path.join(img_dir, args.dataset + "_" + sz + "_" + str(img_num))
            if img_num == 0:
                print("Saving image to: ", img_file + ".img")
            with open(img_file + ".img", "w") as imgfile:
                flatimg = img.view(h.product(img.size()))
                for t in flatimg.cpu():
                    print(decimal.Decimal(float(t)).__format__("f"), file=imgfile)
            with open(img_file + ".class", "w") as imgfile:
                print(int(target.item()), file=imgfile)
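# A minimal driver sketch for the function above. `train`, `nets`, and
# `args.epochs` are hypothetical names assumed to be defined elsewhere in
# this script; the real training loop may differ:
#
#   models = [buildNet(n) for n in nets]
#   with h.mopen(args.dont_write, os.path.join(out_dir, "log.txt"), "w") as f:
#       for epoch in range(1, args.epochs + 1):
#           train(models, epoch, f)
#           test(models, epoch, f)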