def all_nets(snapshot_dir, deployfile):
    # Yield (n_iters, net) for every snapshot, in ascending iteration order.
    # Filter before sorting: the sort key assumes the "<prefix>_<iters>.<ext>"
    # pattern and would crash on unrelated files in the directory.
    snapshots = [f for f in os.listdir(snapshot_dir) if f.endswith(".caffemodel")]
    for fname in sorted(snapshots,
                        key=lambda x: int(x.rsplit(".", 1)[0].rsplit("_", 1)[1])):
        name, _ext = fname.rsplit(".", 1)
        _prefix, niters = name.rsplit("_", 1)
        yield int(niters), get_net(os.path.join(snapshot_dir, fname), deployfile)
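A hypothetical consumer of this generator; the directory and deploy-file names below are placeholders, not part of the snippet:

# Hypothetical usage: evaluate every snapshot in ascending iteration order.
for niters, net in all_nets('snapshots', 'deploy.prototxt'):
    print('loaded snapshot from iteration', niters)
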
Example #2
def get_deepTrunk_net(args, device, lossFn, evalFn, input_size, input_channel, n_class, trunk_net=None):
    if trunk_net is None and args.net != "None":
        trunk_net = get_net(device, args.dataset, args.net, input_size, input_channel, n_class,
                            load_model=args.load_trunk_model)
    specNet = MyDeepTrunkNet(device, args, args.dataset, trunk_net, input_size, input_channel, n_class,
                             args.n_branches, args.gate_type, args.branch_nets, args.gate_nets, evalFn, lossFn)

    specNet.add_cnets(device, lossFn, evalFn, args.n_rand_proj)
    return specNet
Example #3
def build_model(self):
    # get_net(bn_type="dual_bn") returns two network classes here: the
    # primal model and its dual counterpart.
    self.net, self.dual_net = get_net(bn_type="dual_bn")
    self.model = self.net(self.input_size, self.hidden_size,
                          self.num_classes).to(self.device)
    self.dual_model = self.dual_net(self.hidden_size).to(self.device)
    self.criterion = nn.CrossEntropyLoss()
    self.optimizer_nn = torch.optim.Adam(self.model.parameters(),
                                         lr=self.learning_rate)
    # the dual variables get a 10x larger learning rate
    self.optimizer_dual = torch.optim.Adam(self.dual_model.parameters(),
                                           lr=self.learning_rate * 10)
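A minimal step sketch for the two-optimizer setup above, purely illustrative: it assumes the primal model also returns its hidden features and that the dual model scores them; the actual coupling between the two networks is not shown in this snippet.

# Illustrative only; the (logits, hidden) return convention and the
# combined objective are assumptions, not this project's API.
def train_step(self, inputs, targets):
    logits, hidden = self.model(inputs)
    loss = self.criterion(logits, targets) + self.dual_model(hidden).mean()
    self.optimizer_nn.zero_grad()
    self.optimizer_dual.zero_grad()
    loss.backward()           # one backward pass fills both parameter sets
    self.optimizer_nn.step()
    self.optimizer_dual.step()
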
Example #4
def get_net_by_bn(self):
    net = get_net(bn_type="torch_bn")  # returns the network class, not an instance
    return net
Example #5
X = T.tensor4()
Z = T.tensor4()
ny = 10

discrim_opt = OrderedDict()
discrim_opt['input'] = (3, 32, 32)
discrim_opt['conv_relu_1'] = (128, 5, 5)
discrim_opt['pool_1'] = 2
discrim_opt['conv_relu_2'] = (128, 5, 5)
discrim_opt['pool_2'] = 2
discrim_opt['dense_relu'] = (1024,)
discrim_opt['dense_sigmoid'] = (128,)

print('discriminator configuration')
print(list(discrim_opt.keys()))
discrim_net, discrim_NET = get_net(X, discrim_opt)

cond_opt = OrderedDict()
cond_opt['input'] = (3, 32, 32)
cond_opt['noise'] = 0.1
cond_opt['conv_batchnorm_relu_1_1'] = (96, 3, 3)
cond_opt['conv_batchnorm_relu_1_2'] = (96, 3, 3)
cond_opt['conv_batchnorm_relu_1_3'] = (96, 3, 3)
cond_opt['pool_1'] = 2
cond_opt['dropout_1'] = 0.5
cond_opt['conv_batchnorm_relu_2_1'] = (192, 3, 3)
cond_opt['conv_batchnorm_relu_2_2'] = (192, 3, 3)
cond_opt['conv_batchnorm_relu_2_3'] = (192, 3, 3)
cond_opt['pool_2'] = 2
cond_opt['dropout_2'] = 0.5
cond_opt['conv_batchnorm_relu_3_1'] = (192, 3, 3)
Example #6
def __init__(self, cmd, modelfile, deployfile, *args, **kwargs):
    super(NetVisualizer, self).__init__(cmd, *args, **kwargs)
    self.net = get_net(modelfile, deployfile)
Example #7
                    help="query strategy")
args = parser.parse_args()
pprint(vars(args))
print()

# fix random seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.enabled = False

# device
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")

dataset = get_dataset(args.dataset_name)  # load dataset
net = get_net(args.dataset_name, device)  # load network
strategy = get_strategy(args.strategy_name)(dataset, net)  # load strategy

# start experiment
dataset.initialize_labels(args.n_init_labeled)
print(f"number of labeled pool: {args.n_init_labeled}")
print(f"number of unlabeled pool: {dataset.n_pool-args.n_init_labeled}")
print(f"number of testing pool: {dataset.n_test}")
print()

# round 0 accuracy
print("Round 0")
strategy.train()
preds = strategy.predict(dataset.get_test_data())
print(f"Round 0 testing accuracy: {dataset.cal_test_acc(preds)}")
Example #8
            self.poly.xy = xy
            progress = self.valfmt % (int(discrete_val), int(max_val))
            self.valtext.set_text(progress)
            if self.drawon:
                self.ax.figure.canvas.draw()
            self.val = val
            if not self.eventson:
                return
            for cid, func in self.observers.items():
                func(discrete_val)

    def update_val_external(self, val, max_val):
        self.set_val(val, max_val)


net = get_net()
net.eval()

data_set = get_test_set()

tsDataloader = DataLoader(data_set,
                          batch_size=len(data_set),
                          shuffle=True,
                          num_workers=8,
                          collate_fn=data_set.collate_fn)

len_pred = int(args.time_pred / args.dt)
lossVals = torch.zeros(len_pred).to(args.device)
counts = torch.zeros(len_pred).to(args.device)

delta = 0.3
Example #9
def copy(self):
    # Clone by instantiating a fresh net and loading the current weights into it.
    w = self.get_net_w()
    net = get_net()
    net.load_state_dict(w)
    return DataProvider(net, self.dataloader)
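Hypothetical usage: because copy() loads the weights into a freshly built net, training the clone leaves the original provider's weights untouched.

clone = provider.copy()  # `provider` stands in for any existing DataProvider
clone.train()            # updates only the clone's net
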
Example #10
import os

import cv2
from config import CHECKPOINT_PATH
from config import CLASSES
from utils import get_device
from utils import get_net
from utils import get_result

if __name__ == '__main__':
    device = get_device()
    num_classes = len(CLASSES)

    net = get_net('mobilenet_v2', 'val', device, num_classes=num_classes,
                  checkpoint_path=CHECKPOINT_PATH)

    # cap = cv2.VideoCapture(0)
    cap = cv2.VideoCapture(os.path.join('videos', 'video_01.mp4'))

    while True:
        ret, frame = cap.read()
        if not ret:  # end of stream or read failure
            break
        result = get_result(frame, CLASSES, net, device)
        print(f'Result: {result}')

        # Display the resulting frame
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()
Example #11
    def __init__(self, device, args, dataset, trunk_net, input_size, input_channel, n_class, n_branches, gate_type,
                 branch_net_names, gate_net_names, evalFn, lossFn):
        super(MyDeepTrunkNet, self).__init__()
        self.dataset = dataset
        self.input_size = input_size
        self.input_channel = input_channel
        self.n_class = n_class
        self.gate_type = gate_type
        self.n_branches = n_branches
        self.trunk_net = trunk_net
        self.evalFn = evalFn
        self.lossFn = lossFn

        assert gate_type in ["entropy", "net"], f"Unknown gate mode: {gate_type:s}"

        self.exit_ids = [-1] + list(range(n_branches))

        self.threshold = {exit_idx: args.gate_threshold for exit_idx in self.exit_ids[1:]}
        self.gate_nets = {}
        self.branch_nets = {}

        if len(branch_net_names) != n_branches:
            print("Number of branches does not match branch net names")
            branch_net_names = n_branches * branch_net_names[0:1]

        if gate_net_names is None:
            gate_net_names = branch_net_names
        elif len(gate_net_names) != n_branches:
            print("Number of branches does not match gate net names")
            gate_net_names = n_branches * gate_net_names[0:1]

        if args.load_branch_model is not None and len(args.load_branch_model) != n_branches:
            args.load_branch_model = n_branches * args.load_branch_model[0:1]
        if args.load_gate_model is not None and len(args.load_gate_model) != n_branches:
            args.load_gate_model = n_branches * args.load_gate_model[0:1]

        for i, branch_net_name in zip(range(n_branches), branch_net_names):
            exit_idx = self.exit_ids[i+1]
            self.branch_nets[exit_idx] = get_net(device, dataset, branch_net_name, input_size, input_channel, n_class,
                                                 load_model=None if args.load_branch_model is None else args.load_branch_model[i],
                                                 net_dim=args.cert_net_dim)

            if gate_type == "net":
                self.gate_nets[exit_idx] = get_net(device, dataset, gate_net_names[i], input_size, input_channel, 1,
                                                   load_model=None if args.load_gate_model is None else args.load_gate_model[i],
                                                   net_dim=args.cert_net_dim)
            else:
                self.gate_nets[exit_idx] = SeqNet(Sequential(*[*self.branch_nets[exit_idx].blocks, Entropy(n_class, low_mem=True, neg=True)]))
                self.gate_nets[exit_idx].determine_dims(torch.randn((2, input_channel, input_size, input_size), dtype=torch.float).to(device))
                init_slopes(self.gate_nets[exit_idx], device, trainable=False)

            self.add_module("gateNet_{}".format(exit_idx), self.gate_nets[exit_idx])
            self.add_module("branchNet_{}".format(exit_idx), self.branch_nets[exit_idx])

        if args.load_model is not None:
            old_state = self.state_dict()
            load_state = torch.load(args.load_model)
            if args.cert_net_dim is not None and not ("gateNet_0.blocks.layers.1.mean" in load_state.keys()): # Only change keys if loading from a non mixed resolution to mixed resolution
                new_dict = {}
                for k in load_state.keys():
                    if k.startswith("trunk"):
                        new_k = k
                    else:
                        k_match = re.match(r"(^.*\.layers\.)([0-9]+)(\..*$)", k)
                        new_k = "%s%d%s" % (k_match.group(1), int(k_match.group(2)) + 1, k_match.group(3))
                    new_dict[new_k] = load_state[k]
                load_state.update(new_dict)

            # LiRPA requires parameters to have zero batch dimension. This makes old models compatible
            for k, v in load_state.items():
                if k.endswith("mean") or k.endswith("sigma"):
                    if k in old_state:
                        load_state.update({k: v.reshape(old_state[k].shape)})

            old_state.update({k:v.view(old_state[k].shape) for k,v in load_state.items() if
                              k in old_state and (
                              (k.startswith("trunk") and args.load_trunk_model is None)
                              or (k.startswith("gate") and args.load_gate_model is None)
                              or (k.startswith("branch") and args.load_branch_model is None))})
            missing_keys, extra_keys = self.load_state_dict(old_state, strict=False)
            assert len([x for x in missing_keys if "gateNet" in x or "branchNet" in x]) == 0
            print("Whole model loaded from %s" % args.load_model)

            ## Trunk and branch nets have to be loaded after the whole model
            if args.load_trunk_model is not None:
                load_net_state(self.trunk_net, args.load_trunk_model)

        if (args.load_model is not None or args.load_gate_model is not None) and args.gate_feature_extraction is not None:
            for i, net in enumerate(self.gate_nets.values()):
                extraction_layer = [ii for ii in range(len(net.blocks)) if isinstance(net.blocks[ii],Linear)]
                extraction_layer = extraction_layer[-min(len(extraction_layer),args.gate_feature_extraction)]
                net.freeze(extraction_layer-1)

        self.trunk_cnet = None
        self.gate_cnets = {k: None for k in self.gate_nets.keys()}
        self.branch_cnets = {k: None for k in self.branch_nets.keys()}
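For intuition, a routing sketch consistent with the fields initialized above; this is illustrative only, not the project's actual forward pass:

# Illustrative only (assumes batch size 1): each gate net scores the input
# against its stored threshold; the first passing branch answers, otherwise
# the sample falls through to the trunk net.
def route(self, x):
    for exit_idx in self.exit_ids[1:]:
        if self.gate_nets[exit_idx](x).item() >= self.threshold[exit_idx]:
            return self.branch_nets[exit_idx](x)
    return self.trunk_net(x)
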
Example #12
def ScoreAverage(dps):
    """Non-tree aggregation (all providers aggregated at once). SV of each provider is computed from the averaged outputs' accuracy on the test set."""
    shapleyValue = ShapleyValue()
    tp = ThirdParty()

    db = DataBuyer(get_net())
    '''dps = []
    for i in range(params.provider_num):
        net = get_net()
        dataloader = get_data_loader(i)
        dps.append(DataProvider(net, dataloader))
    print('models loaded')'''

    # random aggregation order
    '''order_rand = random_order(params.provider_num)
    print('aggregation order', order_rand)
    '''

    # build tree nodes and collect them in tree_list
    tree_list = []

    for i in range(params.provider_num):
        tree_list.append(Tree(i, dps[i]))
    # first train on each local dataset until convergence ----------------

    # '''
    for i in range(params.provider_num):
        print("client", i, "pre-training")
        for j in range(params.local_time):
            tree_list[i].provider.train()
    # '''
    # compute the SVs -------------------
    print('start computing SVs')

    shapleyValue.v_way = 'score_avg'  # how v is computed: 'fedavg' or 'score_avg'

    SVs = shapleyValue.cal_SV_all(tree_list)

    print("computed SV values:")
    for i in range(params.provider_num):
        tree_list[i].sv = SVs[i]
        print(SVs[i])

    # aggregate only the providers with SV > 0 -----------------
    positive_list = []
    for i in range(params.provider_num):
        if SVs[i] > 0:
            print(i, "has SV > 0, added")
            positive_list.append(tree_list[i])

    net, acc = fedavg(positive_list, 100)

    print("accuracy after aggregation", acc)

    # write results to txt
    txt_dir = params.dataset_division_testno + '/22.txt'
    write_txt(tree_list, 0, acc, txt_dir)
    # save v_all
    v_all = shapleyValue.v_all
    print(v_all)
    npy_dir = params.dataset_division_testno + '/ScoreAverage_v_all_2.npy'
    write_npy_v_all(v_all, npy_dir)
Example #13
def Original(dps):
    """Baseline aggregation (plain FedAvg), plus SV computation."""
    shapleyValue = ShapleyValue()

    db = DataBuyer(get_net())
    '''dps = []
    for i in range(params.provider_num):
        net = get_net()
        dataloader = get_data_loader(i)
        dps.append(DataProvider(net, dataloader))'''

    # build tree nodes and collect them in tree_list
    tree_list = []
    for i in range(params.provider_num):
        tree_list.append(Tree(i, dps[i]))

    # pre-training
    # _, p_fed = fedavg(tree_list)

    num_node = len(tree_list)

    # first train on each local dataset until convergence ----------------

    # '''
    for i in range(params.provider_num):
        print("client", i, "pre-training")
        for j in range(params.local_time):
            tree_list[i].provider.train()
    # '''
    #
    print('start computing SVs')

    shapleyValue.v_way = 'fedavg'  # how v is computed: 'fedavg' or 'score_avg'

    SVs = shapleyValue.cal_SV_all(tree_list)

    # utilities of all evaluated coalitions after aggregation
    v_all = shapleyValue.v_all

    for i in range(num_node):
        tree_list[i].sv = SVs[i]
        print(SVs[i])

    # the last remaining node is the root
    root = tree_list[0]
    root.B = db.B
    # distribute the budget B along the tree
    all_B(root)

    # root node accuracy
    p_root = shapleyValue.root_p

    # write the SVs to txt
    txt_dir = params.dataset_division_testno + '/21.txt'
    write_txt(tree_list, 0, p_root, txt_dir)
    # save v_all
    v_all = shapleyValue.v_all
    print(v_all)
    npy_dir = params.dataset_division_testno + '/Original_v_all_2.npy'
    write_npy_v_all(v_all, npy_dir)
    # the third party decrypts and sends the results to the DPs and the DB
    return tree_list
Example #14
def CollaborativeModelling(_tree_list=None):
    """Tree-structured aggregation, plus SV computation."""
    shapleyValue = ShapleyValue()
    tp = ThirdParty()

    db = DataBuyer(get_net())
    dps = []
    for i in range(params.provider_num):
        net = get_net()
        dataloader = get_data_loader(i)
        dps.append(DataProvider(net, dataloader))
    print('models loaded')

    # random aggregation order
    '''order_rand = random_order(params.provider_num)
    print('aggregation order', order_rand)
    '''

    # build tree nodes and collect them in tree_list
    tree_list = []

    if _tree_list is not None:
        for i in range(params.provider_num):
            tree_list.append(_tree_list[i])
    else:
        for i in range(params.provider_num):
            tree_list.append(Tree(i, dps[i]))
    """# The third party generates keys and hands them to the DPs and the DB
    public_key, private_key = tp.generate_key()
    # Each DP encrypts its model and sends it to the DB
    for i in range(params.provider_num):
        dps[i].enctypt()
    # Run FedAvg before aggregating; p_fed is the FedAvg accuracy
    _, p_fed = fedavg(tree_list)
    """

    # repeated rounds of FedAvg and aggregation
    last_node = tree_list[0]  # best node from the previous round
    next_node_no = 1  # index of the next node to pull into a round

    node_K_list = [last_node]

    while next_node_no < params.provider_num:
        # print('len(node_K_list[0].children)', len(node_K_list[0].children))
        # collect the K nodes to aggregate this round
        num = 0
        while num < params.K - 1 and next_node_no < params.provider_num:
            node_K_list.append(tree_list[next_node_no])
            next_node_no += 1
            num += 1

        # aggregate K providers (the last round may have fewer than K)
        num_node = len(node_K_list)
        # the DB computes the characteristic function v and sends it to the third party
        print('start computing SVs')
        SVs = shapleyValue.cal_SV_all(node_K_list)

        for i in range(num_node):
            node_K_list[i].sv = SVs[i]
            print(SVs[i])

        # decide whether to aggregate
        num_aggregation = 0
        for i in range(num_node):
            if node_K_list[i].if_aggregation():
                num_aggregation += 1
        if num_aggregation == num_node:  # everyone agrees to aggregate
            # build a tree node from the aggregated model
            net = shapleyValue.root_net
            # temporarily reuse the first child's dataloader for the aggregated node
            p = DataProvider(net,
                             dataloader=get_data_loader(node_K_list[0].p_no))
            node = Tree(node_K_list[0].p_no, p)
            for i in range(num_node):
                node.children.append(node_K_list[i])
            # carry the aggregated node into the next round
            node_K_list = [node]
        else:
            # pick the node with the largest SV as the root
            max_node = node_K_list[0]
            max_sv = node_K_list[0].sv
            for i in range(1, num_node):
                if node_K_list[i].sv > max_sv:
                    max_node = node_K_list[i]
                    max_sv = node_K_list[i].sv
            # carry the best node into the next round
            node_K_list = [max_node]

    # the last remaining node is the root
    root = node_K_list[0]
    root.B = db.B
    # distribute the budget B along the tree
    all_B(root)

    # root node accuracy
    p_root = shapleyValue.root_p

    # write results to txt
    txt_dir = params.dataset_division_testno + '/10.txt'
    write_txt(tree_list, 0, p_root, txt_dir)
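For reference, the quantity cal_SV_all presumably estimates is the Shapley value: each provider's marginal contribution to the characteristic function v, averaged over all join orders. A brute-force sketch (hypothetical helper, exponential cost; v maps a coalition of players to a score, with v of the empty coalition defined):

from itertools import permutations
from math import factorial

def shapley_values(players, v):
    """Exact Shapley values by enumerating every ordering of the players."""
    sv = {p: 0.0 for p in players}
    for order in permutations(players):
        coalition = []
        for p in order:
            before = v(frozenset(coalition))
            coalition.append(p)
            sv[p] += v(frozenset(coalition)) - before  # marginal contribution
    n_orders = factorial(len(players))
    return {p: total / n_orders for p, total in sv.items()}
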
Example #15
File: main.py  Project: eth-sri/ACE
def run(args=None):
    device = 'cuda' if torch.cuda.is_available() and (
        not args.no_cuda) else 'cpu'
    num_train, train_loader, test_loader, input_size, input_channel, n_class = get_loaders(
        args)

    lossFn = nn.CrossEntropyLoss(reduction='none')
    evalFn = lambda x: torch.max(x, dim=1)[1]

    net = get_net(device,
                  args.dataset,
                  args.net,
                  input_size,
                  input_channel,
                  n_class,
                  load_model=args.load_model,
                  net_dim=args.cert_net_dim
                  )  #, feature_extract=args.core_feature_extract)

    timestamp = int(time.time())
    model_signature = '%s/%s/%d/%s_%.5f/%d' % (args.dataset, args.exp_name,
                                               args.exp_id, args.net,
                                               args.train_eps, timestamp)
    model_dir = args.root_dir + 'models_new/%s' % (model_signature)
    args.model_dir = model_dir
    count_vars(args, net)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    if isinstance(net, UpscaleNet):
        relaxed_net = None
        relu_ids = None
    else:
        relaxed_net = RelaxedNetwork(net.blocks, args.n_rand_proj).to(device)
        relu_ids = relaxed_net.get_relu_ids()

    if "nat" in args.train_mode:
        cnet = CombinedNetwork(net,
                               relaxed_net,
                               lossFn=lossFn,
                               evalFn=evalFn,
                               device=device,
                               no_r_net=True).to(device)
    else:
        dummy_input = torch.rand((1, ) + net.dims[0],
                                 device=device,
                                 dtype=torch.float32)
        cnet = CombinedNetwork(net,
                               relaxed_net,
                               lossFn=lossFn,
                               evalFn=evalFn,
                               device=device,
                               dummy_input=dummy_input).to(device)

    n_epochs, test_nat_loss, test_nat_acc, test_adv_loss, test_adv_acc = args.n_epochs, None, None, None, None

    if 'train' in args.train_mode:
        tb_writer = SummaryWriter(model_dir)
        stats = Statistics(len(train_loader), tb_writer, model_dir)
        args_file = os.path.join(model_dir, 'args.json')
        with open(args_file, 'w') as fou:
            json.dump(vars(args), fou, indent=4)
        write_config(args, os.path.join(model_dir, 'run_config.txt'))

        eps = 0
        epoch = 0
        lr = args.lr
        n_epochs = args.n_epochs

        if "COLT" in args.train_mode:
            relu_stable = args.relu_stable
            # if args.layers is None:
            #     args.layers = [-2, -1] + relu_ids
            layers = get_layers(args.train_mode,
                                cnet,
                                n_attack_layers=args.n_attack_layers,
                                protected_layers=args.protected_layers)
        elif "adv" in args.train_mode:
            relu_stable = None
            layers = [-1, -1]
            args.mix = False
        elif "natural" in args.train_mode:
            relu_stable = None
            layers = [-2, -2]
            args.nat_factor = 1
            args.mix = False
        elif "diffAI" in args.train_mode:
            relu_stable = None
            layers = [-2, -2]
        else:
            assert False, "Unknown train mode %s" % args.train_mode

        print('Saving model to:', model_dir)
        print('Training layers: ', layers)

        for j in range(len(layers) - 1):
            opt, lr_scheduler = get_opt(cnet.net,
                                        args.opt,
                                        lr,
                                        args.lr_step,
                                        args.lr_factor,
                                        args.n_epochs,
                                        train_loader,
                                        args.lr_sched,
                                        fixup="fixup" in args.net)

            curr_layer_idx = layers[j + 1]
            eps_old = eps
            eps = get_scaled_eps(args, layers, relu_ids, curr_layer_idx, j)

            kappa_sched = Scheduler(0.0 if args.mix else 1.0, 1.0,
                                    num_train * args.mix_epochs, 0)
            beta_sched = Scheduler(
                args.beta_start if args.mix else args.beta_end, args.beta_end,
                args.train_batch * len(train_loader) * args.mix_epochs, 0)
            eps_sched = Scheduler(eps_old if args.anneal else eps, eps,
                                  num_train * args.anneal_epochs, 0)

            layer_dir = '{}/{}'.format(model_dir, curr_layer_idx)
            if not os.path.exists(layer_dir):
                os.makedirs(layer_dir)

            print('\nnew train phase: eps={:.5f}, lr={:.2e}, curr_layer={}\n'.
                  format(eps, lr, curr_layer_idx))

            for curr_epoch in range(n_epochs):
                train(device,
                      epoch,
                      args,
                      j + 1,
                      layers,
                      cnet,
                      eps_sched,
                      kappa_sched,
                      opt,
                      train_loader,
                      lr_scheduler,
                      relu_ids,
                      stats,
                      relu_stable,
                      relu_stable_protected=args.relu_stable_protected,
                      beta_sched=beta_sched)

                if isinstance(lr_scheduler, optim.lr_scheduler.StepLR
                              ) and curr_epoch >= args.mix_epochs:
                    lr_scheduler.step()

                if (epoch + 1) % args.test_freq == 0:
                    with torch.no_grad():
                        test_nat_loss, test_nat_acc, test_adv_loss, test_adv_acc = test(
                            device,
                            args,
                            cnet,
                            test_loader if args.test_set == "test" else
                            train_loader, [curr_layer_idx],
                            stats=stats,
                            log_ind=(epoch + 1) % n_epochs == 0)

                if (epoch + 1) % args.test_freq == 0 or (epoch +
                                                         1) % n_epochs == 0:
                    torch.save(
                        net.state_dict(),
                        os.path.join(layer_dir, 'net_%d.pt' % (epoch + 1)))
                    torch.save(
                        opt.state_dict(),
                        os.path.join(layer_dir, 'opt_%d.pt' % (epoch + 1)))

                stats.update_tb(epoch)
                epoch += 1
            relu_stable = None if relu_stable is None else relu_stable * args.relu_stable_layer_dec
            lr = lr * args.lr_layer_dec
        if args.cert:
            with torch.no_grad():
                diffAI_cert(
                    device,
                    args,
                    cnet,
                    test_loader if args.test_set == "test" else train_loader,
                    stats=stats,
                    log_ind=True,
                    epoch=epoch,
                    domains=args.cert_domain)
    elif args.train_mode == 'print':
        print('printing network to:', args.out_net_file)
        dummy_input = torch.randn(1,
                                  input_channel,
                                  input_size,
                                  input_size,
                                  device='cuda')
        net.skip_norm = True
        torch.onnx.export(net, dummy_input, args.out_net_file, verbose=True)
    elif args.train_mode == 'test':
        with torch.no_grad():
            test(device,
                 args,
                 cnet,
                 test_loader if args.test_set == "test" else train_loader,
                 [-1],
                 log_ind=True)
    elif args.train_mode == "cert":
        tb_writer = SummaryWriter(model_dir)
        stats = Statistics(len(train_loader), tb_writer, model_dir)
        args_file = os.path.join(model_dir, 'args.json')
        with open(args_file, 'w') as fou:
            json.dump(vars(args), fou, indent=4)
        write_config(args, os.path.join(model_dir, 'run_config.txt'))
        print('Saving results to:', model_dir)
        with torch.no_grad():
            diffAI_cert(
                device,
                args,
                cnet,
                test_loader if args.test_set == "test" else train_loader,
                stats=stats,
                log_ind=True,
                domains=args.cert_domain)
        exit(0)
    else:
        assert False, 'Unknown mode: {}!'.format(args.train_mode)

    return test_nat_loss, test_nat_acc, test_adv_loss, test_adv_acc
Example #16
    checkpoint_path = os.path.join('checkpoint', 'checkpoint.pth')
    checkpoint_dir = os.path.dirname(checkpoint_path)
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    # torch.multiprocessing.freeze_support()
    dataset_path = os.path.join('data', 'cat_vs_dog', 'train')
    dataset_size, class_names, class_to_idx = get_metadata(dataset_path)

    # TensorBoard setup
    log_path = os.path.join('runs', 'experiment_1')
    if os.path.exists(log_path):
        shutil.rmtree(log_path)
    writer = SummaryWriter(log_path)

    train_loader = get_data_loader(dataset_path,
                                   batch_size=HyperParams['batch_size'])
    # eval_loader = get_data_loader(dataset_path, batch_size=dataset_size)
    eval_loader = None

    num_classes = len(class_names)
    model = get_net(classes=num_classes)
    model = main(model,
                 checkpoint_path,
                 HyperParams['input_size'],
                 train_loader,
                 eval_loader,
                 writer=writer,
                 epochs=HyperParams['epochs'])
    writer.close()
Example #17
def get_net_by_bn(self):
    net = get_net(bn_type="no")  # returns the network class, not an instance
    return net
Example #18
def __init__(self):
    self.net = get_net()
    # self.lr = params.learning_rate           # learning rate
    self.w = {}  # accumulated network weights
    self.client_num = 0  # total client sample count, accumulated alongside the weights
Example #19
def creat_model():
    for i in range(params.provider_num):
        save_provider_model(i, get_net())
Example #20
from utils import get_metadata
from utils import get_net
from utils import get_prediction_class
from utils import preprocess_image

if __name__ == '__main__':
    device = get_device()

    # Training dataset metadata
    _, class_names, class_to_idx = get_metadata(sys.argv[1])
    num_classes = len(class_names)
    idx_to_class = {value: key for key, value in class_to_idx.items()}

    # Data preparation
    image = Image.open(sys.argv[2])

    # Net initialization
    net = get_net(classes=num_classes)
    checkpoint_dict = torch.load(os.path.join('checkpoint', 'checkpoint.pth'),
                                 map_location=device)
    net.load_state_dict(checkpoint_dict['model_state_dict'])
    net.eval()
    net.to(device)

    # Prediction
    image_tensor = preprocess_image(image, mode='val')
    image_tensor = image_tensor.to(device)
    with torch.no_grad():  # inference only; no gradients needed
        prediction = net(image_tensor)
    result = get_prediction_class(prediction, idx_to_class)
    print(f'Result: {result}')
Example #21
X = T.tensor4()
Z = T.tensor4()
ny = 10

discrim_opt = OrderedDict()
discrim_opt['input'] = (1, 28, 28)
discrim_opt['conv_relu_1'] = (64, 5, 5)
discrim_opt['pool_1'] = 2
discrim_opt['conv_relu_2'] = (64, 5, 5)
discrim_opt['pool_2'] = 2
discrim_opt['dense_relu'] = (1024, )
discrim_opt['dense_sigmoid'] = (128, )

print('discriminator configuration')
print(list(discrim_opt.keys()))
discrim_net, discrim_NET = get_net(X, discrim_opt)

cond_opt = OrderedDict()
cond_opt['input'] = (1, 28, 28)
cond_opt['noise'] = 0.1
cond_opt['conv_batchnorm_relu_1'] = (64, 3, 3)
cond_opt['conv_batchnorm_relu_2'] = (64, 3, 3)
cond_opt['pool_1'] = 2
cond_opt['dropout_1'] = 0.5
cond_opt['conv_batchnorm_relu_3'] = (64, 3, 3)
cond_opt['conv_batchnorm_relu_4'] = (64, 3, 3)
cond_opt['pool_2'] = 2
cond_opt['dropout_2'] = 0.5
cond_opt['conv_batchnorm_relu_5'] = (64, 3, 3)
cond_opt['conv_batchnorm_relu_6'] = (64, 3, 3)
cond_opt['pool_3'] = -1
Example #22
File: train.py  Project: hoangtnm/hackathon
    print(f'Training complete in {time_elapsed // 60:.0f}m {time_elapsed % 60:.0f}s')
    print(f'Best val Acc: {best_acc:.4f}')

    # load best net weights
    net.load_state_dict(best_net_wts)
    return net


if __name__ == '__main__':

    checkpoint_path = os.path.join('checkpoint', 'checkpoint.pth')
    checkpoint_dir = os.path.dirname(checkpoint_path)
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    # TensorBoard setup
    log_path = os.path.join('runs', 'experiment_1')
    if os.path.exists(log_path):
        shutil.rmtree(log_path)
    writer = SummaryWriter(log_path)

    data_loaders = {x: get_data_loader(os.path.join(DATASET_PATH, x), batch_size=BATCH_SIZE, mode=x)
                    for x in ['train', 'val']}

    device = get_device()
    num_classes = len(CLASSES)
    model = get_net(model_name='mobilenet_v2', mode='train', device=device,
                    pretrained=True, num_classes=num_classes)
    model = main(model, device, checkpoint_path, data_loaders, writer=writer, epochs=EPOCHS)
    writer.close()