def prefetch_test(opt):
  """Evaluate on the test split using a DataLoader that runs the detector's
  pre-processing in a background worker, then call the dataset's evaluator.

  Args:
      opt: parsed options object (task, dataset, gpus_str, exp_id, save_dir).
  """
  os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str

  Dataset = dataset_factory[opt.dataset]
  opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
  print(opt)
  Logger(opt)
  Detector = detector_factory[opt.task]

  split = 'test'
  dataset = Dataset(opt, split)
  detector = Detector(opt)
  loader = torch.utils.data.DataLoader(
      PrefetchDataset(opt, dataset, detector.pre_process),
      batch_size=1, shuffle=False, num_workers=1, pin_memory=True)

  results = {}
  num_iters = len(dataset)
  print("Preprocessed data")
  bar = Bar('{}'.format(opt.exp_id), max=num_iters)
  # Running averages for each timing phase reported by the detector.
  avg_time_stats = {
      stat: AverageMeter()
      for stat in ('tot', 'load', 'pre', 'net', 'dec', 'post', 'merge')}

  for ind, (img_id, pre_processed_images) in enumerate(loader):
    ret = detector.run(pre_processed_images)
    key = img_id.numpy().astype(np.int64)[0]
    results[key] = ret['results']
    Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
                   ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
    for t in avg_time_stats:
      avg_time_stats[t].update(ret[t])
      Bar.suffix = Bar.suffix + '|{} {tm.val:.3f}s ({tm.avg:.3f}s) '.format(
        t, tm = avg_time_stats[t])
    bar.next()

  bar.finish()
  dataset.run_eval(results, opt.save_dir)
def test(opt):
  """Run the detector over the test split one image at a time (no
  DataLoader prefetching), then call the dataset's evaluator.

  Args:
      opt: parsed options object (task, dataset, gpus_str, exp_id, save_dir).
  """
  os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str

  Dataset = dataset_factory[opt.dataset]
  opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
  print(opt)
  Logger(opt)
  Detector = detector_factory[opt.task]

  dataset = Dataset(opt, 'test')
  detector = Detector(opt)

  results = {}
  num_iters = len(dataset)
  bar = Bar('{}'.format(opt.exp_id), max=num_iters)
  # Running averages for each timing phase reported by the detector.
  avg_time_stats = {
      stat: AverageMeter()
      for stat in ('tot', 'load', 'pre', 'net', 'dec', 'post', 'merge')}

  for ind in range(num_iters):
    img_id = dataset.images[ind]
    img_info = dataset.coco.loadImgs(ids=[img_id])[0]
    img_path = os.path.join(dataset.img_dir, img_info['file_name'])

    # The 'ddd' (3D detection) task additionally needs camera calibration.
    if opt.task == 'ddd':
      ret = detector.run(img_path, img_info['calib'])
    else:
      ret = detector.run(img_path)

    results[img_id] = ret['results']

    Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
                   ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
    for t in avg_time_stats:
      avg_time_stats[t].update(ret[t])
      Bar.suffix = Bar.suffix + '|{} {:.3f} '.format(t, avg_time_stats[t].avg)
    bar.next()

  bar.finish()
  dataset.run_eval(results, opt.save_dir)
Exemple #3
0
def main(opt):
  """Training entry point: build dataset/model/trainer, run the train loop
  with periodic validation and checkpointing, and step the learning rate.

  Special modes: with ``opt.test`` it only runs validation + evaluation;
  with ``opt.export_onnx`` it only profiles the model (the actual ONNX
  export is commented out below).
  """
  torch.manual_seed(opt.seed)
  torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
  Dataset = get_dataset(opt.dataset, opt.task)
  opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
  print(opt)

  logger = Logger(opt)

  os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
  opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
  
  print('Creating model...')
  # Model creation -> pick backbone, heads, and head convolution 
  model = create_model(opt.arch, opt.heads, opt.head_conv)
  optimizer = torch.optim.Adam(model.parameters(), opt.lr)
  start_epoch = 0
  if opt.load_model != '':
    # Resume / warm-start from a checkpoint; may also restore optimizer
    # state and the starting epoch (depends on opt.resume).
    model, optimizer, start_epoch = load_model(
      model, opt.load_model, optimizer, opt.resume, opt.lr, opt.lr_step)

  Trainer = train_factory[opt.task]
  trainer = Trainer(opt, model, optimizer)
  trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

  print('Setting up data...')
  val_loader = torch.utils.data.DataLoader(
      Dataset(opt, 'val'), 
      batch_size=1, 
      shuffle=False,
      num_workers=1,
      pin_memory=True
  )

  if opt.test:
    # Evaluation-only mode: one validation pass, then dataset-level eval.
    _, preds = trainer.val(0, val_loader)
    val_loader.dataset.run_eval(preds, opt.save_dir)
    return

  if opt.export_onnx: 
    print('Exporting onnx model')

    # TODO: adapt the input size to the onnx 
    width   = opt.input_res
    height  = opt.input_res

    # create a dummy input that would be used to export the model
    #dummy_input = torch.randn(10, 3, width, height, device='cuda')

    # this method does not support variable input sizes 
    #torch.onnx.export(model, dummy_input, 
    #                  os.path.join(opt.save_dir, 'model.onnx'), 
    #                  verbose=True)

    # NOTE(review): despite the branch name, only FLOPs/params profiling
    # runs here; the torch.onnx.export call above is commented out.
    flops, params = profile(model, input_size=(1,3,width, height), device='cuda')
    print(width, height, flops, params)
    print('Model exported. Done!')
    return


  train_loader = torch.utils.data.DataLoader(
      Dataset(opt, 'train'), 
      batch_size=opt.batch_size, 
      shuffle=True,
      num_workers=opt.num_workers,
      pin_memory=True,
      drop_last=True
  )

  print('Starting training...')
  best = 1e10  # best (lowest) validation metric seen so far
  for epoch in range(start_epoch + 1, opt.num_epochs + 1):
    mark = epoch if opt.save_all else 'last'
    log_dict_train, _ = trainer.train(epoch, train_loader)
    logger.write('epoch: {} |'.format(epoch))
    for k, v in log_dict_train.items():
      logger.scalar_summary('train_{}'.format(k), v, epoch)
      logger.write('{} {:8f} | '.format(k, v))
    if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
      save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)), 
                 epoch, model, optimizer)
      with torch.no_grad():
        log_dict_val, preds = trainer.val(epoch, val_loader)
      for k, v in log_dict_val.items():
        logger.scalar_summary('val_{}'.format(k), v, epoch)
        logger.write('{} {:8f} | '.format(k, v))
      if log_dict_val[opt.metric] < best:
        best = log_dict_val[opt.metric]
        # Best checkpoint is saved without optimizer state (weights only).
        save_model(os.path.join(opt.save_dir, 'model_best.pth'), 
                   epoch, model)
    else:
      save_model(os.path.join(opt.save_dir, 'model_last.pth'), 
                 epoch, model, optimizer)
    logger.write('\n')
    if epoch in opt.lr_step:
      save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)), 
                 epoch, model, optimizer)
      # Decay the LR by 10x at each configured step epoch.
      lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
      print('Drop LR to', lr)
      for param_group in optimizer.param_groups:
          param_group['lr'] = lr
  logger.close()
Exemple #4
0
      # NOTE(review): the lines below are an orphaned duplicate of the tail
      # of main()'s training loop (validation / checkpointing / LR-step
      # logic, identical to the block above); no enclosing function header
      # is visible for this fragment — confirm whether it should exist.
      logger.scalar_summary('train_{}'.format(k), v, epoch)
      logger.write('{} {:8f} | '.format(k, v))
    if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
      save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)), 
                 epoch, model, optimizer)
      with torch.no_grad():
        log_dict_val, preds = trainer.val(epoch, val_loader)
      for k, v in log_dict_val.items():
        logger.scalar_summary('val_{}'.format(k), v, epoch)
        logger.write('{} {:8f} | '.format(k, v))
      if log_dict_val[opt.metric] < best:
        best = log_dict_val[opt.metric]
        # Best checkpoint is saved without optimizer state (weights only).
        save_model(os.path.join(opt.save_dir, 'model_best.pth'), 
                   epoch, model)
    else:
      save_model(os.path.join(opt.save_dir, 'model_last.pth'), 
                 epoch, model, optimizer)
    logger.write('\n')
    if epoch in opt.lr_step:
      save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)), 
                 epoch, model, optimizer)
      # Decay the LR by 10x at each configured step epoch.
      lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
      print('Drop LR to', lr)
      for param_group in optimizer.param_groups:
          param_group['lr'] = lr
  logger.close()

if __name__ == '__main__':
  # Script entry point: parse command-line options and start training.
  opt = opts().parse()
  main(opt)
Exemple #5
0

def log_config(log_file):
    """Configure the root logger to emit DEBUG-level records both to
    *log_file* and to the console, with a timestamped format.

    Args:
        log_file: path of the log file to create (or append to).

    Returns:
        The configured root logger. (Returning it is a backward-compatible
        convenience; existing callers that ignore the return still work.)
    """
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s ===> %(message)s',
                                  datefmt='%Y-%m-%d %H:%M:%S')

    # FileHandler: persist every record to the log file.
    fh = logging.FileHandler(log_file)
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter)

    # StreamHandler: echo records to the console.
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)

    # Register both handlers on the root logger.
    logger.addHandler(ch)
    logger.addHandler(fh)
    return logger


if __name__ == '__main__':
    # Script entry point: build options, pin the visible GPU, and train.
    config = opts()
    os.environ['CUDA_VISIBLE_DEVICES'] = config.GPU
    # CUDA is mandatory for this script; fail fast on CPU-only machines.
    assert torch.cuda.is_available(), "Currently, we only support CUDA version"
    cudnn.benchmark = True  # let cuDNN auto-tune convolution algorithms
    # NOTE(review): main() is called with no arguments here, while another
    # fragment defines main(opt) — confirm which entry point is intended.
    main()
from fold_data import dataset
from fold_util import F_Info
import torch as t
from fold_util import F_Normalize as F_Nor
from fold_defense import simGraph_init
from fold_defense import low_pass_adj_sym
from sklearn.metrics.pairwise import euclidean_distances
import torch.nn.functional as F
from train import F_accuracy
import numpy as np
import scipy.sparse as sp
from deeprobust.graph import utils

model_path = "../checkpoint"

# Script-level configuration for the graph experiments.
opt = opts()
opt.data_path = r"../fold_data/Data/Planetoid"
opt.model_path = "../checkpoint"
opt.dataset = 'cora'
opt.model = 'GCN'

# Load the dataset (adjacency, features, labels, and the three splits).
data_load = dataset.c_dataset_loader(opt.dataset, opt.data_path)
base_adj, base_feat, label, idx_train, idx_val, idx_test = data_load.process_data(
)
label_not_one_hot = F_Info.F_one_hot_to_label(label)
# BUG FIX: pass the configured option *instance* `opt`, not the `opts`
# factory/class object itself, as the original did.
data_info = F_Info.C_per_info(base_adj, base_feat, label, idx_train, idx_val,
                              idx_test, opt)

# Symmetric adjacency normalization and row-wise feature normalization.
adj_nor = F_Nor.normalize_adj_sym(base_adj)
feat_nor = F_Nor.normalize_feat(base_feat)
Exemple #7
0
def train(p_adj_np: np.ndarray,
          p_feat_np: np.ndarray,
          p_labels_np: np.ndarray,
          p_idx_train_np,
          p_idx_val_np,
          is_normalize_feat=False):
    """Train a GCN on a dense graph and checkpoint the model with the best
    validation accuracy.

    Args:
        p_adj_np: dense adjacency matrix, shape (N, N).
        p_feat_np: node-feature matrix, shape (N, F).
        p_labels_np: one-hot label matrix, shape (N, C).
        p_idx_train_np: integer indices of the training nodes.
        p_idx_val_np: integer indices of the validation nodes.
        is_normalize_feat: row-normalize the features when True.
    """
    use_gpu = t.cuda.is_available()
    # Fixed seeds for reproducibility.
    random.seed(40)
    np.random.seed(40)
    t.manual_seed(40)
    best_acc = 0
    opt = opts()
    label_tensor = t.LongTensor(np.where(p_labels_np)[1])

    # Add self-loops: A <- A + I, then compute the degree normalizer.
    p_adj_np = (sp.csr_matrix(p_adj_np) + sp.eye(p_adj_np.shape[1])).A
    degree_np = F_Nor.normalize_adj_degree(p_adj_np)  # D^-0.5

    # Optional row-normalization of the features.
    if is_normalize_feat:
        feat_nor_np = F_Nor.normalize_feat(p_feat_np)
    else:
        feat_nor_np = p_feat_np

    # Model definition.
    model = GCN(nfeat=feat_nor_np.shape[1],
                nhid=opt.num_hiden_layer,
                nclass=label_tensor.max().item() + 1,
                dropout=opt.drop_out,
                init=opt.init_type)

    # Optimizer definition.
    if opt.optim == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr=opt.lr,
                               weight_decay=opt.weight_decay)
    elif opt.optim == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr=opt.lr,
                              weight_decay=opt.weight_decay)
    else:
        raise NotImplementedError

    # Numpy -> Tensor.
    adj_tensor = t.from_numpy(p_adj_np).float()
    feat_nor_tensor = t.from_numpy(feat_nor_np).float()

    idx_train_tensor = t.from_numpy(p_idx_train_np).long()
    idx_val_tensor = t.from_numpy(p_idx_val_np).long()
    degree_tensor = t.from_numpy(degree_np).float()

    # Tensor CPU -> GPU (only when CUDA is available).
    if use_gpu:
        model.cuda()
        adj_tensor, feat_nor_tensor, label_tensor, idx_train_tensor, idx_val_tensor, degree_tensor = \
            list(map(lambda x: x.cuda(),
                     [adj_tensor, feat_nor_tensor, label_tensor, idx_train_tensor, idx_val_tensor, degree_tensor]))

    adj_tensor, feat_nor_tensor, label_tensor, degree_tensor = list(
        map(lambda x: Variable(x),
            [adj_tensor, feat_nor_tensor, label_tensor, degree_tensor]))

    feat_nor_tensor.requires_grad = True

    # Symmetric Laplacian D^-0.5 * A * D^-0.5.
    # BUG FIX: the original called .cuda() on these products unconditionally
    # and crashed on CPU-only machines; the operands are already on the
    # correct device, so no transfer is needed.
    D_Adj_tensor = t.mm(degree_tensor, adj_tensor)  # D^-0.5 * A
    adj_nor_tensor = t.mm(D_Adj_tensor, degree_tensor)  # D^-0.5 * A * D^-0.5

    # Checkpoint directory for the best model.
    save_point = os.path.join('./checkpoint', opt.dataset)
    if not os.path.isdir(save_point):
        os.mkdir(save_point)

    for epoch in np.arange(1, opt.epoch + 1):
        model.train()  # training mode

        # BUG FIX: assigning `optimizer.lr` does not change the learning
        # rate PyTorch actually uses — it must be written into param_groups.
        lr = F_lr_scheduler(epoch, opt)  # LR schedule
        optimizer.lr = lr  # kept so the progress printout below still works
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        optimizer.zero_grad()  # reset gradients

        output = model(feat_nor_tensor,
                       adj_nor_tensor)  # model output [cora: 7 classes]
        loss_train = F.nll_loss(output[idx_train_tensor],
                                label_tensor[idx_train_tensor])
        acc_train = F_accuracy(output[idx_train_tensor],
                               label_tensor[idx_train_tensor])

        loss_train.backward()  # back-propagation
        optimizer.step()  # parameter update

        # Validation
        model.eval()
        output = model(feat_nor_tensor, adj_nor_tensor)
        acc_val = F_accuracy(output[idx_val_tensor],
                             label_tensor[idx_val_tensor])

        if acc_val > best_acc:
            best_acc = acc_val
            state = {
                'model': model,
                'acc': best_acc,
                'epoch': epoch,
            }
            # Save as a .t7 file named after the model, e.g. GCN.t7.
            t.save(state, os.path.join(save_point, '%s.t7' % opt.model))
        if epoch % 10 == 0:
            sys.stdout.flush()
            sys.stdout.write('\r')
            sys.stdout.write(" => Training Epoch #{}".format(epoch))
            sys.stdout.write(" | Training acc : {:6.2f}%".format(
                acc_train.data.cpu().numpy() * 100))
            sys.stdout.write(" | Learning Rate: {:6.4f}".format(optimizer.lr))
            sys.stdout.write(" | Best acc : {:.2f}".format(
                best_acc.data.cpu().numpy() * 100))
Exemple #8
0
def Train_PyG(p_adj_np: np.ndarray, p_feat_np: np.ndarray,
              p_label_np: np.ndarray, p_idx_train: np.ndarray,
              p_idx_val: np.ndarray):
    """Train a PyTorch-Geometric GCN on the graph and print validation
    accuracy after the final epoch.

    Args:
        p_adj_np: dense adjacency matrix, shape (N, N).
        p_feat_np: node-feature matrix, shape (N, F).
        p_label_np: one-hot label matrix, shape (N, C).
        p_idx_train: integer indices of the training nodes.
        p_idx_val: integer indices of the validation nodes.
    """
    use_gpu = t.cuda.is_available()
    # Fixed seeds for reproducibility.
    random.seed(40)
    np.random.seed(40)
    t.manual_seed(40)
    best_acc = 0
    opt = opts()
    # Add self-loops: A <- A + I.
    p_adj_np = p_adj_np + np.eye(p_adj_np.shape[0])
    label_tensor = t.LongTensor(np.where(p_label_np)[1])

    # Edge list derived from the (self-looped) adjacency matrix.
    edge_list_np = F_Nor.get_edge_list(p_adj_np)

    # Features are used unnormalized here.
    feat_nor_np = p_feat_np

    # Model definition.
    model = GCN_PyG(nfeat=feat_nor_np.shape[1],
                    nhid=opt.num_hiden_layer,
                    nclass=label_tensor.max().item() + 1)

    # Optimizer definition.
    if opt.optim == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr=opt.lr,
                               weight_decay=opt.weight_decay)
    elif opt.optim == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr=opt.lr,
                              weight_decay=opt.weight_decay)
    else:
        raise NotImplementedError

    # Numpy -> Tensor.
    # BUG FIX: the original moved everything to the GPU unconditionally even
    # though use_gpu was computed; guard the transfers so CPU-only machines
    # still work.
    feat_nor_tensor = t.from_numpy(feat_nor_np).float()
    edge_list_tensor = t.from_numpy(edge_list_np).long()
    label_tensor = t.from_numpy(np.where(p_label_np)[1]).long()
    idx_train_tensor = t.from_numpy(p_idx_train).long()
    idx_val_tensor = t.from_numpy(p_idx_val).long()
    if use_gpu:
        model.cuda()
        feat_nor_tensor, edge_list_tensor, label_tensor, idx_train_tensor, idx_val_tensor = \
            list(map(lambda x: x.cuda(),
                     [feat_nor_tensor, edge_list_tensor, label_tensor,
                      idx_train_tensor, idx_val_tensor]))

    edge_list_tensor, feat_nor_tensor, label_tensor = list(
        map(lambda x: Variable(x),
            [edge_list_tensor, feat_nor_tensor, label_tensor]))

    feat_nor_tensor.requires_grad = True

    # Checkpoint directory.
    save_point = os.path.join('./checkpoint', "GCN_PyG")
    if not os.path.isdir(save_point):
        os.mkdir(save_point)

    for epoch in np.arange(1, opt.epoch + 1):
        model.train()  # training phase

        # BUG FIX: assigning `optimizer.lr` does not change the learning
        # rate PyTorch actually uses — write it into param_groups instead.
        lr = F_lr_scheduler(epoch, opt)  # LR schedule
        optimizer.lr = lr
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        optimizer.zero_grad()

        output = model(feat_nor_tensor, edge_list_tensor)
        loss_train = F.nll_loss(output[idx_train_tensor],
                                label_tensor[idx_train_tensor])
        acc_train = F_accuracy(output[idx_train_tensor],
                               label_tensor[idx_train_tensor])

        loss_train.backward()
        optimizer.step()

    # Validation after the final epoch.
    model.eval()
    _, pred = model(feat_nor_tensor, edge_list_tensor).max(dim=1)
    correct = float(pred[idx_val_tensor].eq(
        label_tensor[idx_val_tensor]).sum().item())
    # BUG FIX: divide by the *number* of validation nodes; the original
    # divided by the sum of the index values, which is not a count.
    acc = correct / idx_val_tensor.numel()
    print("acc : {:.4f}".format(acc))
Exemple #9
0
def test(p_adj_np: np.ndarray,
         p_feat_np: np.ndarray,
         p_labels_np: np.ndarray,
         p_index_test: np.ndarray,
         normalize_feat=False):
    """Evaluate a saved GCN checkpoint on the test nodes of a dense graph.

    Args:
        p_adj_np: dense adjacency matrix, shape (N, N).
        p_feat_np: node-feature matrix, shape (N, F).
        p_labels_np: one-hot label matrix, shape (N, C).
        p_index_test: integer indices of the test nodes.
        normalize_feat: row-normalize the features when True.

    Returns:
        Test accuracy as computed by F_accuracy.
    """
    print("\nTesting")
    use_gpu = t.cuda.is_available()
    opt = opts()

    # Optional row-normalization of the features.
    if normalize_feat:
        feat_nor_np = F_Nor.normalize_feat(p_feat_np)
    else:
        feat_nor_np = p_feat_np

    # Add self-loops (A <- A + I), then compute the degree normalizer.
    # (The original also computed degree_np once *before* adding self-loops
    # and immediately overwrote it — that dead computation is removed.)
    p_adj_np = (sp.csr_matrix(p_adj_np) + sp.eye(p_adj_np.shape[1])).A
    degree_np = F_Nor.normalize_adj_degree(p_adj_np)  # D^-0.5

    # Numpy -> Tensor
    adj_tensor = t.from_numpy(p_adj_np).float()
    feat_nor_tensor = t.from_numpy(feat_nor_np).float()
    label_tensor = t.from_numpy(np.where(p_labels_np)[1]).long()
    idx_test_tensor = t.from_numpy(p_index_test).long()
    degree_tensor = t.from_numpy(degree_np).float()

    # Report the matrix shapes that were obtained.
    print("\nObtain(Adj,Feat,Label) matrix")
    print("| Adj : {}".format(p_adj_np.shape))
    print("| Feat: {}".format(feat_nor_np.shape))
    print("| label:{}".format(p_labels_np.shape))

    # Load the checkpoint saved by train() (<model_path>/<dataset>/<model>.t7).
    opt.model_path = "../checkpoint"
    load_model = t.load("{}/{}/{}.t7".format(opt.model_path, opt.dataset,
                                             opt.model))
    model = load_model['model'].cpu()
    acc_val = load_model['acc']
    print("best epoch was : {}".format(load_model['epoch']))

    if use_gpu:
        model.cuda()
        adj_tensor, feat_nor_tensor, label_tensor, idx_test_tensor, degree_tensor = list(
            map(lambda x: x.cuda(), [
                adj_tensor, feat_nor_tensor, label_tensor, idx_test_tensor,
                degree_tensor
            ]))

    adj_tensor, feat_nor_tensor, label_tensor, degree_tensor = list(
        map(lambda x: t.autograd.Variable(x),
            [adj_tensor, feat_nor_tensor, label_tensor, degree_tensor]))

    feat_nor_tensor.requires_grad = True

    # Symmetric Laplacian D^-0.5 * A * D^-0.5.
    # BUG FIX: the original called .cuda() unconditionally here and crashed
    # on CPU-only machines; the operands are already on the correct device.
    D_Adj_tensor = t.mm(degree_tensor, adj_tensor)
    adj_nor_tensor = t.mm(D_Adj_tensor, degree_tensor)

    model.eval()
    output = model(feat_nor_tensor, adj_nor_tensor)
    acc_test = F_accuracy(output[idx_test_tensor],
                          label_tensor[idx_test_tensor])

    print("test_acc = {}".format(acc_test))
    print("val_acc = {}".format(acc_val))

    return acc_test