Example #1
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable  # legacy API, kept to match the original code

# return_dataset_test and cb_focal_loss are project-local helpers.
def eval_inference_simple(G, F1, class_list, class_num_list, args,
                          alpha=1, gamma=1):
    target_loader_unl, class_list = return_dataset_test(args)
    im_data_t = torch.FloatTensor(1)
    gt_labels_t = torch.LongTensor(1)

    im_data_t = im_data_t.cuda()
    gt_labels_t = gt_labels_t.cuda()

    im_data_t = Variable(im_data_t)
    gt_labels_t = Variable(gt_labels_t)
    G.eval()
    F1.eval()
    size = 0
    test_loss = 0
    correct = 0
    
    num_class = len(class_list)
    output_all = np.zeros((0, num_class))
    criterion = cb_focal_loss(class_num_list)
    confusion_matrix = torch.zeros(num_class, num_class) 
    
    global_paths = []
    global_cosine_sim = []
    
    with torch.no_grad():
        for batch_idx, data_t in enumerate(target_loader_unl):
            im_data_t.data.resize_(data_t[0].size()).copy_(data_t[0])
            gt_labels_t.data.resize_(data_t[1].size()).copy_(data_t[1])
            paths = list(data_t[2])
            feat = G(im_data_t)
            if "resnet" in args.net:
                output1 = F1.fc1(feat)
            output1 = F1.extrafc(output1)
            output1 = F.normalize(output1, dim=1)
            output1 = output1.mm(torch.transpose(F.normalize(F1.fc2.weight, dim=1),0,1))
            size += im_data_t.size(0)
            cosine_sim = output1.data.max(1)[0]
            global_cosine_sim.extend(cosine_sim)
            global_paths.extend(paths)
            # computing accuracy 
            pred1 = output1.data.max(1)[1]
            for t, p in zip(gt_labels_t.view(-1), pred1.view(-1)):
                confusion_matrix[t.long(), p.long()] += 1
            correct += pred1.eq(gt_labels_t.data).cpu().sum()
            test_loss += criterion(output1, gt_labels_t) / len(target_loader_unl)
    print('\nTest set: Average loss: {:.4f}, '
          'Accuracy: {}/{} F1 ({:.4f}%)\n'.
          format(test_loss, correct, size,
                 100. * float(correct) / size))
    
    # Focal-style weighting: the lower a sample's top cosine similarity,
    # the larger its weight; the +1.0 keeps a baseline weight of 1.
    paths_to_weights = {}
    for i, path in enumerate(global_paths):
        paths_to_weights[path] = float(
            alpha * (1 - global_cosine_sim[i]) ** gamma + 1.0)

    return test_loss.data, 100. * float(correct) / size, paths_to_weights
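
A minimal call-site sketch; the trained G and F1, the args object, and the downstream use are assumptions from the surrounding training script, not part of the original snippet:

# Hypothetical usage of eval_inference_simple.
loss, acc, paths_to_weights = eval_inference_simple(
    G, F1, class_list, class_num_list, args, alpha=1, gamma=1)
# paths_to_weights maps each unlabeled image path to a weight that grows
# as the prediction's cosine similarity drops, so uncertain samples can
# be emphasized in a subsequent weighted training pass.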
Example #2
parser.add_argument('--patience',
                    type=int,
                    default=5,
                    metavar='S',
                    help='early stopping to wait for improvement '
                    'before terminating. (default: 5 (5000 iterations))')
parser.add_argument('--early',
                    action='store_false',
                    default=True,
                    help='early stopping on validation or not')

args = parser.parse_args()
print(
    'Dataset: %s; Source: %s; Target: %s; Labeled num per class: %s; Network: %s'
    % (args.dataset, args.source, args.target, args.num, args.net))
source_loader, target_loader, target_loader_unl, target_loader_val, \
    target_loader_test, class_list = return_dataset(args)
target_loader_test, _ = return_dataset_test(args)

use_gpu = torch.cuda.is_available()
record_dir = 'record/%s/%s' % (args.dataset, args.method)
if not os.path.exists(record_dir):
    os.makedirs(record_dir)
record_file = os.path.join(
    record_dir, '%s_net_%s_%s_to_%s_num_%s' %
    (args.method, args.net, args.source, args.target, args.num))

torch.cuda.manual_seed(args.seed)
if args.net == 'resnet34':
    G = resnet34()
    inc = 512
elif args.net == 'resnet50':
    G = resnet50()
    inc = 2048
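
The snippet cuts off before the classifier head F1 is constructed. A minimal sketch of a head exposing the fc1 / extrafc / fc2 attributes that the eval functions touch (the layer widths and wiring here are assumptions, not the original code):

import torch.nn as nn

class Predictor(nn.Module):
    # Hypothetical head: fc1 projects backbone features, extrafc adds a
    # second projection, and the normalized rows of fc2.weight act as
    # class prototypes for the cosine-similarity scores in Examples #1/#3.
    def __init__(self, num_class, inc=512, hidden=512):
        super().__init__()
        self.fc1 = nn.Linear(inc, hidden)
        self.extrafc = nn.Linear(hidden, hidden)
        self.fc2 = nn.Linear(hidden, num_class, bias=False)

F1 = Predictor(num_class=len(class_list), inc=inc)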
Example #3
def eval_inference(G, F1, class_list, class_num_list, args):
    target_loader_unl, class_list = return_dataset_test(args)
    im_data_t = torch.FloatTensor(1)
    gt_labels_t = torch.LongTensor(1)

    im_data_t = im_data_t.cuda()
    gt_labels_t = gt_labels_t.cuda()

    im_data_t = Variable(im_data_t)
    gt_labels_t = Variable(gt_labels_t)
    G.eval()
    F1.eval()
    size = 0
    test_loss = 0
    correct = 0

    num_class = len(class_list)
    output_all = np.zeros((0, num_class))
    criterion = cb_focal_loss(class_num_list)
    confusion_matrix = torch.zeros(num_class, num_class)

    global_paths = []
    global_pred1 = []
    global_cosine_sim = []
    with torch.no_grad():
        for batch_idx, data_t in enumerate(target_loader_unl):
            im_data_t.data.resize_(data_t[0].size()).copy_(data_t[0])
            gt_labels_t.data.resize_(data_t[1].size()).copy_(data_t[1])
            paths = list(data_t[2])
            feat = G(im_data_t)
            if "resnet" in args.net:
                output1 = F1.fc1(feat)
            else:
                output1 = feat
            output1 = F1.extrafc(output1)
            output1 = F.normalize(output1, dim=1)
            output1 = output1.mm(
                torch.transpose(F.normalize(F1.fc2.weight, dim=1), 0, 1))
            size += im_data_t.size(0)
            cosine_sim = output1.data.max(1)[0]
            pred1 = output1.data.max(1)[1]
            global_pred1.extend(pred1)
            global_cosine_sim.extend(cosine_sim)
            global_paths.extend(paths)
            for t, p in zip(gt_labels_t.view(-1), pred1.view(-1)):
                confusion_matrix[t.long(), p.long()] += 1
            correct += pred1.eq(gt_labels_t.data).cpu().sum()
            test_loss += criterion(output1,
                                   gt_labels_t) / len(target_loader_unl)
    print('\nTest set: Average loss: {:.4f}, '
          'Accuracy: {}/{} F1 ({:.4f}%)\n'.format(
              test_loss, correct, size, 100. * float(correct) / size))
    # Group each sample's cosine similarity and path under its predicted class.
    class_wise_sim_path = {}
    global_weights = [1 for _ in range(len(global_paths))]
    for i, pred1 in enumerate(global_pred1):
        key = str(pred1.item())
        if key not in class_wise_sim_path:
            class_wise_sim_path[key] = [[global_cosine_sim[i].item()],
                                        [global_paths[i]]]
        else:
            class_wise_sim_path[key][0].append(
                global_cosine_sim[i].item())  # append the cosine similarity
            class_wise_sim_path[key][1].append(global_paths[i])  # append the path
    for pred in class_wise_sim_path.keys():
        # Sort each class's paths by cosine similarity, ascending.
        sorted_paths = [
            path for _, path in sorted(
                zip(class_wise_sim_path[pred][0], class_wise_sim_path[pred][1]))
        ]
        # Keep the 10% of paths with the lowest similarity (the least
        # confident predictions) and upweight them.
        top_sorted_paths = sorted_paths[:int(0.1 * len(sorted_paths))]
        for top_sorted_path in top_sorted_paths:
            global_weights[global_paths.index(top_sorted_path)] = 1.2

    paths_to_weights = {}
    for i, path in enumerate(global_paths):
        paths_to_weights[path] = global_weights[i]
    return test_loss.data, 100. * float(correct) / size, paths_to_weights
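
One hedged way to consume the returned paths_to_weights during training; the helper below and its (logits, labels, paths) batch layout are assumptions mirroring data_t above:

import torch
import torch.nn.functional as F

def weighted_ce(logits, labels, paths, paths_to_weights):
    # Hypothetical helper: scale each sample's cross-entropy loss by the
    # weight assigned to its image path (1.0 or 1.2 in this example).
    weights = torch.tensor([paths_to_weights[p] for p in paths],
                           device=logits.device)
    per_sample = F.cross_entropy(logits, labels, reduction='none')
    return (weights * per_sample).mean()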
Example #4
parser.add_argument('--target',
                    type=str,
                    default='sketch',
                    metavar='B',
                    help='target domain')
parser.add_argument('--dataset',
                    type=str,
                    default='multi',
                    choices=['multi'],
                    help='the name of dataset, multi is large scale dataset')
parser.add_argument('--num',
                    type=int,
                    default=3,
                    help='number of labeled examples in the target')
args = parser.parse_args()
print('dataset %s source %s target %s network %s' %
      (args.dataset, args.source, args.target, args.net))
target_loader_unl, class_list = return_dataset_test(args)
use_gpu = torch.cuda.is_available()

if args.net == 'resnet34':
    G = resnet34()
    inc = 512
elif args.net == 'resnet50':
    G = resnet50()
    inc = 2048
elif args.net == "alexnet":
    G = AlexNetBase()
    inc = 4096
elif args.net == "vgg":
    G = VGGBase()
    inc = 4096
else:
    raise ValueError('Model cannot be recognized.')
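
What typically follows this setup in an eval script is restoring trained weights before inference; a sketch under an assumed checkpoint layout (the directory and filename scheme are hypothetical):

# Hypothetical continuation; checkpoint paths are an assumption.
ckpt_dir = './save_model'
G.load_state_dict(torch.load('%s/G_%s_to_%s.pth' %
                             (ckpt_dir, args.source, args.target)))
G = G.cuda().eval()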
Example #5
def eval_inference(G,
                   F1,
                   class_list,
                   class_num_list,
                   args,
                   step,
                   alpha=2,
                   gamma=1):
    output_file = "entropy_weight_files/%s_%s_%s_%s_%s.txt" % (
        args.method, args.net, args.source, args.target, str(step))
    target_loader_unl, class_list = return_dataset_test(args)
    im_data_t = torch.FloatTensor(1)
    gt_labels_t = torch.LongTensor(1)

    im_data_t = im_data_t.cuda()
    gt_labels_t = gt_labels_t.cuda()

    im_data_t = Variable(im_data_t)
    gt_labels_t = Variable(gt_labels_t)
    G.eval()
    F1.eval()
    size = 0
    test_loss = 0
    correct = 0

    num_class = len(class_list)
    output_all = np.zeros((0, num_class))
    criterion = cb_focal_loss(class_num_list)
    confusion_matrix = torch.zeros(num_class, num_class)

    global_paths = []
    global_cosine_sim = []

    with torch.no_grad():
        for batch_idx, data_t in enumerate(target_loader_unl):
            im_data_t.data.resize_(data_t[0].size()).copy_(data_t[0])
            gt_labels_t.data.resize_(data_t[1].size()).copy_(data_t[1])
            paths = data_t[2]
            feat = G(im_data_t)
            if "resnet" in args.net:
                output1 = F1.fc1(feat)
                output1 = F1.fc2(output1)
                output1 = F.normalize(output1, dim=1)
                output1 = output1.mm(
                    torch.transpose(F.normalize(F1.fc3.weight, dim=1), 0, 1))
            else:
                output1 = F1.fc1(feat)
                output1 = F.normalize(output1, dim=1)
                output1 = output1.mm(
                    torch.transpose(F.normalize(F1.fc2.weight, dim=1), 0, 1))

            size += im_data_t.size(0)
            cosine_sim = output1.data.max(1)[0]
            global_cosine_sim.extend(cosine_sim)
            global_paths.extend(paths)
            # computing accuracy
            pred1 = output1.data.max(1)[1]
            for t, p in zip(gt_labels_t.view(-1), pred1.view(-1)):
                confusion_matrix[t.long(), p.long()] += 1
            correct += pred1.eq(gt_labels_t.data).cpu().sum()
            test_loss += criterion(output1,
                                   gt_labels_t) / len(target_loader_unl)
    print('\nTest set: Average loss: {:.4f}, '
          'Accuracy: {}/{} F1 ({:.4f}%)\n'.format(
              test_loss, correct, size, 100. * float(correct) / size))

    # Write the per-path focal-style weights to disk and build the dict
    # in one pass; ensure the output directory exists first.
    os.makedirs("entropy_weight_files", exist_ok=True)  # requires import os
    weight_dict = {}
    with open(output_file, "w") as f:
        for i, path in enumerate(global_paths):
            weight = float(alpha * (1 - global_cosine_sim[i]) ** gamma)
            f.write("%f %s\n" % (weight, path))
            weight_dict[path] = weight

    return test_loss.data, 100. * float(correct) / size, weight_dict
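
For intuition, a worked instance of the weight formula with the defaults above:

# alpha=2, gamma=1:
# sim = 0.9 -> weight = 2 * (1 - 0.9)**1 = 0.2   (confident, downweighted)
# sim = 0.3 -> weight = 2 * (1 - 0.3)**1 = 1.4   (uncertain, upweighted)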
Example #6
def eval_inference_simple(G, F1, class_list, class_num_list, args):
    output_file = "entropy_weight_files/%s_%s_%s_%s.txt" % (
        args.method, args.net, args.source, args.target)
    target_loader_unl, class_list = return_dataset_test(args)
    im_data_t = torch.FloatTensor(1)
    gt_labels_t = torch.LongTensor(1)

    im_data_t = im_data_t.cuda()
    gt_labels_t = gt_labels_t.cuda()

    im_data_t = Variable(im_data_t)
    gt_labels_t = Variable(gt_labels_t)
    G.eval()
    F1.eval()
    size = 0
    global_paths = []
    global_pred1 = []
    global_cosine_sim = []
    with torch.no_grad():
        for batch_idx, data_t in enumerate(target_loader_unl):
            im_data_t.data.resize_(data_t[0].size()).copy_(data_t[0])
            gt_labels_t.data.resize_(data_t[1].size()).copy_(data_t[1])
            paths = data_t[2]
            feat = G(im_data_t)
            if "resnet" in args.net:
                output1 = F1.fc1(feat)
            output1 = F1.extrafc(output1)
            output1 = F.normalize(output1, dim=1)
            output1 = output1.mm(
                torch.transpose(F.normalize(F1.fc2.weight, dim=1), 0, 1))
            size += im_data_t.size(0)
            cosine_sim = output1.data.max(1)[0]
            pred1 = output1.data.max(1)[1]
            global_pred1.extend(pred1)
            global_cosine_sim.extend(cosine_sim)
            global_paths.extend(paths)
    # Group each sample's cosine similarity and path under its predicted class.
    class_wise_sim_path = {}
    global_weights = [1 for _ in range(len(global_paths))]
    for i, pred1 in enumerate(global_pred1):
        key = str(pred1.item())
        if key not in class_wise_sim_path:
            class_wise_sim_path[key] = [[global_cosine_sim[i].item()],
                                        [global_paths[i]]]
        else:
            class_wise_sim_path[key][0].append(
                global_cosine_sim[i].item())  # append the cosine similarity
            class_wise_sim_path[key][1].append(global_paths[i])  # append the path
    for pred in class_wise_sim_path.keys():
        # Sort each class's paths by cosine similarity, ascending.
        sorted_paths = [
            path for _, path in sorted(
                zip(class_wise_sim_path[pred][0], class_wise_sim_path[pred][1]))
        ]
        # Keep the 10% of paths with the lowest similarity (the least
        # confident predictions) and upweight them.
        top_sorted_paths = sorted_paths[:int(0.1 * len(sorted_paths))]
        for top_sorted_path in top_sorted_paths:
            global_weights[global_paths.index(top_sorted_path)] = 1.2

    os.makedirs("entropy_weight_files", exist_ok=True)  # requires import os
    with open(output_file, "w") as f:
        for i, path in enumerate(global_paths):
            f.write("%f %s\n" % (global_weights[i], path))
    return
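
Examples #5 and #6 persist the weights to disk instead of returning them; a small hypothetical reader for the "%f %s\n" line format written above:

def load_weight_file(output_file):
    # Parse "weight path" lines back into a path -> weight dict; split on
    # the first space only, so image paths containing spaces survive.
    weights = {}
    with open(output_file) as f:
        for line in f:
            w, p = line.rstrip("\n").split(" ", 1)
            weights[p] = float(w)
    return weights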