Example #1
def eval_imagenet_q(net_name, param_pickle_path):
    netparams = load.load_netparams_tf_q(param_pickle_path)
    data_spec = networks.get_data_spec(net_name)
    input_node = tf.placeholder(tf.float32,
                                shape=(None, data_spec.crop_size,
                                       data_spec.crop_size,
                                       data_spec.channels))
    label_node = tf.placeholder(tf.int32)
    if net_name == 'alexnet':
        logits_ = networks.alexnet(input_node, netparams)
    elif net_name == 'googlenet':
        logits_ = networks.googlenet(input_node, netparams)
    elif net_name == 'nin':
        logits_ = networks.nin(input_node, netparams)
    elif net_name == 'resnet18':
        logits_ = networks.resnet18(input_node, netparams)
    elif net_name == 'resnet50':
        logits_ = networks.resnet50(input_node, netparams)
    elif net_name == 'squeezenet':
        logits_ = networks.squeezenet(input_node, netparams)
    elif net_name == 'vgg16net':
        logits_ = networks.vgg16net_noisy(input_node, netparams)
    # label_node holds integer class indices, so use the sparse cross entropy.
    loss_op = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_,
                                                       labels=label_node))
    probs = softmax(logits_)
    top_k_op = tf.nn.in_top_k(probs, label_node, 5)
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001, epsilon=0.1)
    correct_pred = tf.equal(tf.argmax(probs, 1),
                            tf.cast(label_node, tf.int64))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    count = 0
    correct = 0
    cur_accuracy = 0
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        image_producer = dataset.ImageNetProducer(val_path=IMAGE_LABLE,
                                                  data_path=IMAGE_PATH,
                                                  data_spec=data_spec)
        total = len(image_producer)
        coordinator = tf.train.Coordinator()
        threads = image_producer.start(session=sess, coordinator=coordinator)
        for (labels, images) in image_producer.batches(sess):
            correct += np.sum(
                sess.run(top_k_op,
                         feed_dict={
                             input_node: images,
                             label_node: labels
                         }))
            count += len(labels)
            cur_accuracy = float(correct) * 100 / count
            #print('{:>6}/{:<6} {:>6.2f}%'.format(count, total, cur_accuracy))
        print(cur_accuracy)
        coordinator.request_stop()
        coordinator.join(threads, stop_grace_period_secs=2)
    return cur_accuracy
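A minimal driver sketch for the snippet above, assuming eval_imagenet_q is importable and that the module-level constants IMAGE_PATH and IMAGE_LABLE (referenced but not defined here) already point at the ImageNet validation images and label file; the pickle path below is a placeholder, not the project's actual file.

# Hypothetical usage: evaluate a quantized ResNet-50 parameter pickle.
if __name__ == '__main__':
    top5 = eval_imagenet_q('resnet50', 'params/resnet50_quant.pkl')
    print('ResNet-50 top-5 accuracy: %.2f%%' % top5)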
Example #2
            val_loss, val_accuracy = validate(val_data, net, criterion,
                                              (opt.num_parts + 1), ctx)
            epoch_str = (
                "Epoch %d. Train loss: %f, Val loss %f, Val accuracy %f, " %
                (epoch, __loss, val_loss, val_accuracy))
        else:
            epoch_str = ("Epoch %d. Train loss: %f, " % (epoch, __loss))

        prev_time = cur_time
        print(epoch_str + time_str + ', lr ' + str(trainer.learning_rate))

    net.save_parameters("params/resnet50.params")


if __name__ == '__main__':
    opt = parser.parse_args()
    logging.info(opt)
    random.seed(opt.seed)
    mx.random.seed(opt.seed)

    batch_size = opt.batch_size
    num_gpus = opt.num_gpus
    context = [mx.gpu(i) for i in range(num_gpus)]
    epochs = [int(i) for i in opt.epochs.split(',')]
    batch_size *= max(1, num_gpus)

    net = resnet50(ctx=context,
                   num_features=256,
                   num_classes=751,
                   num_parts=opt.num_parts)
    main(net, batch_size, epochs, opt, context)
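For reference, a small standalone illustration (not from the original script) of how the comma-separated --epochs flag and the GPU count feed the values used above; the literal values are placeholders.

# e.g. --epochs 20,40,60 --num_gpus 2 --batch_size 32 on the command line
epochs = [int(i) for i in "20,40,60".split(',')]  # -> [20, 40, 60]
batch_size = 32 * max(1, 2)                       # per-GPU batch kept at 32
print(epochs, batch_size)                         # [20, 40, 60] 64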
Example #3
    # set gpu ids
    if len(gpu_ids) > 0:
        context = mx.gpu()

    test_set = [(line, int(line.split('_')[0]))
                for line in os.listdir(data_dir + 'test')]
    query_set = [(line, int(line.split('_')[0]))
                 for line in os.listdir(data_dir + 'query')]

    test_cam, test_label = get_id(test_set)
    query_cam, query_label = get_id(query_set)

    ######################################################################
    # Load the trained model
    model_structure = resnet50(ctx=context, pretrained=False)
    model = load_network(model_structure, context)

    # Extract feature
    test_loader, query_loader = get_data(batch_size, test_set, query_set)
    print('start test')
    test_feature = extract_feature(model, test_loader, context)
    print('start query')
    query_feature = extract_feature(model, query_loader, context)

    # Save to Matlab for check
    sio.savemat('result/test.mat', {'data': test_feature})
    sio.savemat('result/testID.mat', {'data': test_label})
    sio.savemat('result/testCam.mat', {'data': test_cam})

    sio.savemat('result/query.mat', {'data': query_feature})
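A small sanity-check sketch, not part of the original script, that reloads the .mat files written above and inspects their shapes before further processing in Matlab or Python.

import scipy.io as sio

test_feat = sio.loadmat('result/test.mat')['data']
test_ids = sio.loadmat('result/testID.mat')['data']
print(test_feat.shape, test_ids.shape)  # both should cover the same number of gallery images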
Example #4
        if val_data is not None:
            val_loss, val_accuracy = validate(val_data, net, criterion, ctx)
            epoch_str = (
                "Epoch %d. Train loss: %f, Val loss %f, Val accuracy %f, " %
                (epoch, __loss, val_loss, val_accuracy))
        else:
            epoch_str = ("Epoch %d. Train loss: %f, " % (epoch, __loss))

        prev_time = cur_time
        print(epoch_str + time_str + ', lr ' + str(trainer.learning_rate))

    if not os.path.exists("params"):
        os.mkdir("params")
    net.save_parameters("params/resnet50.params")


if __name__ == '__main__':
    opt = parser.parse_args()
    logging.info(opt)
    mx.random.seed(opt.seed)

    batch_size = opt.batch_size
    num_gpus = opt.num_gpus
    epochs = [int(i) for i in opt.epochs.split(',')]
    batch_size *= max(1, num_gpus)

    context = [mx.gpu(i) for i in range(num_gpus)]
    net = resnet50(ctx=context, num_classes=751)
    main(net, batch_size, epochs, opt, context)
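Once training finishes, the checkpoint written above can be restored with the same constructor. A minimal sketch, assuming a single GPU and the resnet50 factory used in this example:

import mxnet as mx

context = [mx.gpu(0)]
net = resnet50(ctx=context, num_classes=751)
net.load_parameters("params/resnet50.params", ctx=context)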
Example #5
        for x in ['train', 'val']
    }

    dataloaders = {
        x: torch.utils.data.DataLoader(image_datasets[x],
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=0)
        for x in ['train', 'val']
    }

    # number of mini-batches per split
    print('size of dataloaders: {}'.format(
        {x: len(dataloaders[x]) for x in ['train', 'val']}))
    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
    class_names = image_datasets['train'].classes

    model_slice = networks.resnet50(pretrained=False)
    model_slice.cuda()
    num_ftrs_slice = model_slice.fc.in_features
    model_slice.fc = nn.Linear(num_ftrs_slice, 2)
    model_slice = model_slice.to(device)

    model_patch = networks.resnet34(pretrained=False)
    model_patch.cuda()
    num_ftrs_patch = model_patch.fc.in_features
    model_patch.fc = nn.Linear(num_ftrs_patch, 2)
    model_patch = model_patch.to(device)

    slice_model_path = os.path.join(slice_dir, 'best_scratch_ResNet-50.pth')
    patch_model_path = os.path.join(patch_dir, 'best_scratch_ResNet-34.pth')
    '''Load the pretrained streams!'''
    model_slice.load_state_dict(
Example #6
File: test.py Project: wjgaas/gluon-reid
    # set gpu ids
    if len(gpu_ids) > 0:
        context = mx.gpu()

    test_set = [(line, int(line.split('_')[0]))
                for line in os.listdir(data_dir + 'test')]
    query_set = [(line, int(line.split('_')[0]))
                 for line in os.listdir(data_dir + 'query')]

    test_cam, test_label = get_id(test_set)
    query_cam, query_label = get_id(query_set)

    ######################################################################
    # Load the trained model
    model_structure = resnet50(ctx=context,
                               pretrained=False,
                               num_features=256,
                               num_classes=751)
    model = load_network(model_structure, context)

    # Extract feature
    test_loader, query_loader = get_data(batch_size, test_set, query_set)
    print('start test')
    test_feature = extract_feature(model, test_loader, context)
    print('start query')
    query_feature = extract_feature(model, query_loader, context)

    # Save to Matlab for check
    sio.savemat('result/test.mat', {'data': test_feature})
    sio.savemat('result/testID.mat', {'data': test_label})
    sio.savemat('result/testCam.mat', {'data': test_cam})
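The int(line.split('_')[0]) parsing above relies on the Market-1501 naming convention, where the person ID comes before the first underscore and the camera tag follows it. A standalone illustration with a made-up file name:

fname = '0002_c1s1_000451_03.jpg'     # example name, not taken from the dataset dump
person_id = int(fname.split('_')[0])  # -> 2
camera = fname.split('_')[1][:2]      # -> 'c1'
print(person_id, camera)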
Example #7
File: run.py Project: fcdl94/UDA
def get_setting():
    global n_classes
    if 'svhn' in args.dataset:
        transform = tv.transforms.Compose([
            transforms.Resize((28, 28)),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])
        source = tv.datasets.SVHN(ROOT, download=True, transform=transform)
        source.targets = torch.tensor(source.labels)

        test = tv.datasets.MNIST(ROOT,
                                 train=False,
                                 download=True,
                                 transform=tv.transforms.Compose(
                                     [tv.transforms.Grayscale(3), transform]))
        target = tv.datasets.MNIST(
            ROOT,
            train=True,
            download=True,
            transform=tv.transforms.Compose(
                [tv.transforms.Grayscale(3), transform]),
            target_transform=transforms.Lambda(lambda y: -1))
        EPOCHS = 150
        batch_size = 64
        n_classes = 10
        net = svhn_net().to(device)
        init_lr = 0.01

    elif 'mnist' in args.dataset:
        transform = tv.transforms.Compose([
            transforms.Resize((28, 28)),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])
        source = tv.datasets.MNIST(ROOT,
                                   train=True,
                                   download=True,
                                   transform=tv.transforms.Compose(
                                       [tv.transforms.Grayscale(3),
                                        transform]))
        test = MNISTM(ROOT, train=False, download=True, transform=transform)
        target = MNISTM(ROOT,
                        train=True,
                        download=True,
                        transform=transform,
                        target_transform=transforms.Lambda(lambda y: -1))
        EPOCHS = 40
        net = lenet_net().to(device)
        batch_size = 64
        n_classes = 10
        init_lr = 0.01
    else:
        paths = {
            "p": ROOT + "office/Product",
            "a": ROOT + "office/Art",
            "c": ROOT + "office/Clipart",
            "r": ROOT + "office/Real World"
        }

        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])

        # Resize to 224x224 and normalize with the ImageNet mean/std above
        transform = transforms.Compose(
            [transforms.Resize((224, 224)),
             transforms.ToTensor(), normalize])
        # Create data augmentation transform
        augmentation = transforms.Compose([
            transforms.Resize(256),
            transforms.RandomResizedCrop(224, (0.6, 1.)),
            transforms.RandomHorizontalFlip(), transform
        ])

        source = ImageFolder(paths[args.source], augmentation)
        target = ImageFolder(paths[args.target],
                             augmentation,
                             target_transform=transforms.Lambda(lambda y: -1))

        test = ImageFolder(paths[args.target], transform)
        EPOCHS = 60
        n_classes = 65
        net = resnet50(pretrained=True, num_classes=65).to(device)
        batch_size = 32
        init_lr = 0.001

    # target_loader = DataLoader(target, batch_size=batch_size, shuffle=True, num_workers=8)
    test_loader = DataLoader(test,
                             batch_size=batch_size,
                             shuffle=False,
                             num_workers=8)
    target_loader = DataLoader(target,
                               batch_size=batch_size,
                               shuffle=True,
                               num_workers=8)
    source_loader = DataLoader(source,
                               batch_size=batch_size,
                               shuffle=True,
                               num_workers=8)

    return target_loader, source_loader, test_loader, net, EPOCHS, init_lr
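A minimal sketch of how get_setting might be consumed, assuming args and device are defined elsewhere in run.py (they are outside this excerpt); the plain SGD optimizer here is an assumption, not necessarily the project's actual choice.

import torch

target_loader, source_loader, test_loader, net, epochs, init_lr = get_setting()
optimizer = torch.optim.SGD(net.parameters(), lr=init_lr, momentum=0.9)
for epoch in range(epochs):
    pass  # domain-adaptation training steps would go here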
Example #8
        time_str = "Time %02d:%02d:%02d" % (h, m, s)
        __loss = _loss/len(train_data)

        if val_data is not None:
            val_loss, val_accuray = validate(val_data, net, criterion, ctx)
            epoch_str = ("Epoch %d. Train loss: %f, Val loss %f, Val accuray %f, " % (epoch, __loss , val_loss, val_accuray))
        else:
            epoch_str = ("Epoch %d. Train loss: %f, " % (epoch, __loss))

        prev_time = cur_time
        print(epoch_str + time_str + ', lr ' + str(trainer.learning_rate))

    if not os.path.exists("params"):
        os.mkdir("params")
    net.save_parameters("params/resnet50.params")


if __name__ == '__main__':
    opt = parser.parse_args()
    logging.info(opt)
    mx.random.seed(opt.seed)

    batch_size = opt.batch_size
    num_gpus = opt.num_gpus
    epochs = [int(i) for i in opt.epochs.split(',')]
    batch_size *= max(1, num_gpus)

    context = [mx.gpu(i) for i in range(num_gpus)]
    net = resnet50(ctx=context, num_classes=751)
    main(net, batch_size, epochs, opt, context)
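For context, a sketch of how the h, m, s values behind the Time string at the top of this snippet are typically derived; the original timing code sits outside the excerpt, so the datetime-based version below is an assumption.

import datetime

prev_time = datetime.datetime.now()  # in the script, set at the end of the previous epoch
# ... one epoch of training runs here ...
cur_time = datetime.datetime.now()
elapsed = (cur_time - prev_time).total_seconds()
m, s = divmod(int(elapsed), 60)
h, m = divmod(m, 60)
time_str = "Time %02d:%02d:%02d" % (h, m, s)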
Example #9
        __loss = _loss / len(train_data)

        if val_data is not None:
            val_loss, val_accuracy = validate(val_data, net, criterion1,
                                              criterion2, ctx)
            epoch_str = (
                "Epoch %d. Train loss: %f, Val loss %f, Val accuracy %f, " %
                (epoch, __loss, val_loss, val_accuracy))
        else:
            epoch_str = ("Epoch %d. Train loss: %f, " % (epoch, __loss))

        prev_time = cur_time
        print(epoch_str + time_str + ', lr ' + str(trainer.learning_rate))

    if not os.path.exists("params"):
        os.mkdir("params")
    net.save_parameters("params/resnet50.params")


if __name__ == '__main__':
    opt = parser.parse_args()
    logging.info(opt)
    random.seed(opt.seed)
    mx.random.seed(opt.seed)

    batch_size = opt.batch_size
    num_gpus = opt.num_gpus
    context = [mx.gpu(i) for i in range(num_gpus)]
    epochs = [int(i) for i in opt.epochs.split(',')]
    batch_size *= max(1, num_gpus)

    net = resnet50(ctx=context, num_features=256, num_classes=751)
    main(net, batch_size, epochs, opt, context)
Example #10
    data_dir = osp.expanduser("~/.mxnet/datasets/Market-1501-v15.09.15/")
    gpu_ids = [0]

    # set gpu ids
    if len(gpu_ids) > 0:
        context = mx.gpu()

    test_set = [(osp.join(data_dir, 'bounding_box_test', line),
                 int(line.split('_')[0]))
                for line in os.listdir(data_dir + 'bounding_box_test')
                if "jpg" in line and "-1" not in line]
    query_set = [(osp.join(data_dir, 'query', line),
                  int(line.split('_')[0]))
                 for line in os.listdir(data_dir + 'query')
                 if "jpg" in line]
    
    test_cam, test_label = get_id(test_set)
    query_cam, query_label = get_id(query_set)

    ######################################################################
    # Load the trained model
    model_structure = resnet50(ctx=context, pretrained=False)
    model = load_network(model_structure, context)

    # Extract feature
    test_loader, query_loader = get_data(batch_size, test_set, query_set)
    print('start test')
    test_feature = extract_feature(model, test_loader, context)
    print('start query')
    query_feature = extract_feature(model, query_loader, context)


    query_feature = nd.array(query_feature).as_in_context(mx.gpu(0))
    test_feature = nd.array(test_feature).as_in_context(mx.gpu(0))

    num = query_label.size
    dist_all = nd.linalg.gemm2(query_feature, test_feature, transpose_b=True)
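dist_all above is a query-by-gallery matrix of feature dot products. A hedged continuation sketch, not from the original file, showing how each query row would typically be ranked against the gallery:

# Sort gallery entries per query by descending dot-product similarity.
index = nd.argsort(dist_all, axis=1, is_ascend=False)
top1_match = index[:, 0]  # gallery index of the best match for each query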