コード例 #1
0
 # Dispatch on the model chosen on the command line: normalize the data for
 # that architecture, build the network, and launch training.
 if args.model == 'caffe':
     train_x, test_x = normalize_for_alexnet(train_x, test_x)
     net = caffe_net.create_net(args.use_cpu)
     # for cifar10_full_train_test.prototxt
     train((train_x, train_y, test_x, test_y),
           net,
           160,
           alexnet_lr,
           0.004,
           use_cpu=args.use_cpu)
     # for cifar10_quick_train_test.prototxt
     # train((train_x, train_y, test_x, test_y), net, 18, caffe_lr, 0.004,
     #      use_cpu=args.use_cpu)
 elif args.model == 'alexnet':
     train_x, test_x = normalize_for_alexnet(train_x, test_x)
     net = alexnet.create_net(args.use_cpu)
     # NOTE(review): only 2 epochs here vs 160 for 'caffe' -- looks like a
     # debugging leftover; confirm the intended epoch count.
     train((train_x, train_y, test_x, test_y),
           net,
           2,
           alexnet_lr,
           0.004,
           use_cpu=args.use_cpu)
 elif args.model == 'vgg':
     train_x, test_x = normalize_for_vgg(train_x, test_x)
     net = vgg.create_net(args.use_cpu)
     train((train_x, train_y, test_x, test_y),
           net,
           250,
           vgg_lr,
           0.0005,
           use_cpu=args.use_cpu)
コード例 #2
0
ファイル: train.py プロジェクト: liyuchenmike/incubator-singa
        # Report the average loss/accuracy over all evaluated test batches.
        print 'test loss = %f, test accuracy = %f' \
            % (loss / num_test_batch, acc / num_test_batch)
    net.save('model.bin')  # save model params into checkpoint file

if __name__ == '__main__':
    # Command-line front end: choose a model, load CIFAR-10, train it.
    parser = argparse.ArgumentParser(description='Train vgg/alexnet for cifar10')
    parser.add_argument('model', choices=['vgg', 'alexnet', 'resnet'], default='alexnet')
    parser.add_argument('data', default='cifar-10-batches-py')
    parser.add_argument('--use_cpu', action='store_true')
    args = parser.parse_args()
    # Fail early if the dataset directory is missing.  The hint text was
    # garbled ('download_data.py py'); fixed to name the script once.
    assert os.path.exists(args.data), \
        'Pls download the cifar10 dataset via "download_data.py"'
    # Single-argument print(...) prints identically under Python 2 and 3.
    print('Loading data ..................')
    train_x, train_y = load_train_data(args.data)
    test_x, test_y = load_test_data(args.data)
    # Dispatch on the chosen model: normalization, network factory, epoch
    # count, learning-rate schedule and weight decay differ per architecture.
    if args.model == 'alexnet':
        train_x, test_x = normalize_for_alexnet(train_x, test_x)
        net = alexnet.create_net(args.use_cpu)
        train((train_x, train_y, test_x, test_y), net, 160, alexnet_lr, 0.004,
              use_cpu=args.use_cpu)
    elif args.model == 'vgg':
        train_x, test_x = normalize_for_vgg(train_x, test_x)
        net = vgg.create_net(args.use_cpu)
        train((train_x, train_y, test_x, test_y), net, 250, vgg_lr, 0.0005,
              use_cpu=args.use_cpu)
    else:
        # Remaining choice is 'resnet' (argparse restricts the values).
        train_x, test_x = normalize_for_alexnet(train_x, test_x)
        net = resnet.create_net(args.use_cpu)
        train((train_x, train_y, test_x, test_y), net, 200, resnet_lr, 1e-4,
              use_cpu=args.use_cpu)
コード例 #3
0
    # Run the batch through the network on the device, then copy the
    # per-class probabilities back to the host as a numpy array.
    x.to_device(cuda)
    y = net.predict(x)
    y.to_host()
    y = tensor.to_numpy(y)
    # Average the predictions across the batch dimension (presumably the
    # batch holds variants of a single input -- TODO confirm with caller).
    prob = np.average(y, 0)
    labels = np.flipud(np.argsort(prob))  # sort prob in descending order
    # Return the top-k class indices and their probabilities.
    return labels[0:topk], prob[labels[0:topk]]


def compute_image_mean(train_dir, num):
    """Return the average of the loaded dataset along axis 1.

    NOTE(review): the sibling example's compute_image_mean averages along
    axis 0; confirm axis=1 is intended for this data layout.
    Removed a leftover Python-2 debug statement (`print images`) that
    dumped the whole dataset to stdout on every call.
    """
    images = load_dataset(train_dir, num)
    return np.average(images, axis=1)


def compute_std(train_dir, num):
    """Return the standard deviation over all elements of the loaded dataset."""
    data = load_dataset(train_dir, num)
    return np.std(data)


if __name__ == '__main__':
    # Load the trained AlexNet checkpoint onto a CUDA device and run
    # prediction on a local CSV dataset.
    model = alexnet.create_net()
    model.load('model.bin')
    cuda = device.create_cuda_gpu()
    model.to_device(cuda)

    # Mean/std normalization is currently disabled:
    # mean = compute_image_mean('/home/goku/Downloads/transformed103variance.csv', 1)
    # std = compute_std('/home/goku/Downloads/transformed103variance.csv', 1)
    test_images, _ = load_dataset('/home/goku/Downloads/6.csv', 2)
    # print(<single expression>) prints identically under Python 2 and 3;
    # the original Python-2-only `print expr` statement was modernized.
    print(predict(model, test_images, cuda))
コード例 #4
0
ファイル: predict.py プロジェクト: ijingo/incubator-singa
    # Pre-allocate the full training array: each CIFAR-10 image is a
    # 3x32x32 uint8 block, num_batches batches of batchsize images each.
    images = np.empty((num_batches * batchsize, 3, 32, 32), dtype=np.uint8)
    for did in range(1, num_batches + 1):
        # Batch files are named data_batch_1 .. data_batch_{num_batches}.
        fname_train_data = dir_path + "/data_batch_{}".format(did)
        image, label = load_dataset(fname_train_data)
        images[(did - 1) * batchsize:did * batchsize] = image
        labels.extend(label)
    # Convert to float32 images / int32 labels before returning.
    images = np.array(images, dtype=np.float32)
    labels = np.array(labels, dtype=np.int32)
    return images, labels


def load_test_data(dir_path):
    """Load the CIFAR-10 test batch as float32 images and int32 labels."""
    raw_images, raw_labels = load_dataset(dir_path + "/test_batch")
    images = np.array(raw_images, dtype=np.float32)
    labels = np.array(raw_labels, dtype=np.int32)
    return images, labels


def compute_image_mean(train_dir):
    """Return the mean image (average over axis 0) of the training set."""
    train_images, _ = load_train_data(train_dir)
    return np.average(train_images, axis=0)

if __name__ == '__main__':
    # Restore the AlexNet checkpoint trained by train.py and predict on
    # the default (CPU) device.
    model = alexnet.create_net(True)
    model.load('model', 20)  # the checkpoint from train.py
    dev = device.get_default_device()
    model.to_device(dev)

    # Subtract the training-set mean image from the inputs before predicting.
    mean = compute_image_mean('cifar-10-batches-py')
    test_images, _ = load_test_data('cifar-10-batches-py')
    # predict for two images; print(<single expression>) prints identically
    # under Python 2 and 3 (the original used a Python-2-only print statement).
    print(predict(model, test_images[0:2] - mean, dev))
コード例 #5
0
    # Pre-allocate the full training array: each CIFAR-10 image is a
    # 3x32x32 uint8 block, num_batches batches of batchsize images each.
    images = np.empty((num_batches * batchsize, 3, 32, 32), dtype=np.uint8)
    for did in range(1, num_batches + 1):
        # Batch files are named data_batch_1 .. data_batch_{num_batches}.
        fname_train_data = dir_path + "/data_batch_{}".format(did)
        image, label = load_dataset(fname_train_data)
        images[(did - 1) * batchsize:did * batchsize] = image
        labels.extend(label)
    # Convert to float32 images / int32 labels before returning.
    images = np.array(images, dtype=np.float32)
    labels = np.array(labels, dtype=np.int32)
    return images, labels


def load_test_data(dir_path):
    """Load the CIFAR-10 test batch as float32 images and int32 labels."""
    batch_images, batch_labels = load_dataset(dir_path + "/test_batch")
    return (np.array(batch_images, dtype=np.float32),
            np.array(batch_labels, dtype=np.int32))


def compute_image_mean(train_dir):
    """Return the mean image (average over axis 0) of the training set."""
    all_images, _ = load_train_data(train_dir)
    return np.average(all_images, axis=0)

if __name__ == '__main__':
    # Restore the AlexNet checkpoint produced by train.py and classify the
    # first two CIFAR-10 test images on the default (CPU) device.
    model = alexnet.create_net(True)
    model.load('model', 20)  # the checkpoint from train.py
    run_device = device.get_default_device()
    model.to_device(run_device)

    # Mean-subtract the inputs using the training-set mean image.
    train_mean = compute_image_mean('cifar-10-batches-py')
    test_images, _ = load_test_data('cifar-10-batches-py')
    # predict for two images
    print(predict(model, test_images[0:2] - train_mean, run_device))
コード例 #6
0
            loss += l
            acc += a
            for (s, p, g) in zip(net.param_specs(), net.param_values(), grads):
                opt.apply_with_lr(epoch, get_lr(epoch), g, p, str(s.name))
            # update progress bar
            	utils.update_progress(b * 1.0 / num_train_batch,
                                 'training loss = %f, accuracy = %f' % (l, a))
                info = '\ntraining loss = %f, training accuracy = %f' \
                % (loss/num_train_batch, acc/num_train_batch)
        print info
        
        loss,acc=0.000,0.000
        np.random.shuffle(id)
        for b in range(num_test_batch):
         	x = test_x[b * batch_size:(b+1) * batch_size]
            	y = test_y[b * batch_size:(b+1) * batch_size]
                tx.copy_from_numpy(x)
            	ty.copy_from_numpy(y)
            	l, a = net.evaluate(tx, ty)
		loss += l
                acc += a
 	print 'test loss = %f, test accuracy = %f' \
            % (loss / num_test_batch, acc / num_test_batch)
    net.save('model.bin')  # save model params into checkpoint file
    

if __name__ == '__main__':
    # Entry point: build AlexNet and train it on the local CSV dataset.
    csv_path = '/home/goku/Downloads/6.csv'
    network = alexnet.create_net()
    train(csv_path, network)