Example #1
import sys

import numpy as np

import chainer
import chainer.links as L
from chainer import serializers

import net


def main(argv):

    if len(argv) < 4:
        print "Usage: %s [sp|mlp|cnn] model_path image_path" % argv[0]
        sys.exit()

    net_type = argv[1]  # renamed to avoid shadowing the built-in 'type'
    model_path = argv[2]
    image_path = argv[3]

    if net_type == "sp":
        model = L.Classifier(net.MnistSP())
    elif net_type == "cnn":
        model = L.Classifier(net.MnistCNN())
    else:
        model = L.Classifier(net.MnistMLP())

    serializers.load_npz(model_path, model)

    print("input:\t%s" % image_path)

    x = load_image(image_path)
    x = chainer.Variable(np.asarray([x]))
    r = classify(model, x)

    print("output:")
    for i in range(len(r.data[0])):
        print "\t%d: %f" % (i, r.data[0][i])
    print("class:\t%d" % np.argmax(r.data[0]))
Example #2
# Prepare dataset
print('load MNIST dataset')
mnist = data.load_mnist_data()
mnist['data'] = mnist['data'].astype(np.float32)
mnist['data'] /= 255
mnist['target'] = mnist['target'].astype(np.int32)

N = 60000
x_train, x_test = np.split(mnist['data'], [N])
y_train, y_test = np.split(mnist['target'], [N])
N_test = y_test.size

# Prepare multi-layer perceptron model, defined in net.py
if args.net == 'simple':
    model = L.Classifier(net.MnistMLP(784, n_units, 10))
    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()
        model.to_gpu()
    xp = np if args.gpu < 0 else cuda.cupy
elif args.net == 'parallel':
    cuda.check_cuda_available()
    model = L.Classifier(net.MnistMLPParallel(784, n_units, 10))
    xp = cuda.cupy

# Setup optimizer
optimizer = optimizers.Adam()
optimizer.setup(model)

# Init/Resume
if args.initmodel:
    print('Load model from', args.initmodel)
    serializers.load_npz(args.initmodel, model)
if args.resume:
    print('Load optimizer state from', args.resume)
    serializers.load_npz(args.resume, optimizer)
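The model above is "defined in net.py", which is not shown on this page. A minimal sketch of an MnistMLP consistent with the net.MnistMLP(784, n_units, 10) call used here (an assumption in Chainer v1 style, not the actual net.py):

# Hypothetical net.py; the real file is not shown on this page.
import chainer
import chainer.functions as F
import chainer.links as L


class MnistMLP(chainer.Chain):
    # Plain three-layer perceptron: n_in -> n_units -> n_units -> n_out.
    def __init__(self, n_in, n_units, n_out):
        super(MnistMLP, self).__init__(
            l1=L.Linear(n_in, n_units),
            l2=L.Linear(n_units, n_units),
            l3=L.Linear(n_units, n_out),
        )

    def __call__(self, x):
        h1 = F.relu(self.l1(x))
        h2 = F.relu(self.l2(h1))
        return self.l3(h2)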
Example #3
                  orientations, pixels_per_cell, cells_per_block)
    #    x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.5, random_state=1234)
    joblib.dump(X, "test_X.pkl")
else:
    X = joblib.load("test_X.pkl")

n_units = 1000

xp = np
x = chainer.Variable(xp.asarray(X))

# Apply each of the 9 classifiers to every row of the test data
pred = np.zeros((X.shape[0], 9), dtype=X.dtype)
for i_cls in range(9):
    print(i_cls)
    # Predict
    model = L.Classifier(net.MnistMLP(15000, n_units, 2))

    serializers.load_npz('mlp' + str(i_cls) + '.model', model)

    pred_raw = model.to_cpu().predictor(x)
    #        np.max(pred.data,axis=1)

    pred_bin = np.argmax(pred_raw.data, axis=1)

    pred[:, i_cls] = pred_bin

data_final = pd.DataFrame(pred)

data_final.to_csv('submission.csv', header=False)  #,index=False)
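The loop above stores each classifier's hard 0/1 decision in a separate column. If a single label per row is wanted instead, a common one-vs-rest variant (an assumption about the intended task, not shown in the source) keeps each classifier's positive-class probability and picks the most confident one:

import chainer.functions as F

# Hypothetical variant of the loop above: keep probabilities, not decisions.
scores = np.zeros((X.shape[0], 9), dtype=np.float32)
for i_cls in range(9):
    model = L.Classifier(net.MnistMLP(15000, n_units, 2))
    serializers.load_npz('mlp' + str(i_cls) + '.model', model)
    pred_raw = model.to_cpu().predictor(x)
    # Positive-class probability instead of the hard argmax decision.
    scores[:, i_cls] = F.softmax(pred_raw).data[:, 1]

# One label per row: the most confident of the 9 one-vs-rest classifiers.
labels = np.argmax(scores, axis=1)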
Example #4
# Prepare dataset
print('load MNIST dataset')
mnist = data.load_mnist_data()
mnist['data'] = mnist['data'].astype(np.float32)
mnist['data'] /= 255
mnist['target'] = mnist['target'].astype(np.int32)

N = 60000
x_train, x_test = np.split(mnist['data'], [N])
y_train, y_test = np.split(mnist['target'], [N])
N_test = y_test.size

# Prepare multi-layer perceptron model, defined in net.py
model = L.Classifier(
    net.MnistMLP(''' TODO: define the correct network architecture '''))

# Setup optimizer
optimizer = optimizers.Adam()
optimizer.setup(model)

train_loss = []
test_loss = []
train_accuracy = []
test_accuracy = []

epochs = six.moves.range(1, n_epoch + 1)
# Learning loop
for epoch in epochs:
    print('epoch', epoch)
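The snippet is cut off at the top of the learning loop. A sketch of a typical minibatch epoch body in the style of Example #5 below (batchsize is assumed to be defined earlier in the script):

    # Shuffle the training set and update on minibatches.
    perm = np.random.permutation(N)
    sum_loss = 0
    sum_accuracy = 0
    for i in six.moves.range(0, N, batchsize):
        x = chainer.Variable(np.asarray(x_train[perm[i:i + batchsize]]))
        t = chainer.Variable(np.asarray(y_train[perm[i:i + batchsize]]))
        # L.Classifier computes softmax cross-entropy and records accuracy.
        optimizer.update(model, x, t)
        sum_loss += float(model.loss.data) * len(t.data)
        sum_accuracy += float(model.accuracy.data) * len(t.data)
    train_loss.append(sum_loss / N)
    train_accuracy.append(sum_accuracy / N)
    print('train mean loss={}, accuracy={}'.format(sum_loss / N,
                                                   sum_accuracy / N))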
Example #5
def train(trainL, trainA, testL, testA):
    trainLoss = []
    trainAccuracy = []
    testLoss = []
    testAccuracy = []
    l1w = []
    l2w = []
    l3w = []
    print('load MNIST dataset')
    mnist = data.load_mnist_data()
    mnist['data'] = mnist['data'].astype(np.float32)
    mnist['data'] /= 255
    mnist['target'] = mnist['target'].astype(np.int32)

    N = 1000
    lsizes = [784, 50, 50, 10]
    x_train, x_test = np.split(mnist['data'], [N])
    y_train, y_test = np.split(mnist['target'], [N])
    N_test = y_test.size

    # Prepare multi-layer perceptron model, defined in net.py
    if args.net == 'simple':
        #model = net.MnistMLP(lsizes)
        model = net.MnistMLP(layer_sizes=lsizes)
        if args.gpu >= 0:
            cuda.get_device(args.gpu).use()
            model.to_gpu()
        xp = np if args.gpu < 0 else cuda.cupy
    elif args.net == 'parallel':
        cuda.check_cuda_available()
        model = L.Classifier(net.MnistMLPParallel(784, n_units, 10))
        xp = cuda.cupy
    # Setup optimizer
    optimizer = optimizers.Adam()
    optimizer.setup(model)
    # Init/Resume
    if args.initmodel:
        print('Load model from', args.initmodel)
        serializers.load_npz(args.initmodel, model)
    if args.resume:
        print('Load optimizer state from', args.resume)
        serializers.load_npz(args.resume, optimizer)
    # Pretrain loop
    print("start pretrain")
    epo = p_epoch
    for j in six.moves.range(1, len(lsizes)):
        if j == len(lsizes) - 1:
            model.setfinetuning()
            print("start finetuning")
            epo = n_epoch
        for epoch in six.moves.range(1, epo + 1):
            print('layer ', j, 'p_epoch ', epoch)
            perm = np.random.permutation(N)
            sum_accuracy = 0
            sum_loss = 0
            for i in six.moves.range(0, N, batchsize):
                x = chainer.Variable(
                    xp.asarray(x_train[perm[i:i + batchsize]]))
                t = chainer.Variable(
                    xp.asarray(y_train[perm[i:i + batchsize]]))
                optimizer.update(model, x, t, j)
                sum_loss += float(model.loss.data) * len(t.data)
                if not model.pretrain:
                    sum_accuracy += float(model.accuracy.data) * len(t.data)
            if model.pretrain:
                print('Pretrain: train mean loss={}'.format(sum_loss / N))
            else:
                print('Finetune: train mean loss={}, accuracy={}'.format(
                    sum_loss / N, sum_accuracy / N))
            trainLoss.append(sum_loss / N)
            trainAccuracy.append(sum_accuracy / N)
            # evaluation
            sum_accuracy = 0
            sum_loss = 0
            model.train = False
            for i in six.moves.range(0, N_test, batchsize):
                x = chainer.Variable(xp.asarray(x_test[i:i + batchsize]),
                                     volatile='on')
                t = chainer.Variable(xp.asarray(y_test[i:i + batchsize]),
                                     volatile='on')
                loss = model(x, t, j)
                sum_loss += float(loss.data) * len(t.data)
                if not model.pretrain:
                    sum_accuracy += float(model.accuracy.data) * len(t.data)
            if model.pretrain:
                print('Pretrain: test  mean loss={}'.format(sum_loss / N_test))
            else:
                print('Finetune: test  mean loss={}, accuracy={}'.format(
                    sum_loss / N_test, sum_accuracy / N_test))
            testLoss.append(sum_loss / N_test)
            testAccuracy.append(sum_accuracy / N_test)
            model.train = True

    # Save the model and the optimizer
    savecsv(trainLoss, trainL)
    savecsv(trainAccuracy, trainA)
    savecsv(testLoss, testL)
    savecsv(testAccuracy, testA)

    print('save the model')
    serializers.save_npz('mlp.model', model)
    print('save the optimizer')
    serializers.save_npz('mlp.state', optimizer)
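train() persists its four metric curves through a savecsv helper that is not defined on this page; a minimal sketch of what it might do (hypothetical):

def savecsv(values, path):
    # Hypothetical helper: write one metric value per line to a CSV file.
    with open(path, 'w') as f:
        for v in values:
            f.write('{}\n'.format(v))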
Example #6
#pdb.set_trace()
x_train, x_test, y_train, y_test = train_test_split(mnist['data'], mnist['target'], test_size=0.30, random_state=123)
#pdb.set_trace();
print("Buying percentage test={}, train={}".format(np.where(y_train==1)[0].shape[0]*100/y_train.shape[0],np.where(y_test==1)[0].shape[0]*100/y_test.shape[0]))
print("Shorting percentage test={}, train={}".format(np.where(y_train==2)[0].shape[0]*100/y_train.shape[0],np.where(y_test==2)[0].shape[0]*100/y_test.shape[0]))
print("Holding percentage test={}, train={}".format(np.where(y_train==0)[0].shape[0]*100/y_train.shape[0],np.where(y_test==0)[0].shape[0]*100/y_test.shape[0]))
#running only for 1
#temp_1_test = np.where(y_test==1)[0]
#y_test =  y_test[temp_1_test]
#x_test = x_test[temp_1_test]
N = x_train.shape[0]
N_test = y_test.size

# Prepare multi-layer perceptron model, defined in net.py
if args.net == 'simple':
    model = L.Classifier(net.MnistMLP(61, n_units, 3))
    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()
        model.to_gpu()
    xp = np if args.gpu < 0 else cuda.cupy
elif args.net == 'parallel':
    cuda.check_cuda_available()
    model = L.Classifier(net.MnistMLPParallel(61, n_units, 3))
    xp = cuda.cupy

# Setup optimizer
if 'opt' in args:
# TODO: arguments can also be passed to each optimizer; see https://github.com/mitmul/chainer-cifar10/blob/master/train.py#L62
    if args.opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=args.lr, momentum=0.9)
    elif args.opt == 'AdaGrad':
        # Assumed completion (the snippet is cut off here): AdaGrad with
        # the same learning-rate flag, following the pattern above.
        optimizer = optimizers.AdaGrad(lr=args.lr)
Example #7
    targetset.append(0)

# Convert List into ndarray
for i in six.moves.range(Num_data):
    dataset[i] = np.asarray(dataset[i]).astype(np.float32)
targetset = np.array(targetset).astype(np.int32)

# Split dataset to data_train(80%) and data_test(20%)
N = 320
data_train, data_test = np.split(dataset, [N])
target_train, target_test = np.split(targetset, [N])
N_test = target_test.size

# Prepare multi-layer perceptron model, defined in net.py
if args.net == 'simple':
    model = L.Classifier(net.MnistMLP(16128, n_units, 2))
    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()
        model.to_gpu()
    xp = np if args.gpu < 0 else cuda.cupy
elif args.net == 'parallel':
    cuda.check_cuda_available()
    model = L.Classifier(net.MnistMLPParallel(16128, n_units, 2))
    xp = cuda.cupy

# Setup optimizer
optimizer = optimizers.Adam()
optimizer.setup(model)

# Init/Resume
if args.initmodel:
    print('Load model from', args.initmodel)
    serializers.load_npz(args.initmodel, model)
if args.resume:
    print('Load optimizer state from', args.resume)
    serializers.load_npz(args.resume, optimizer)
Example #8

def draw_digit_w1(data, n, i, length):
    size = 28
    pylab.subplot(28, 28, n)
    Z = data.reshape(size, size)  # convert from vector to 28x28 matrix
    Z = Z[::-1, :]  # flip vertical
    pylab.xlim(0, size)
    pylab.ylim(0, size)
    pylab.pcolor(Z)
    pylab.title("%d" % i, size=9)
    pylab.gray()
    pylab.tick_params(labelbottom="off")
    pylab.tick_params(labelleft="off")


lsizes = [784, 50, 50, 10]
model = net.MnistMLP(layer_sizes=lsizes)
serializers.load_npz('mlp.model', model)
layer = model.__getitem__("l1")
pylab.style.use('fivethirtyeight')
pylab.figure(figsize=(28, 28))
cnt = 1
for i in range(len(layer.W.data)):
    draw_digit_w1(layer.W.data[i], cnt, i, layer.W.data[9].size)
    cnt += 1

#pylab.imshow(layer.W.data)
#pylab.gray()
pylab.savefig('layer1.png')
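Note that the layer_sizes and the 'mlp.model' filename here match Example #5, which presumably produces the checkpoint this visualization loads; run that training script first.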
Example #9
mnist['target'] = mnist['target'].astype(np.int32)

N = 60000
x_train, x_test = np.split(mnist['data'], [N])
y_train, y_test = np.split(mnist['target'], [N])
N_test = y_test.size

type = args.net2
# Prepare model, defined in net.py
if args.net == 'simple':
    if args.net2 == 'cnn':
        model = L.Classifier(net.MnistCNN())
    elif args.net2 == 'sp':
        model = L.Classifier(net.MnistSP())
    else:
        model = L.Classifier(net.MnistMLP())

    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()
        model.to_gpu()
    xp = np if args.gpu < 0 else cuda.cupy
elif args.net == 'parallel':
    type = 'mlp'
    cuda.check_cuda_available()
    model = L.Classifier(net.MnistMLPParallel(784, n_units, 10))
    xp = cuda.cupy

# Setup optimizer
optimizer = optimizers.Adam()
optimizer.setup(model)
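From here the script would enter the usual learning loop; the update pattern used throughout these examples (see Example #5) looks like this, with batchsize assumed to be defined earlier:

# One training step: L.Classifier computes softmax cross-entropy on the
# minibatch and the optimizer applies Adam updates to the model.
x = chainer.Variable(xp.asarray(x_train[:batchsize]))
t = chainer.Variable(xp.asarray(y_train[:batchsize]))
optimizer.update(model, x, t)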