# 예제 #1 (Example #1)
import logging
import numpy as np
import matplotlib.pyplot as plt
import pdb
from dorefa_ops import get_dorefa
from math_ops import *
from random import randint

# Emit everything down to DEBUG from the root logger so training progress is visible.
logging.getLogger().setLevel(logging.DEBUG)

# DoReFa-Net quantization bit widths: 1-bit weights, 1-bit activations,
# 32-bit (i.e. effectively full-precision) gradients.
BITW = 1
BITA = 1
BITG = 32  # TODO: we don't have binarized gradient implementation yet.

# get quantized functions
# f_w/f_a/f_g quantize weights, activations, and gradients respectively
# (see dorefa_ops.get_dorefa — not visible in this file).
f_w, f_a, f_g = get_dorefa(BITW, BITA, BITG)


def to4d(img):
    """Convert a batch of flat images to an NCHW float32 tensor.

    Parameters
    ----------
    img : ndarray
        Array whose leading axis is the batch; total size must be
        N * 784 (28x28 MNIST images). Presumably uint8 pixel data in
        [0, 255] — values are scaled by 1/255.

    Returns
    -------
    ndarray of shape (N, 1, 28, 28), dtype float32.
    """
    scaled = img.astype(np.float32) / 255
    return scaled.reshape(-1, 1, 28, 28)


def prepair_data(train_img, val_img, train_lbl, val_lbl, batch_size):
    """Wrap MNIST arrays in MXNet data iterators.

    Training data is shuffled each epoch; validation data is not.
    Returns a ``(train_iter, val_iter)`` pair of ``mx.io.NDArrayIter``.

    NOTE(review): assumes ``mx`` (mxnet) is imported elsewhere in the
    module — no ``import mxnet as mx`` is visible in this file; confirm.
    """
    training = mx.io.NDArrayIter(
        to4d(train_img), train_lbl, batch_size, shuffle=True)
    validation = mx.io.NDArrayIter(to4d(val_img), val_lbl, batch_size)
    return training, validation

# 예제 #2 (Example #2)
import logging
import numpy as np
import matplotlib.pyplot as plt
import pdb
from dorefa_ops import get_dorefa
from math_ops import *
from random import randint

# Emit everything down to DEBUG from the root logger so training progress is visible.
logging.getLogger().setLevel(logging.DEBUG)

# DoReFa-Net quantization bit widths: 1-bit weights, 1-bit activations,
# 6-bit gradients (note: Example #1 in this file uses 32-bit gradients).
BITW = 1
BITA = 1
BITG = 6 # TODO: we don't have binarized gradient implementation yet.

# get quantized functions
# f_w/f_a/f_g quantize weights, activations, and gradients respectively
# (see dorefa_ops.get_dorefa — not visible in this file).
f_w, f_a, f_g = get_dorefa(BITW, BITA, BITG)

def to4d(img):
    """Reshape flat image rows into an (N, 1, 28, 28) float32 batch.

    The leading axis of ``img`` is kept as the batch dimension; pixel
    values (presumably uint8 in [0, 255]) are rescaled by 1/255.
    """
    batch = img.shape[0]
    as_float = img.reshape(batch, 1, 28, 28).astype(np.float32)
    return as_float / 255

def prepair_data(train_img, val_img, train_lbl, val_lbl, batch_size):
    """Build the training/validation MXNet iterators for MNIST arrays.

    Only the training iterator shuffles its samples. Returns the pair
    ``(train_iter, val_iter)``.

    NOTE(review): assumes ``mx`` (mxnet) is imported elsewhere in the
    module — no ``import mxnet as mx`` is visible in this file; confirm.
    """
    train_iter = mx.io.NDArrayIter(
        to4d(train_img),
        train_lbl,
        batch_size,
        shuffle=True,
    )
    val_iter = mx.io.NDArrayIter(
        to4d(val_img),
        val_lbl,
        batch_size,
    )
    return train_iter, val_iter

def get_lenet():
	"""
	original lenet
	"""
	data = mx.symbol.Variable('data')
	# first conv layer