Example #1
def exp_dense():
    np.random.seed(10)
    cuda.set_cuda_active(False)
    a = np.random.rand(32, 320).astype(np.float32)
    b = np.random.rand(32, 80).astype(np.float32)
    layer1 = rm.Dense(input_size=320, output_size=100)
    layer2 = rm.Dense(input_size=100, output_size=80)
    ga = rm.Variable(a, auto_update=False)
    gb = rm.Variable(b, auto_update=False)
    opt = Sgd(0.01, momentum=0.3)
    start_t = time.time()

    for _ in range(500):
        loss = rm.Sum((layer2(rm.Sigmoid(layer1(ga))) - gb)**2) / 32
        loss.ensure_cpu()
        print(loss)
        grad = loss.grad()
        grad.update(opt)
    print(time.time() - start_t)
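For reference, Sgd(0.01, momentum=0.3) appears to configure classic momentum SGD. A minimal NumPy sketch of that update rule (an illustration, not ReNom's actual implementation; the function name is made up):

import numpy as np

def sgd_momentum_step(w, grad, v, lr=0.01, momentum=0.3):
    # classic momentum: v <- momentum * v - lr * grad; w <- w + v
    v = momentum * v - lr * grad
    return w + v, v
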
Example #2
def exp_convolution2():
    np.random.seed(10)
    cuda.set_cuda_active(True)
    a = np.random.randn(8, 3, 12, 12).astype(np.float32)
    b = np.random.randn(8, 16, 10, 10).astype(np.float32)
    layer1 = rm.Conv2d(channel=16, input_size=a.shape[1:])

    ga = rm.Variable(a, auto_update=False)
    gb = rm.Variable(b, auto_update=False)

    opt = Sgd(0.001, momentum=0.3)
    start_t = time.time()
    for _ in range(100000):
        loss = rm.Sum((rm.Sigmoid(layer1(ga)) - gb)**2) / 8
        loss.ensure_cpu()
        print(loss)
        grad = loss.grad()
        grad.update(opt)
        del loss
    print(time.time() - start_t)
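These examples all compute a sum-of-squared-errors loss normalized by the batch size; note also the explicit del loss here, presumably to drop the computation graph (and its GPU memory) before the next of the 100000 iterations. A plain-NumPy sketch of the same loss quantity, with pred and target standing in for the forward output and gb:

import numpy as np

def batch_sse(pred, target):
    # sum of squared errors, divided by the batch dimension,
    # matching rm.Sum((pred - target)**2) / batch
    return np.sum((pred - target) ** 2) / pred.shape[0]
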
Example #3
def exp_convolution1():
    np.random.seed(10)
    # Final losses differ between devices because
    # CUDNN_CONVOLUTION_FWD_ALGO_GEMM is not deterministic:
    # 1724.07080078 GPU
    # 1715.86767578 CPU
    cuda.set_cuda_active(True)
    a = np.random.randn(8 * 2, 64, 32, 32).astype(np.float32)
    b = np.random.randn(8 * 2, 32, 28, 28).astype(np.float32)
    layer1 = rm.Conv2d(channel=32, input_size=a.shape[1:])
    layer2 = rm.Conv2d(channel=32, input_size=(32, 30, 30))

    ga = rm.Variable(a, auto_update=False)
    gb = rm.Variable(b, auto_update=False)

    opt = Sgd(0.0001, momentum=0.0)
    start_t = time.time()
    for _ in range(100):
        loss = rm.Sum((layer2(rm.Relu(layer1(ga))) - gb)**2) / 8  # note: the batch size here is 16 (8 * 2), not 8
        loss.ensure_cpu()
        grad = loss.grad()
        grad.update(opt)
        print(loss)
    print(time.time() - start_t)
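The shapes imply 3x3 kernels with stride 1 and no padding (inferred from the arrays, since the snippet never passes a filter size): 32x32 shrinks to 30x30 after layer1 and to 28x28 after layer2. A small sketch of that arithmetic:

def conv_out_size(in_size, kernel=3, stride=1, padding=0):
    # standard convolution output size: (in + 2*pad - k) // stride + 1
    return (in_size + 2 * padding - kernel) // stride + 1

assert conv_out_size(32) == 30  # layer1: 32x32 -> 30x30
assert conv_out_size(30) == 28  # layer2: 30x30 -> 28x28
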
Example #4
        print("")
        t3 = self.layer2(t2)
        print("hidden x output weight:\n{}".format(self.layer2.params.w))
        print("hidden x output bias:\n{}".format(self.layer2.params.b))
        print("")
        print("output:\n{}".format(t3))
        print("output shape:{}".format(t3.shape))
        print("")
        return t3


epoch = 50
batch = 1
N = len(X)

optimizer = Sgd()

network = Mnist()
learning_curve = []

for i in range(epoch):
    perm = np.random.permutation(N)
    loss = 0
    for j in range(0, N // batch):
        train_batch = X[perm[j * batch:(j + 1) * batch]]
        response_batch = y[perm[j * batch:(j + 1) * batch]]
        with network.train():
            result = network(train_batch)
            l = rm.sigmoid_cross_entropy(result, response_batch)
        grad = l.grad()
        grad.update(optimizer)
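rm.sigmoid_cross_entropy fuses a sigmoid with binary cross-entropy. A numerically naive NumPy sketch of that quantity, averaged over the batch (ReNom's exact reduction and numerical stabilization may differ):

import numpy as np

def sigmoid_cross_entropy(z, t, eps=1e-7):
    # binary cross-entropy on sigmoid(z), mean over the batch dimension
    p = 1.0 / (1.0 + np.exp(-z))
    p = np.clip(p, eps, 1.0 - eps)
    return -np.sum(t * np.log(p) + (1.0 - t) * np.log(1.0 - p)) / z.shape[0]
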
Example #5
class Trainer:  # class name assumed; the snippet shows only this constructor
    def __init__(self, epoch, batch_size, network, opt=Sgd(), verbose=0):
        self.epoch = epoch
        self.batch_size = batch_size
        self.network = network
        self.opt = opt
        self.verbose = verbose

class Mnist(rm.Model):
    def __init__(self):
        self.layer1 = rm.Dense(output_size=5)
        self.layer2 = rm.Dense(1)

    def forward(self, x):
        t1 = self.layer1(x)
        t2 = rm.relu(t1)
        t3 = self.layer2(t2)
        return t3


epoch = 50
batch = 1
N = len(X)
optimizer = Sgd(lr=0.1, momentum=0.4)

network = Mnist()
learning_curve = []

for i in range(epoch):
    perm = np.random.permutation(N)
    loss = 0
    for j in range(0, N // batch):
        train_batch = X[perm[j * batch:(j + 1) * batch]]
        response_batch = y[perm[j * batch:(j + 1) * batch]]
        with network.train():
            result = network(train_batch)
            l = rm.sigmoid_cross_entropy(result, response_batch)
        grad = l.grad()
        grad.update(optimizer)
Example #6
class MNist(rm.Model):  # name taken from the nn = MNist() call below
    def __init__(self):
        self.layer1 = rm.Dense(output_size=100)
        self.layer2 = rm.Dense(output_size=10)

    def forward(self, x):
        return self.layer2(rm.relu(self.layer1(x)))


epoch = 10
batch = 100
count = 0

learning_curve = []
test_learning_curve = []

opt = Adam()  # defined but immediately overridden by the next line
opt = Sgd()

N = len(X_train)

nn = MNist()

for i in range(epoch):
    start_t = time.time()
    perm = np.random.permutation(N)
    loss = 0
    for j in range(0, N // batch):
        train_batch = X_train[perm[j * batch:(j + 1) * batch]]
        response_batch = labels_train[perm[j * batch:(j + 1) * batch]]

        with nn.train():
            result = nn(train_batch)
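Every training loop in these examples follows the same minibatching pattern: draw one index permutation per epoch, then slice contiguous batches from it. The pattern in isolation (a generic sketch, not ReNom-specific):

import numpy as np

def minibatches(X, y, batch_size):
    # yield one epoch of shuffled (X, y) minibatches
    perm = np.random.permutation(len(X))
    for j in range(len(X) // batch_size):
        idx = perm[j * batch_size:(j + 1) * batch_size]
        yield X[idx], y[idx]
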
Example #8
classes = 10
bbox = 5

# load image generator
print("loading image generator...")
generator = ImageGenerator(item_path, background_path)

# load model
print("loading initial model...")
model = YOLOv2(classes=classes, bbox=bbox)
model.load(initial_weight_file)
num_gpu = cuGetDeviceCount()

#model.to_gpu()

opt = Sgd(lr=learning_rate, momentum=momentum)

#trainer = Trainer(model, batch_size=32, loss_func=yolo_detector, num_epoch=1, optimizer=opt, num_gpu=num_gpu)

#input_width=input_height=320
#x, t = generator.generate_samples(
#        n_samples=16,
#        n_items=3,
#        crop_width=input_width,
#        crop_height=input_height,
#        min_item_scale=0.1,
#        max_item_scale=0.4,
#        rand_angle=25,
#        minimum_crop=0.8,
#        delta_hue=0.01,
#        delta_sat_scale=0.5,
Example #9
        print("")
        t3 = self.layer2(t2)
        print("hidden x output weight:\n{}".format(self.layer2.params.w))
        print("hidden x output bias:\n{}".format(self.layer2.params.b))
        print("")
        print("output:\n{}".format(t3))
        print("output shape:{}".format(t3.shape))
        print("")
        return t3


epoch = 1000
batch = 1
N = len(X)

optimizer = Sgd(lr=0.05, momentum=0.0)

network = Mnist()

learning_curve = []
for i in range(epoch):
    perm = np.random.permutation(N)
    loss = 0
    for j in range(0, N // batch):
        train_batch = X[perm[j * batch:(j + 1) * batch]]
        response_batch = y[perm[j * batch:(j + 1) * batch]]
        with network.train():
            result = network(train_batch)
            #l = rm.sigmoid_cross_entropy(result, response_batch)
            l = rm.mse(result, response_batch)
        grad = l.grad()
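This example trains with mean squared error (rm.mse) instead of sigmoid cross-entropy, as the commented-out line shows. For comparison, a minimal NumPy sketch of MSE (ReNom's exact normalization, e.g. an extra factor of 1/2, may differ):

import numpy as np

def mse(pred, target):
    # mean squared error over all elements
    return np.mean((pred - target) ** 2)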