Example #1
# coding: utf-8
import binarybrain as bb
import numpy as np

# options
epoch = 16
mini_batch = 32
file_read = False
file_write = True

# load data
td = bb.load_cifar10()

# number of training samples (td is a dict holding the dataset arrays)
batch_size = len(td['x_train'])
print('batch_size =', batch_size)

# main network
cnv0_sub = bb.Sequential.create()
cnv0_sub.add(bb.DenseAffine.create([32]))
cnv0_sub.add(bb.BatchNormalization.create())
cnv0_sub.add(bb.ReLU.create())

cnv1_sub = bb.Sequential.create()
cnv1_sub.add(bb.DenseAffine.create([32]))
cnv1_sub.add(bb.BatchNormalization.create())
cnv1_sub.add(bb.ReLU.create())

cnv2_sub = bb.Sequential.create()
cnv2_sub.add(bb.DenseAffine.create([64]))
cnv2_sub.add(bb.BatchNormalization.create())
cnv2_sub.add(bb.ReLU.create())
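
The snippet above is truncated after the third convolution sub-network. A minimal sketch of a possible continuation follows, assembling the sub-networks and training exactly as Example #2 below does; the assembly and the layer choices here are assumptions, not part of the original example.

# Hypothetical continuation (assumption): mirrors the assembly in Example #2.
main_net = bb.Sequential.create()
main_net.add(bb.LoweringConvolution.create(cnv0_sub, 3, 3))  # 3x3 kernel
main_net.add(bb.LoweringConvolution.create(cnv1_sub, 3, 3))
main_net.add(bb.MaxPooling.create(2, 2))                     # 2x2 pooling
main_net.add(bb.LoweringConvolution.create(cnv2_sub, 3, 3))
main_net.add(bb.DenseAffine.create([10]))                    # 10 CIFAR-10 classes
main_net.set_input_shape(td['x_shape'])

loss = bb.LossSoftmaxCrossEntropy.create()
metrics = bb.MetricsCategoricalAccuracy.create()
optimizer = bb.OptimizerAdam.create()
optimizer.set_variables(main_net.get_parameters(), main_net.get_gradients())

runner = bb.Runner(main_net, "cifar10-dense-cnn", loss, metrics, optimizer)
runner.fitting(td, epoch_size=epoch, mini_batch_size=mini_batch,
               file_read=file_read, file_write=file_write)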
Example #2
import binarybrain as bb


def main():
    binary_mode = False
    epoch = 8
    mini_batch = 32
    training_modulation_size = 3
    inference_modulation_size = 3

    # load data
    td = bb.load_cifar10()

    batch_size = len(td['x_train'])
    print('batch_size =', batch_size)

    ############################
    # Learning
    ############################

    # create layers
    layer_cnv0_affine = bb.DenseAffine.create([32])
    layer_cnv0_batchnorm = bb.BatchNormalization.create()
    layer_cnv0_activation = bb.ReLU.create()

    layer_cnv1_affine = bb.DenseAffine.create([32])
    layer_cnv1_batchnorm = bb.BatchNormalization.create()
    layer_cnv1_activation = bb.ReLU.create()

    layer_cnv2_affine = bb.DenseAffine.create([64])
    layer_cnv2_batchnorm = bb.BatchNormalization.create()
    layer_cnv2_activation = bb.ReLU.create()

    layer_cnv3_affine = bb.DenseAffine.create([64])
    layer_cnv3_batchnorm = bb.BatchNormalization.create()
    layer_cnv3_activation = bb.ReLU.create()

    # main network
    cnv0_sub = bb.Sequential.create()
    cnv0_sub.add(layer_cnv0_affine)
    cnv0_sub.add(layer_cnv0_batchnorm)
    cnv0_sub.add(layer_cnv0_activation)

    cnv1_sub = bb.Sequential.create()
    cnv1_sub.add(layer_cnv1_affine)
    cnv1_sub.add(layer_cnv1_batchnorm)
    cnv1_sub.add(layer_cnv1_activation)

    cnv2_sub = bb.Sequential.create()
    cnv2_sub.add(layer_cnv2_affine)
    cnv2_sub.add(layer_cnv2_batchnorm)
    cnv2_sub.add(layer_cnv2_activation)

    cnv3_sub = bb.Sequential.create()
    cnv3_sub.add(layer_cnv3_affine)
    cnv3_sub.add(layer_cnv3_batchnorm)
    cnv3_sub.add(layer_cnv3_activation)

    main_net = bb.Sequential.create()
    main_net.add(bb.LoweringConvolution.create(cnv0_sub, 3, 3))
    main_net.add(bb.LoweringConvolution.create(cnv1_sub, 3, 3))
    main_net.add(bb.MaxPooling.create(2, 2))
    main_net.add(bb.LoweringConvolution.create(cnv2_sub, 3, 3))
    main_net.add(bb.LoweringConvolution.create(cnv3_sub, 3, 3))
    main_net.add(bb.MaxPooling.create(2, 2))
    main_net.add(bb.DenseAffine.create([512]))
    main_net.add(bb.BatchNormalization.create())
    main_net.add(bb.ReLU.create())
    main_net.add(bb.DenseAffine.create([10]))
    if binary_mode:
        main_net.add(bb.ReLU.create())

    # wrap with a binary modulator: each sample is modulated into
    # training_modulation_size binary frames, and Reduce folds the
    # network output back down to the target shape t_shape
    net = bb.Sequential.create()
    net.add(
        bb.BinaryModulation.create(
            main_net, training_modulation_size=training_modulation_size))
    net.add(bb.Reduce.create(td['t_shape']))
    net.set_input_shape(td['x_shape'])

    # select binary mode
    if binary_mode:
        net.send_command("binary true")
    else:
        net.send_command("binary false")

    # print model information
    print(net.get_info())

    # learning
    print('\n[learning]')

    loss = bb.LossSoftmaxCrossEntropy.create()
    metrics = bb.MetricsCategoricalAccuracy.create()
    optimizer = bb.OptimizerAdam.create()
    optimizer.set_variables(net.get_parameters(), net.get_gradients())

    runner = bb.Runner(net, "cifar10-dense-cnn", loss, metrics, optimizer)
    runner.fitting(td,
                   epoch_size=epoch,
                   mini_batch_size=mini_batch,
                   file_read=True,
                   file_write=True)
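
A possible continuation inside main(), reusing the Runner evaluation call that Example #3 below demonstrates; this is a sketch, not part of the original snippet.

    # sketch: score the trained net on the test data (API usage per Example #3)
    eval_runner = bb.Runner(net, "cifar10-dense-cnn",
                            bb.LossSoftmaxCrossEntropy.create(),
                            bb.MetricsCategoricalAccuracy.create())
    eval_runner.evaluation(td, mini_batch_size=mini_batch)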
Example #3
import binarybrain as bb


def main():
    # config
    epoch                     = 4
    mini_batch                = 32
    training_modulation_size  = 3
    inference_modulation_size = 3
    
    # load data
    td = bb.load_cifar10()

    batch_size = len(td['x_train'])
    print('batch_size =', batch_size)

    ############################
    # Learning
    ############################
    
    # create layers: a cascade of 6-input sparse LUT layers; the 70-unit
    # output leaves 7 copies per class for bb.Reduce to fold down to 10
    layer_sl0 = bb.SparseLut6.create([1024])
    layer_sl1 = bb.SparseLut6.create([480])
    layer_sl2 = bb.SparseLut6.create([70])
    
    # create network
    main_net = bb.Sequential.create()
    main_net.add(layer_sl0)
    main_net.add(layer_sl1)
    main_net.add(layer_sl2)
    
    # wrapping with binary modulator
    net = bb.Sequential.create()
    net.add(bb.BinaryModulation.create(main_net, training_modulation_size=training_modulation_size))
    net.add(bb.Reduce.create(td['t_shape']))
    net.set_input_shape(td['x_shape'])
    
    # set binary mode
    net.send_command('binary true')
    
    # print model information
    print(net.get_info())
    
    # learning
    print('\n[learning]')
    loss      = bb.LossSoftmaxCrossEntropy.create()
    metrics   = bb.MetricsCategoricalAccuracy.create()
    optimizer = bb.OptimizerAdam.create()
    optimizer.set_variables(net.get_parameters(), net.get_gradients())
    
    runner = bb.Runner(net, "cifar10-sparse-lut6-simple", loss, metrics, optimizer)
    runner.fitting(td, epoch_size=epoch, mini_batch_size=mini_batch, file_read=True, file_write=True)
    
    
    ################################
    # convert to FPGA
    ################################
    
    print('\n[convert to Binary LUT]')
    
    # LUT-network
    layer_bl0 = bb.BinaryLut6.create(layer_sl0.get_output_shape())
    layer_bl1 = bb.BinaryLut6.create(layer_sl1.get_output_shape())
    layer_bl2 = bb.BinaryLut6.create(layer_sl2.get_output_shape())
    
    lut_net = bb.Sequential.create()
    lut_net.add(layer_bl0)
    lut_net.add(layer_bl1)
    lut_net.add(layer_bl2)
    
    # evaluate network
    eval_net = bb.Sequential.create()
    eval_net.add(bb.BinaryModulation.create(lut_net, inference_modulation_size=inference_modulation_size))
    eval_net.add(bb.Reduce.create(td['t_shape']))
    
    # set input shape
    eval_net.set_input_shape(td['x_shape'])
    
    # parameter copy
    print('parameter copy to binary LUT-Network')
    layer_bl0.import_parameter(layer_sl0)
    layer_bl1.import_parameter(layer_sl1)
    layer_bl2.import_parameter(layer_sl2)
    
    # evaluate network
    print('evaluate LUT-Network')
    lut_runner = bb.Runner(eval_net, "cifar10-binary-lut6-simple",
                    bb.LossSoftmaxCrossEntropy.create(),
                    bb.MetricsCategoricalAccuracy.create())
    lut_runner.evaluation(td, mini_batch_size=mini_batch)
    
    # write Verilog
    print('write verilog file')
    with open('Cifar10LutSimple.v', 'w') as f:
        f.write('`timescale 1ns / 1ps\n\n')
        f.write(bb.make_verilog_from_lut('Cifar10LutSimple', [layer_bl0, layer_bl1, layer_bl2]))
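
To run the example as a standalone script, the standard Python entry-point guard applies (a trivial addition, not part of the original snippet):

if __name__ == '__main__':
    main()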
Example #4
import binarybrain as bb


def main():
    epoch = 4
    mini_batch = 32
    training_modulation_size = 3
    inference_modulation_size = 3

    # load data
    td = bb.load_cifar10()

    batch_size = len(td['x_train'])
    print('batch_size =', batch_size)

    ############################
    # Learning
    ############################

    # create layers (the *Bit classes are binary-data variants)
    layer_cnv0_sl0 = bb.SparseLut6Bit.create([192])
    layer_cnv0_sl1 = bb.SparseLut6Bit.create([32])

    layer_cnv1_sl0 = bb.SparseLut6Bit.create([1152])
    layer_cnv1_sl1 = bb.SparseLut6Bit.create([192])
    layer_cnv1_sl2 = bb.SparseLut6Bit.create([32])

    layer_cnv2_sl0 = bb.SparseLut6Bit.create([2304])
    layer_cnv2_sl1 = bb.SparseLut6Bit.create([384])
    layer_cnv2_sl2 = bb.SparseLut6Bit.create([64])

    layer_cnv3_sl0 = bb.SparseLut6Bit.create([2304])
    layer_cnv3_sl1 = bb.SparseLut6Bit.create([384])
    layer_cnv3_sl2 = bb.SparseLut6Bit.create([64])

    layer_sl4 = bb.SparseLut6Bit.create([18432])
    layer_sl5 = bb.SparseLut6Bit.create([3072])
    layer_sl6 = bb.SparseLut6Bit.create([512])

    layer_sl7 = bb.SparseLut6Bit.create([2160])
    layer_sl8 = bb.SparseLut6Bit.create([360])
    layer_sl9 = bb.SparseLut6Bit.create([60])
    layer_sl10 = bb.SparseLut6Bit.create([10])

    # main network
    cnv0_sub = bb.Sequential.create()
    cnv0_sub.add(layer_cnv0_sl0)
    cnv0_sub.add(layer_cnv0_sl1)

    cnv1_sub = bb.Sequential.create()
    cnv1_sub.add(layer_cnv1_sl0)
    cnv1_sub.add(layer_cnv1_sl1)
    cnv1_sub.add(layer_cnv1_sl2)

    cnv2_sub = bb.Sequential.create()
    cnv2_sub.add(layer_cnv2_sl0)
    cnv2_sub.add(layer_cnv2_sl1)
    cnv2_sub.add(layer_cnv2_sl2)

    cnv3_sub = bb.Sequential.create()
    cnv3_sub.add(layer_cnv3_sl0)
    cnv3_sub.add(layer_cnv3_sl1)
    cnv3_sub.add(layer_cnv3_sl2)

    main_net = bb.Sequential.create()
    main_net.add(bb.LoweringConvolutionBit.create(cnv0_sub, 3, 3))
    main_net.add(bb.LoweringConvolutionBit.create(cnv1_sub, 3, 3))
    main_net.add(bb.MaxPoolingBit.create(2, 2))
    main_net.add(bb.LoweringConvolutionBit.create(cnv2_sub, 3, 3))
    main_net.add(bb.LoweringConvolutionBit.create(cnv3_sub, 3, 3))
    main_net.add(bb.MaxPoolingBit.create(2, 2))
    main_net.add(layer_sl4)
    main_net.add(layer_sl5)
    main_net.add(layer_sl6)
    main_net.add(layer_sl7)
    main_net.add(layer_sl8)
    main_net.add(layer_sl9)
    main_net.add(layer_sl10)

    # wrapping with binary modulator
    net = bb.Sequential.create()
    net.add(
        bb.BinaryModulationBit.create(
            main_net, training_modulation_size=training_modulation_size))
    net.add(bb.Reduce.create(td['t_shape']))
    net.set_input_shape(td['x_shape'])

    # set binary mode
    net.send_command("binary true")

    # print model information
    print(net.get_info())

    # learning
    print('\n[learning]')

    loss = bb.LossSoftmaxCrossEntropy.create()
    metrics = bb.MetricsCategoricalAccuracy.create()
    optimizer = bb.OptimizerAdam.create()
    optimizer.set_variables(net.get_parameters(), net.get_gradients())

    runner = bb.Runner(net, "cifar10-sparse-lut6-cnn", loss, metrics,
                       optimizer)
    runner.fitting(td,
                   epoch_size=epoch,
                   mini_batch_size=mini_batch,
                   file_read=True,
                   file_write=True)
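
Example #4 stops after training. If the trained sparse LUT layers were to be exported to FPGA, the conversion would presumably follow Example #3's pattern; the sketch below shows the idea for the two tail layers only. Whether bb.BinaryLut6 and import_parameter accept the *Bit variants is an assumption, and the file and module names are hypothetical.

    # Hedged sketch: freeze the trained sparse LUTs into fixed binary LUTs,
    # following Example #3. Applicability to the *Bit variants is assumed.
    layer_bl9 = bb.BinaryLut6.create(layer_sl9.get_output_shape())
    layer_bl10 = bb.BinaryLut6.create(layer_sl10.get_output_shape())
    layer_bl9.import_parameter(layer_sl9)
    layer_bl10.import_parameter(layer_sl10)

    with open('Cifar10LutCnn.v', 'w') as f:  # hypothetical file/module name
        f.write('`timescale 1ns / 1ps\n\n')
        f.write(bb.make_verilog_from_lut('Cifar10LutCnn',
                                         [layer_bl9, layer_bl10]))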