def __init__(self,
             DATA_DIR,
             ALPHA_DIR,
             MODEL,
             BATCH_SIZE,
             HIDDEN_DIM,
             SEQ_LENGTH,
             LAYER_NUM,
             DBG=False):
    """Load the dataset, build/restore the model and prepare a Trainer.

    DATA_DIR/ALPHA_DIR locate the corpus and alphabet, MODEL names the model,
    and the remaining arguments size the network and the training batches.
    DBG toggles debug logging.
    """
    set_debug(DBG)

    debug('[TextRNN]: Loading data...')
    inputs, targets, vocab_size, index_to_char, alphabet = load_data(
        DATA_DIR, ALPHA_DIR, SEQ_LENGTH)

    debug('[TextRNN]: Creating ModelHandler...')
    self.modelhandler = ModelHandler(HIDDEN_DIM, vocab_size, LAYER_NUM, MODEL)

    debug('[TextRNN]: Loading model...')
    self.model = self.modelhandler.load()

    debug('[TextRNN]: Creating Trainer...')
    self.trainer = Trainer(MODEL, self.model, inputs, targets, vocab_size,
                           index_to_char, alphabet, BATCH_SIZE)
class TextRNN:
    """Facade wiring together data loading, model handling and training for a
    character-generating RNN (see ``generate``)."""

    def __init__(self,
                 DATA_DIR,
                 ALPHA_DIR,
                 MODEL,
                 BATCH_SIZE,
                 HIDDEN_DIM,
                 SEQ_LENGTH,
                 LAYER_NUM,
                 DBG=False):
        """Load the dataset, build/restore the model and prepare a Trainer."""
        set_debug(DBG)

        debug('[TextRNN]: Loading data...')
        inputs, targets, vocab_size, index_to_char, alphabet = load_data(
            DATA_DIR, ALPHA_DIR, SEQ_LENGTH)

        debug('[TextRNN]: Creating ModelHandler...')
        self.modelhandler = ModelHandler(HIDDEN_DIM, vocab_size, LAYER_NUM,
                                         MODEL)

        debug('[TextRNN]: Loading model...')
        self.model = self.modelhandler.load()

        debug('[TextRNN]: Creating Trainer...')
        self.trainer = Trainer(MODEL, self.model, inputs, targets, vocab_size,
                               index_to_char, alphabet, BATCH_SIZE)

    def train(self, epochs=50):
        """Delegate `epochs` rounds of training to the underlying Trainer."""
        debug('[TextRNN]: Training {} times...'.format(epochs))
        self.trainer.train(epochs)

    def generate(self, length, initx):
        """Return `length` generated characters seeded with `initx`."""
        debug('[TextRNN]: Generating {} characters...'.format(length))
        return self.trainer.generate(length, initx)
Example #3
0
    def load_model(self, model_name):
        """Restore a pickled triplet-model configuration and its weights.

        Reads ./models/triplet/<model_name>.pkl for the saved attribute dict,
        rebuilds the data and model handlers from it, recreates the triplet
        network, then loads the net weights from ./models/triplet/<model_name>.
        """
        base_path = './models/triplet/' + model_name
        with open(base_path + '.pkl', 'rb') as file:
            attrs = pickle.load(file)

        self.data_handler = DataHandler(attrs['dataset_name'], attrs['classes'])
        self.input_feature_size = self.data_handler.n_features
        self.model_handler = ModelHandler(
            attrs['model_number'], attrs['embedding_size'],
            input_feature_dim=self.data_handler.shape)
        self.embedding_model = self.model_handler.model
        self.embedding_size = self.model_handler.embedding_size
        self.alpha = attrs['alpha']
        self.batch_size = attrs['batch_size']
        self.epochs = attrs['epochs']
        self.steps_per_epoch = attrs['steps_per_epoch']
        self.mining_method = attrs['mining_method']
        self.number_of_samples_per_class = attrs['number_of_samples_per_class']

        self.create_triplet_model()
        self.net.load_weights(base_path)
def main_func(args):
    """Train a beam-selection model from coord/img/lidar inputs.

    Args:
        args: parsed argparse namespace. Fields used below include
            `data_folder`, `input` (some subset of {'coord','img','lidar'})
            and `plots` -- inferred from usage in this function; confirm
            against the argument parser.
    """

    import csv

    import tensorflow as tf

    from tensorflow.keras import metrics

    from tensorflow.keras.models import model_from_json,Model

    from tensorflow.keras.layers import Dense,concatenate,Dropout

    from tensorflow.keras.losses import categorical_crossentropy

    from tensorflow.keras.optimizers import Adadelta,Adam

    from sklearn.model_selection import train_test_split

    from ModelHandler import ModelHandler

    import numpy as np

    import argparse




    ###############################################################################

    # Support functions

    ###############################################################################



    #For description about top-k, including the explanation on how they treat ties (which can be misleading

    #if your classifier is outputting a lot of ties (e.g. all 0's will lead to high top-k)

    #https://www.tensorflow.org/api_docs/python/tf/nn/in_top_k

    def top_10_accuracy(y_true, y_pred):
        # Fraction of samples whose true class ranks among the 10 highest
        # predicted scores (top-k semantics, incl. tie handling, per the
        # tf.nn.in_top_k note above).
        return metrics.top_k_categorical_accuracy(y_true, y_pred, k=10)
    
    
    
    def top_50_accuracy(y_true, y_pred):
        # Fraction of samples whose true class ranks among the 50 highest
        # predicted scores.
        return metrics.top_k_categorical_accuracy(y_true, y_pred, k=50)
    
    
    
    def sub2ind(array_shape, rows, cols):
        # Row-major (row, col) subscripts -> flat indices; any index that
        # falls outside the array is mapped to -1.
        flat = rows * array_shape[1] + cols
        out_of_range = (flat < 0) | (flat >= array_shape[0] * array_shape[1])
        flat[out_of_range] = -1
        return flat
    
    
    
    def ind2sub(array_shape, ind):
        # Flat indices -> (rows, cols) subscripts, with out-of-range entries
        # mapped to -1 first.
        # BUG FIX: the original used true division ('/'), which returns float
        # rows under Python 3; '//' keeps them integral. It also mutated the
        # caller's array in place; we work on a copy instead.
        ind = ind.copy()
        invalid = (ind < 0) | (ind >= array_shape[0] * array_shape[1])
        ind[invalid] = -1
        rows = ind.astype('int') // array_shape[1]
        cols = ind % array_shape[1]
        return (rows, cols)
    
    #Function to convert to log scale and removing small values below (max - (thresholdBelowMax=6)) and calculating percentage of power of each (Ti,Ri) combinatiion for each example
    
    def beamsLogScale(y, thresholdBelowMax):
        # Per example (row): convert powers to dB, zero every entry more than
        # `thresholdBelowMax` dB below the row maximum, then renormalize the
        # row so the remaining entries sum to 1. Operates on y in place and
        # also returns it.
        for row in range(y.shape[0]):
            sample = y[row, :]
            db = 20 * np.log10(sample + 1e-30)  # epsilon guards log10(0)
            cutoff = np.amax(db) - thresholdBelowMax
            sample[db < cutoff] = 0
            sample = sample / sum(sample)
            y[row, :] = sample
        return y
    
    #Function to load output data and convert 3D matrix (9000 example * 8 Receiver * 32 Transmitter) to 2D Matric (9000 example * 256 (Ti,Ri)
    
    def getBeamOutput(output_file):
        # Load the beam-power npz, flatten the per-example (Rx, Tx) grid into
        # a single class axis (Fortran order), and log-threshold/normalize it.
        # Returns (y, num_classes).
        thresholdBelowMax = 6

        print("Reading dataset...", output_file)
        cache = np.load(output_file)

        beams = np.abs(cache['output_classification'])
        beams /= np.max(beams)  # scale so the global maximum is 1
        num_classes = beams.shape[1] * beams.shape[2]

        y = beams.reshape(beams.shape[0], num_classes, order='F')
        return beamsLogScale(y, thresholdBelowMax), num_classes
    
    def moving_average(window_size, list_avg):
        # Simple moving average over `list_avg` with the given window size;
        # returns one average per full window (empty list if the input is
        # shorter than the window).
        n_windows = len(list_avg) - window_size + 1
        return [sum(list_avg[start:start + window_size]) / window_size
                for start in range(n_windows)]
    
    
    
    ###############################################################################
    # Data configuration
    ###############################################################################
    tf.device('/device:GPU:0')

    data_dir = args.data_folder + '/'
    tgtRec = 3  # NOTE(review): unused below -- kept for parity with original

    if 'coord' in args.input:
        # Coordinate inputs: stacked train+validation arrays, plus the train
        # shape used later to size the coord MLP.
        coord_train_input_file = data_dir + 'coord_input/coord_train.npz'
        X_coord_train = np.load(coord_train_input_file)['coordinates']
        coord_validation_input_file = data_dir + 'coord_input/coord_validation.npz'
        X_coord_validation = np.load(coord_validation_input_file)['coordinates']
        X_coord = np.concatenate((X_coord_train, X_coord_validation))
        coord_train_input_shape = X_coord_train.shape

    if 'img' in args.input:
        resizeFac = 20  # Resize Factor (baked into the input file names)
        nCh = 1  # The number of channels of the image
        imgDim = (360, 640)  # Image dimensions
        method = 1
        img_train_input_file = (data_dir + 'image_input/img_input_train_' +
                                str(resizeFac) + '.npz')
        print("Reading dataset... ", img_train_input_file)
        X_img_train = np.load(img_train_input_file)['inputs']
        img_validation_input_file = (data_dir +
                                     'image_input/img_input_validation_' +
                                     str(resizeFac) + '.npz')
        print("Reading dataset... ", img_validation_input_file)
        X_img_validation = np.load(img_validation_input_file)['inputs']
        X_img = np.concatenate((X_img_train, X_img_validation))
        img_train_input_shape = X_img_train.shape

    if 'lidar' in args.input:
        lidar_train_input_file = data_dir + 'lidar_input/lidar_train.npz'
        print("Reading dataset... ", lidar_train_input_file)
        X_lidar_train = np.load(lidar_train_input_file)['input']
        lidar_validation_input_file = data_dir + 'lidar_input/lidar_validation.npz'
        print("Reading dataset... ", lidar_validation_input_file)
        X_lidar_validation = np.load(lidar_validation_input_file)['input']
        X_lidar = np.concatenate((X_lidar_train, X_lidar_validation))
        lidar_train_input_shape = X_lidar_train.shape

    ###############################################################################
    # Output configuration
    ###############################################################################
    output_train_file = data_dir + 'beam_output/beams_output_train.npz'
    y_train, num_classes = getBeamOutput(output_train_file)

    output_validation_file = data_dir + 'beam_output/beams_output_validation.npz'
    y_validation, _ = getBeamOutput(output_validation_file)

    y = np.concatenate((y_train, y_validation))

    ##############################################################################
    # Data split
    ##############################################################################
    # BUG FIX: the original called train_test_split(X_coord, X_lidar, y, ...)
    # unconditionally, which raises NameError whenever 'coord' or 'lidar' was
    # not requested. Re-split jointly only when both modalities are present;
    # otherwise the per-file train/validation arrays loaded above are used.
    if 'coord' in args.input and 'lidar' in args.input:
        (X_coord_train, X_coord_validation,
         X_lidar_train, X_lidar_validation,
         y_train, y_validation) = train_test_split(
             X_coord, X_lidar, y, test_size=0.15, random_state=1)
    
    ##############################################################################
    # Model configuration
    ##############################################################################
    # 'multimodal' is False for a single input modality, otherwise the number
    # of selected modalities (2 or 3).
    multimodal = False if len(args.input) == 1 else len(args.input)

    num_epochs = 100
    batch_size = 32
    validationFraction = 0.2  # from 0 to 1 (NOTE(review): unused below)
    modelHand = ModelHandler()
    opt = Adam()

    def _compile_and_summarize(m):
        # Every model variant below used an identical compile + summary block;
        # factored out here to avoid five copies.
        m.compile(loss=categorical_crossentropy,
                  optimizer=opt,
                  metrics=[metrics.categorical_accuracy,
                           metrics.top_k_categorical_accuracy,
                           top_50_accuracy, top_10_accuracy])
        m.summary()

    if 'coord' in args.input:
        coord_model = modelHand.createArchitecture('coord_mlp', num_classes,
                                                   coord_train_input_shape[1],
                                                   'complete')
    if 'img' in args.input:
        num_epochs = 100
        if nCh == 1:
            img_model = modelHand.createArchitecture(
                'light_image', num_classes,
                [img_train_input_shape[1], img_train_input_shape[2], 1],
                'complete')
        else:
            img_model = modelHand.createArchitecture(
                'light_image', num_classes,
                [img_train_input_shape[1], img_train_input_shape[2],
                 img_train_input_shape[3]], 'complete')
    if 'lidar' in args.input:
        lidar_model = modelHand.createArchitecture(
            'lidar_marcus', num_classes,
            [lidar_train_input_shape[1], lidar_train_input_shape[2],
             lidar_train_input_shape[3]], 'complete')

    if multimodal == 2:
        if 'coord' in args.input and 'lidar' in args.input:
            combined = concatenate([coord_model.output, lidar_model.output])
            z = Dense(num_classes, activation="softmax")(combined)
            model = Model(inputs=[coord_model.input, lidar_model.input],
                          outputs=z)
            _compile_and_summarize(model)
            hist = model.fit(
                [X_coord_train, X_lidar_train], y_train,
                validation_data=([X_coord_validation, X_lidar_validation],
                                 y_validation),
                epochs=num_epochs, batch_size=batch_size)
            # Persist architecture + weights for the coord+lidar combination.
            with open("model.json", "w") as json_file:
                json_file.write(model.to_json())
            model.save_weights("model.h5")
        elif 'coord' in args.input and 'img' in args.input:
            combined = concatenate([coord_model.output, img_model.output])
            # NOTE(review): a 'relu' output layer with categorical
            # cross-entropy looks like a bug (the coord+lidar branch above
            # uses 'softmax'); behavior intentionally left unchanged.
            z = Dense(num_classes, activation="relu")(combined)
            model = Model(inputs=[coord_model.input, img_model.input],
                          outputs=z)
            _compile_and_summarize(model)
            hist = model.fit(
                [X_coord_train, X_img_train], y_train,
                validation_data=([X_coord_validation, X_img_validation],
                                 y_validation),
                epochs=num_epochs, batch_size=batch_size)
        else:
            combined = concatenate([lidar_model.output, img_model.output])
            z = Dense(num_classes, activation="relu")(combined)
            model = Model(inputs=[lidar_model.input, img_model.input],
                          outputs=z)
            _compile_and_summarize(model)
            hist = model.fit(
                [X_lidar_train, X_img_train], y_train,
                validation_data=([X_lidar_validation, X_img_validation],
                                 y_validation),
                epochs=num_epochs, batch_size=batch_size)
    elif multimodal == 3:
        combined = concatenate([lidar_model.output, img_model.output,
                                coord_model.output])
        z = Dense(num_classes, activation="relu")(combined)
        model = Model(inputs=[lidar_model.input, img_model.input,
                              coord_model.input], outputs=z)
        _compile_and_summarize(model)
        hist = model.fit(
            [X_lidar_train, X_img_train, X_coord_train], y_train,
            validation_data=([X_lidar_validation, X_img_validation,
                              X_coord_validation], y_validation),
            epochs=num_epochs, batch_size=batch_size)
    else:
        # Single-modality training.
        if 'coord' in args.input:
            model = coord_model
            _compile_and_summarize(model)
            hist = model.fit(
                X_coord_train, y_train,
                validation_data=(X_coord_validation, y_validation),
                epochs=num_epochs, batch_size=batch_size)
        elif 'img' in args.input:
            model = img_model
            _compile_and_summarize(model)
            hist = model.fit(
                X_img_train, y_train,
                validation_data=(X_img_validation, y_validation),
                epochs=num_epochs, batch_size=batch_size)
        else:
            model = lidar_model
            _compile_and_summarize(model)
            hist = model.fit(
                X_lidar_train, y_train,
                validation_data=(X_lidar_validation, y_validation),
                epochs=num_epochs, batch_size=batch_size)

    if args.plots:
        import matplotlib
        import matplotlib.pyplot as plt
        matplotlib.rcParams.update({'font.size': 10})

        acc = hist.history['top_10_accuracy']
        val_acc = hist.history['val_top_10_accuracy']

        # Dump per-epoch top-10 accuracies; 'with' guarantees the files are
        # closed even on error (the original leaked handles on exceptions).
        with open('Train_acc_Combinedmodel_Lidar_Coor.txt', "w") as f1:
            for s in acc:
                f1.write(str(s) + "\n")
        with open('Val_acc_Combinedmodel_Lidar_Coor.txt', "w") as f2:
            for s in val_acc:
                f2.write(str(s) + "\n")

        epochs = range(1, len(acc) + 1)
        plt.xlabel('Epochs')
        plt.ylabel('Accuracy')
        plt.title('Model Accuracy')
        #plt.ylim(0.7,1)
        plt.plot(epochs, acc, 'b--',
                 label='Train_acc_Combinedmodel_Lidar_Coor', linewidth=2)
        plt.plot(epochs, val_acc, 'g--',
                 label='Val_acc_Combinedmodel_Lidar_Coor', linewidth=2)
        plt.legend()
        plt.show()
Example #5
0
# Beam labels: getBeamOutput (defined elsewhere in the original file) flattens
# the per-example power grid into class distributions.
y_train, num_classes = getBeamOutput(output_train_file)

output_validation_file = data_dir + "beam_output/beams_output_validation.npz"
y_validation, _ = getBeamOutput(output_validation_file)

##############################################################################
# Model configuration
##############################################################################

# multimodal
# False for a single input modality, otherwise the number of modalities.
multimodal = False if len(args.input) == 1 else len(args.input)

num_epochs = 3
batch_size = 32
validationFraction = 0.2  # from 0 to 1
# NOTE(review): validationFraction appears unused in this excerpt.
modelHand = ModelHandler()
opt = Adam()

if "coord" in args.input:
    coord_model = modelHand.createArchitecture("coord_mlp", num_classes,
                                               coord_train_input_shape[1],
                                               "complete")
if "img" in args.input:
    num_epochs = 5  # images train longer than the default 3 epochs
    if nCh == 1:
        # Single-channel images: explicit trailing channel dimension of 1.
        img_model = modelHand.createArchitecture(
            "light_image",
            num_classes,
            [img_train_input_shape[1], img_train_input_shape[2], 1],
            "complete",
        )
Example #6
0
                                                  (20, 200, 10))
            # y_validation, X_lidar_validation = balance_data(Initial_labels_val,X_lidar_validation,0.001,(20, 200, 10))
            save_npz(args.augmented_folder + 'lidar_input/', 'lidar_train.npz',
                     X_lidar_train, 'lidar_validation.npz', X_lidar_validation)
            save_npz(args.augmented_folder + 'beam_output/',
                     'beams_output_train.npz', y_train,
                     'beams_output_validation.npz', y_validation)

##############################################################################
# Model configuration
##############################################################################
#multimodal
# False for a single input modality, otherwise the number of modalities.
multimodal = False if len(args.input) == 1 else len(args.input)
fusion = False if len(args.input) == 1 else True  # fuse only when >1 modality

modelHand = ModelHandler()
# Adam with an explicit CLI-provided learning rate; the remaining arguments
# are spelled out at their defaults.
opt = Adam(lr=args.lr,
           beta_1=0.9,
           beta_2=0.999,
           epsilon=None,
           decay=0.0,
           amsgrad=False)

if 'coord' in args.input:
    if args.restore_models:
        # Restore a previously trained coordinate model: structure from JSON,
        # then weights (by layer name).
        coord_model = load_model_structure(args.model_folder +
                                           'coord_model.json')
        coord_model.load_weights(args.model_folder + 'best_weights.coord.h5',
                                 by_name=True)
    else:
        coord_model = modelHand.createArchitecture('coord_mlp', num_classes,
Example #7
0
from SiameseNet import SiameseNet
from ModelHandler import ModelHandler
from DataHandler import DataHandler
from plotters import Plotter

# %% MNIST dataset
# Digits 0-6 are selected for training; the commented handler below held out
# 7-9 (presumably as novel classes -- confirm with the experiment notes).
dh = DataHandler("MNIST", classes_to_select=[0, 1, 2, 3, 4, 5, 6])
#dh_newdata = DataHandler("MNIST", classes_to_select=[7,8,9])

# %% Define embedding model
mh = ModelHandler(model_number=4,
                  embedding_size=200,
                  input_feature_dim=dh.shape)

# %% Define siamese net
#alphas = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1]
# alpha values to sweep; currently a single value (presumably the margin --
# confirm against SiameseNet).
alphas = [0.1]
for alpha in alphas:
    net = SiameseNet(mh, dh, alpha)
    net.print_model()
    batch_size = 200
    epochs = 1
    steps_per_epoch = 1  #int(dh.n_train / batch_size)
    history = net.train("create_pair_batch_random", batch_size, epochs,
                        steps_per_epoch)

    # % Plot loss
    # Losses
    plotter = Plotter()
    plotter.plot_losses(net, history)
Example #8
0
# Beam labels from the train/validation npz files (getBeamOutput defined
# elsewhere in the original file).
y_train, num_classes = getBeamOutput(output_train_file)

output_validation_file = data_dir + 'beam_output/beams_output_validation.npz'
y_validation, _ = getBeamOutput(output_validation_file)

##############################################################################
# Model configuration
##############################################################################

#multimodal
# False for a single input modality, otherwise the number of modalities.
multimodal = False if len(args.input) == 1 else len(args.input)

num_epochs = 3
batch_size = 32
validationFraction = 0.2  #from 0 to 1
# NOTE(review): validationFraction appears unused in this excerpt.
modelHand = ModelHandler()
opt = Adam()

if 'coord' in args.input:
    coord_model = modelHand.createArchitecture('coord_mlp', num_classes,
                                               coord_train_input_shape[1],
                                               'complete')
if 'img' in args.input:
    num_epochs = 5  # images train longer than the default 3 epochs
    if nCh == 1:
        img_model = modelHand.createArchitecture(
            'light_image', num_classes,
            [img_train_input_shape[1], img_train_input_shape[2], 1],
            'complete')
    else:
        img_model = modelHand.createArchitecture('light_image', num_classes, [
Example #9
0
# CNN hyper-parameters -- presumably per-layer filter counts and the FC input
# width (confirm against the network definition below in the original file).
conv1_out = 68
conv2_out = 68
conv3_out = 68
fc_input = 512
learning_rate = 0.01
epoch_size = 15

# TF1-style graph placeholders: `x` holds flattened inputs, `y` the labels.
x = tf.placeholder('float', [None, input_count])
y = tf.placeholder('float')

keep_rate = 0.5  # dropout keep probability
keep_prob = tf.placeholder(tf.float32)

# Separate handlers for the train and test input files; the model name
# selects the "2048-cnn" architecture in ModelHandler.
data_handler = DataHandler(input_count, n_classes, input_file_path)
data_handler_test = DataHandler(input_count, n_classes, input_test_file_path)
model_handler = ModelHandler(model_name="2048-cnn")

def conv2d(x, W):
    """Convolve x with kernel W: stride 1 in every dimension, SAME padding."""
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_strides, padding='SAME')


def maxpool2d(x):
    """2x2 max-pooling with stride 2 (window size == window movement)."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')


def convolutional_neural_network(x):
Example #10
0
# Activate the 'cv' virtualenv in-process before importing OpenCV.
# NOTE(review): execfile is Python 2 only; under Python 3 this would need
# exec(open(path).read(), {'__file__': path}).
activate_this = "/home/pi/.virtualenvs/cv/bin/activate_this.py"
execfile(activate_this, dict(__file__=activate_this))
import cv2
import cv2  # NOTE(review): duplicate import (harmless but redundant)
import time
import json
from RequestOrganizer import RequestOrganizer
from ModelHandler import ModelHandler

model_handler = ModelHandler()

class FaceDetector:
    """Holds JSON configuration and the OpenCV video-capture handle.

    Only part of the class may be visible in this excerpt.
    """

    def __init__(self):
        # Configuration is (re)loaded from ./config.json in the working dir.
        self.configs = {}
        self.train_mode = False

        with open('config.json') as configs:
            self.configs = json.loads(configs.read())

        print("LOADING VIDEO CAMERA")
        # Default camera, device index 0.
        self.OpenCVCapture = cv2.VideoCapture(0)

    def deviceCommandsCallback(self, topic, payload):
        """Handle an incoming device command (topic + JSON payload bytes)."""
        print("Received command data: %s" % (payload))
        # NOTE(review): newSettings is parsed but never used in this excerpt.
        newSettings = json.loads(payload.decode("utf-8"))


face_detector = FaceDetector()
model = cv2.face.EigenFaceRecognizer_create(
    threshold=face_detector.configs["ClassifierSettings"]
Example #11
0
                          loaded_model)
# Collect the leaf (input) layers of the parsed model graph.
inputLayers = []
treeOfLayers.children[0].fillLeafNodeArray(inputLayers)

# Assert that the graph layers have one input and one output.
TopologyFinder.assertSuitableTopology(inputLayers)
"""
print ("Creating verilog for the following graph: ")
TopologyFinder.printTopology(inputLayers)
"""

# Get quantization data
# Quantizer args: calibration data from sys.argv[2] plus 0, 255, 15 --
# presumably the quantization range and a scale/shift parameter; confirm
# against the Quantizer implementation.
quantizer = Quantizer(loaded_model, np.loadtxt(sys.argv[2]), 0, 255, 15)
quantizer.quantizeModelWeights()

# Start writing structural verilog.
verilogPrinter = VerilogPrinter(open("tensorFlowModel.v", "w"))
verilogPrinter.defineClkAndDataWidth(8)
verilogPrinter.printGroundSignal()
# Index width is derived from the first layer's weight-matrix row count.
inputWires = ["input_index", "input_value", "input_enable"]
verilogPrinter.printInputWires([
    math.ceil(math.log(inputLayers[0].value.get_weights()[0].shape[0], 2)),
    "`DATA_WIDTH"
], inputWires)
# Emit the first layer, then chain the remaining layers off its output wires.
inputWires = ModelHandler.createVerilogForGivenLayer(inputLayers[0].value,
                                                     verilogPrinter, quantizer,
                                                     inputWires)
ModelHandler.createVerilogForAllLayers(inputLayers[0], verilogPrinter,
                                       quantizer, inputWires)
verilogPrinter.output_file.close()
Example #12
0
    del image_paths_l[len(image_paths_r):]
    del output_paths_l[len(image_paths_r):]
elif len(image_paths_l) < len(image_paths_r):
    del image_paths_r[len(image_paths_l):]
    del output_paths_r[len(image_paths_l):]


# Split left/right image and output paths into train/validation sets (1%
# validation, fixed seed). `tts` is presumably sklearn's train_test_split --
# confirm against the imports earlier in the original file.
train_input_l, valid_input_l, train_input_r, valid_input_r, train_output_l, valid_output_l, \
train_output_r, valid_output_r = tts(image_paths_l, image_paths_r, output_paths_l, output_paths_r,
                                         test_size=0.01, random_state=5)

# plt.imshow(o)

if model_train:
    if new_model:
        # Train a brand-new model from scratch.
        model_handler = ModelHandler()
        model_handler.get_model_parameters()
        history = model_handler.model_fit(train_input_l, train_input_r,
                                          train_output_l, train_output_r,
                                          valid_input_l, valid_input_r,
                                          valid_output_l, valid_output_r)

        model_handler.model_save()
        model_handler.model_plot_result(history)
    else:
        # Continue training from a saved checkpoint.
        model_handler = ModelHandler(load_from='model_x.h5')
        model_handler.get_model_parameters()
        history = model_handler.model_fit(train_input_l, train_input_r,
                                          train_output_l, train_output_r,
                                          valid_input_l, valid_input_r,
                                          valid_output_l, valid_output_r)