Example No. 1
def train_interaction_model(x_train, y_train, x_test, y_test):
    print('Training interaction model')
    model = get_default_model(x_train.shape[1])
    compile_model(model)
    # Snapshot the untrained, randomly initialized model as a baseline.
    tf.keras.models.save_model(model,
                               'models/{}_random.h5'.format(FLAGS.dataset))

    callback = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                patience=5,
                                                mode='min')
    model.fit(x=x_train,
              y=y_train,
              batch_size=128,
              epochs=FLAGS.epochs,
              verbose=0,
              validation_split=0.2,
              callbacks=[callback])
    tf.keras.models.save_model(model, 'models/{}.h5'.format(FLAGS.dataset))
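The snippet relies on get_default_model, compile_model, and an absl-style FLAGS object defined elsewhere. A minimal sketch of what the two helpers might look like (the layer sizes, optimizer, and loss are assumptions):

import tensorflow as tf

def get_default_model(input_dim):
    # Hypothetical helper: a small fully connected binary classifier.
    return tf.keras.Sequential([
        tf.keras.layers.Input(shape=(input_dim,)),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid'),
    ])

def compile_model(model):
    # Hypothetical helper: optimizer and loss are assumptions.
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])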
Example No. 2
def main():
    log.basicConfig(
        format='[ %(levelname)s ] %(message)s',
        level=log.INFO,
        stream=sys.stdout
    )
    args = build_parser().parse_args()
    try:
        model_wrapper = openvino_io_model_wrapper()
        data_transformer = openvino_transformer()
        io = io_adapter.get_io_adapter(args, model_wrapper, data_transformer)
        core = utils.create_core(
            args.extension,
            args.intel_gpu_config,
            args.device,
            args.nthreads,
            args.nstreams,
            args.dump,
            'async',
            log
        )
        model = utils.create_model(core, args.model_xml, args.model_bin, log)
        utils.configure_model(core, model, args.device, args.default_device, args.affinity)
        input_shapes = utils.get_input_shape(model_wrapper, model)
        for layer in input_shapes:
            log.info('Shape for input layer {0}: {1}'.format(layer, input_shapes[layer]))
        utils.reshape_input(model, args.batch_size)
        log.info('Prepare input data')
        io.prepare_input(model, args.input)
        log.info('Create executable network')
        compiled_model = utils.compile_model(core, model, args.device, args.priority)
        log.info('Starting inference ({} iterations) with {} requests on {}'.format(args.number_iter,
                                                                                    args.requests,
                                                                                    args.device))
        result, time = infer_async(compiled_model, args.number_iter, args.requests, io.get_slice_input)
        average_time, fps = process_result(time, args.batch_size, args.number_iter)
        if not args.raw_output:
            io.process_output(result, log)
            result_output(average_time, fps, log)
        else:
            raw_result_output(average_time, fps)
        del model
        del compiled_model
        del core
    except Exception as ex:
        print('ERROR! : {0}'.format(str(ex)))
        sys.exit(1)
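infer_async is defined elsewhere; with the OpenVINO 2.0 Python API it is typically built on AsyncInferQueue. A minimal sketch, assuming get_slice_input(i) returns one input dict per iteration and that all model outputs have names:

import time as timer
from openvino.runtime import AsyncInferQueue

def infer_async(compiled_model, number_iter, requests, get_slice_input):
    # Hypothetical sketch: one queue with `requests` parallel infer jobs.
    results = [None] * number_iter
    queue = AsyncInferQueue(compiled_model, requests)

    def on_done(request, userdata):
        # Copy the outputs of the finished request, keyed by tensor name.
        results[userdata] = {out.any_name: request.get_tensor(out).data.copy()
                             for out in compiled_model.outputs}

    queue.set_callback(on_done)
    start = timer.perf_counter()
    for i in range(number_iter):
        queue.start_async(get_slice_input(i), userdata=i)
    queue.wait_all()
    return results, timer.perf_counter() - start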
Example No. 3
def test_samples():
    # Hyper params
    BATCH_SIZE = 256
    LOAD_WEIGHTS = True
    WEIGHTS_PATH = 'weights/'
    WEIGHTS_FILE = 'asr-weights.hdf5'

    model = base_model()
    model = load_model(model,
                       os.path.join(WEIGHTS_PATH, WEIGHTS_FILE),
                       load_weights=LOAD_WEIGHTS)
    model = compile_model(model)

    # load test wav samples
    test_samples = load_wav_list('test-samples/')

    # patch sample data
    X, Y, _ = preprocess(test_samples,
                         start=0,
                         end=len(test_samples) - 1,
                         sr=48000,
                         scale=6,
                         dimension=256,
                         stride=256,
                         tag='test')

    print(X.shape)
    print(Y.shape)

    # predict
    pred = model.predict(X, batch_size=BATCH_SIZE)

    # evaluate
    scores = model.evaluate(X, Y, batch_size=BATCH_SIZE)
    print('Evaluate scores')
    for score in scores:
        print('- %10f' % (score))

    STFT(Y.flatten(), title='Original', n_fft=2048, show=True)
    STFT(X.flatten(), title='Downsampled', n_fft=2048, show=True)
    STFT(pred.flatten(), title='Upsampled', n_fft=2048, show=True)
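The STFT plotting helper is defined elsewhere; a minimal sketch using librosa (the dB scaling, the 48 kHz sample rate, and the figure layout are assumptions):

import matplotlib.pyplot as plt
import numpy as np
import librosa
import librosa.display

def STFT(signal, title='', n_fft=2048, show=False):
    # Hypothetical helper: plot a log-magnitude spectrogram of `signal`.
    spec = librosa.stft(signal.astype(np.float32), n_fft=n_fft)
    spec_db = librosa.amplitude_to_db(np.abs(spec))
    plt.figure(figsize=(10, 4))
    librosa.display.specshow(spec_db, sr=48000, x_axis='time', y_axis='hz')
    plt.title(title)
    plt.colorbar(format='%+2.0f dB')
    if show:
        plt.show()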
Example No. 4
def run():
    # Hyper params
    EPOCHS = 1
    BATCH_SIZE = 256
    LOAD_WEIGHTS = True
    WEIGHTS_PATH = 'weights/'
    WEIGHTS_FILE = 'asr-weights.hdf5'
    VALID_SPLIT = 0.05
    SHUFFLE = True
    MINI_EPOCH = 1  # epochs to run on each dataset file within one outer epoch

    model = base_model()
    model = load_model(model,
                       os.path.join(WEIGHTS_PATH, WEIGHTS_FILE),
                       load_weights=LOAD_WEIGHTS)
    model = compile_model(model)

    datasets = load_h5_list('data/')
    checkpointer = ModelCheckpoint(
        filepath=os.path.join(WEIGHTS_PATH, WEIGHTS_FILE),
        verbose=1,
        save_best_only=True)
    earlystopper = EarlyStopping(monitor='val_loss', patience=3, verbose=0)

    for i in range(EPOCHS):
        print('#REAL EPOCH(%3d/%3d)' % (i + 1, EPOCHS))
        for dataset in datasets:
            X, Y = load_h5(dataset)
            model.fit(X,
                      Y,
                      batch_size=BATCH_SIZE,
                      epochs=MINI_EPOCH,
                      shuffle=SHUFFLE,
                      callbacks=[checkpointer, earlystopper],
                      validation_split=VALID_SPLIT)

    print('Training finished')
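load_h5_list and load_h5 are defined elsewhere; a plausible sketch with h5py (the dataset key names 'X' and 'Y' are assumptions about the file layout):

import os
import h5py

def load_h5_list(directory):
    # Hypothetical helper: collect every .h5 file in a directory.
    return [os.path.join(directory, f)
            for f in sorted(os.listdir(directory)) if f.endswith('.h5')]

def load_h5(path):
    # Hypothetical helper: load input/target arrays from one file.
    with h5py.File(path, 'r') as f:
        return f['X'][:], f['Y'][:]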
Example No. 5
def train_and_accuracy(network: dict, generation_no: int, network_no: int,
                       time: str):
    '''
    Train a randomly created network and compute its accuracy.

    Args:
        network (dict): dictionary of network parameters.
        generation_no (int): number of times to evolve the population.
        network_no (int): index of the network within its generation (a
            fixed number of networks is created per generation).
        time (str): current time, e.g.
            datetime.datetime.now().strftime("%Y%m%d-%H%M%S").
    '''

    logging.debug('In train and accuracy function.')

    train_path = "Train"
    test_path = "Test"

    epochs = 30
    batch_size = network['Batch_Size']

    image_height = 144
    image_width = 176

    # Training and test Images
    train_images = os.listdir(
        '../../DataRecording/New_Recording/image_processing/All_images/Train')
    test_images = os.listdir(
        '../../DataRecording/New_Recording/image_processing/All_images/Test')

    train_img = []
    for image in train_images:
        image1 = image.split('.')
        train_img.append(image1[0])
    #print(f'Length of training set: {len(train_img)}')

    test_img = []
    for image in test_images:
        image1 = image.split('.')
        test_img.append(image1[0])
    #print(f'Length of test set: {len(test_img)}')

    ## Validation Data Size
    val_data_size = 600

    valid_img = train_img[:val_data_size]
    train_img = train_img[val_data_size:]

    train_gen = DataGen(train_img,
                        train_path,
                        image_height=image_height,
                        image_width=image_width,
                        batch_size=batch_size)
    valid_gen = DataGen(valid_img,
                        train_path,
                        image_height=image_height,
                        image_width=image_width,
                        batch_size=batch_size)

    train_steps = len(train_img) // batch_size
    valid_steps = len(valid_img) // batch_size

    model, MAC_fitness, Kernel_list, Layers, Mac, Mem = utils.compile_model(
        network, generation_no, network_no, time)

    #Tensorboard
    '''tb_callback = keras.callbacks.TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)
    
    history = model.fit_generator(train_gen, validation_data=valid_gen, steps_per_epoch=train_steps, validation_steps=valid_steps, epochs=epochs, callbacks = [tb_callback])'''

    history = model.fit_generator(train_gen,
                                  validation_data=valid_gen,
                                  steps_per_epoch=train_steps,
                                  validation_steps=valid_steps,
                                  epochs=epochs)

    accuracy = history.history['acc']
    loss = history.history['loss']

    acc, los = [], []
    for acc_value in accuracy:
        acc.append(f'{acc_value: 0.4f}')
    for los_value in loss:
        los.append(f'{los_value: 0.4f}')

    logging.info(f'Accuracy and loss at each epoch: {acc}, {los}')

    # Save the model.
    model_json = model.to_json()
    with open(
            f'model_summary/{time}/model_{generation_no+1}_{network_no+1}.json',
            "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights(
        f'model_summary/{time}/model_{generation_no+1}_{network_no+1}.h5')
    logging.info("Saved model to disk")

    # Generate test dataset.
    test_gen = DataGen(test_img,
                       test_path,
                       image_width=176,
                       image_height=144,
                       batch_size=batch_size)

    # Accuracy for test dataset.
    eval_accuracy = 0
    for i in range(len(test_img) // batch_size):
        x, y = test_gen[i]
        score = model.evaluate(x, y)
        eval_accuracy += score[1]
    eval_accuracy /= (len(test_img) // batch_size)

    logging.info(
        f'Evaluation accuracy: {eval_accuracy: 0.4f}, MAC and Memory: {MAC_fitness}'
    )

    return eval_accuracy, MAC_fitness, Kernel_list, Layers, Mac, Mem
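DataGen is defined elsewhere and behaves like a keras.utils.Sequence yielding (images, labels) batches. A minimal sketch; the PNG file naming, the grayscale loading, and the label_for lookup are all assumptions:

import os
import numpy as np
from tensorflow import keras
from PIL import Image

def label_for(image_id):
    # Hypothetical lookup: assumes the label is encoded in the file name.
    return int(image_id.split('_')[-1])

class DataGen(keras.utils.Sequence):
    def __init__(self, ids, path, image_height, image_width, batch_size):
        self.ids, self.path = ids, path
        self.image_height, self.image_width = image_height, image_width
        self.batch_size = batch_size

    def __len__(self):
        # Number of full batches.
        return len(self.ids) // self.batch_size

    def __getitem__(self, index):
        batch = self.ids[index * self.batch_size:(index + 1) * self.batch_size]
        x = np.zeros((len(batch), self.image_height, self.image_width, 1))
        y = np.zeros((len(batch),))
        for j, image_id in enumerate(batch):
            img = Image.open(os.path.join(self.path, image_id + '.png'))
            img = img.convert('L').resize((self.image_width, self.image_height))
            x[j, :, :, 0] = np.asarray(img) / 255.0
            y[j] = label_for(image_id)
        return x, y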
Example No. 6
     config.iterations = 100
     config.warmups = 100
     config.chains = 1
     config.thin = 1
 if args.iterations is not None:
     config.iterations = args.iterations
 if args.warmups is not None:
     config.warmups = args.warmups
 if args.chains is not None:
     config.chains = args.chains
 if args.thin is not None:
     config.thin = args.thin
 try:
     # Compile
     compile_model(posterior=posterior,
                   backend=args.backend,
                   mode=args.mode)
     # Run and Compare
     compare(
         posterior=posterior,
         backend=args.backend,
         mode=args.mode,
         config=config,
         logfile=logfile,
     )
 except Exception:
     exc_type, exc_value, _ = sys.exc_info()
     err = " ".join(
         traceback.format_exception_only(exc_type, exc_value))
     err = re.sub(r"[\n\r\",]", " ", err)[:150] + "..."
     logger.error(f"Failed {name} with {err}")
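The `if args.x is not None` overrides only work when the CLI parser defaults every option to None; a minimal sketch of such a parser (the real parser is not shown, so everything here is an assumption):

import argparse

def build_parser():
    # Hypothetical sketch: None defaults let the config values stand
    # unless a flag is passed explicitly on the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument('--iterations', type=int, default=None)
    parser.add_argument('--warmups', type=int, default=None)
    parser.add_argument('--chains', type=int, default=None)
    parser.add_argument('--thin', type=int, default=None)
    return parser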
Example No. 7
# initialize the list that records the number of epochs for each loop (n_of_e)
num_of_epochs = []

print(scaled_train_images[0].shape)

for n in range(min_num_layers, max_num_layers + 1):
    # create the model
    model = get_model(n, scaled_train_images[0].shape)

    layer_name = 'Pooya'
    intermediate_layer_model = keras.Model(
        inputs=model.input, outputs=model.get_layer(layer_name).output)
    intermediate_output = intermediate_layer_model(scaled_test_images)

    # compile the model
    utils.compile_model(model, ['accuracy'])  #, 'val_accuracy'

    # create path for saving model
    path = f'GlobalPooling/{n}-Layers/'
    # set model save settings
    checkpoint = ModelCheckpoint(path + 'GP.Ep{epoch:02d}',
                                 save_weights_only=False,
                                 save_freq='epoch')
    # form callback list
    call_backs = [checkpoint, early_stop]

    # train the model with the scaled training images
    t0 = time.time()
    run_log = utils.train_model(model, scaled_train_images, train_labels,
                                scaled_val_images, val_labels, num_epoch,
                                batch_size, call_backs)
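utils.train_model is defined elsewhere; judging by the call site it is a thin wrapper around model.fit. A plausible sketch:

def train_model(model, x_train, y_train, x_val, y_val,
                epochs, batch_size, callbacks):
    # Hypothetical wrapper: returns the Keras History object.
    return model.fit(x_train, y_train,
                     validation_data=(x_val, y_val),
                     epochs=epochs,
                     batch_size=batch_size,
                     callbacks=callbacks)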
Example No. 8
num_of_epochs = []

# metrics to monitor and record in history
MET = ['accuracy']

# validation precision and recall collectors for plots
all_prec = []
all_rcll = []
max_epoch = []

for n in range(min_num_layers, max_num_layers + 1):
    # create the model
    model = get_model(n, scaled_train_images[0].shape)

    # compile the model
    utils.compile_model(model, MET)

    # create path for saving model
    path = f'02_IBS-Saved Model/{n}-Layers/'
    # set model save settings
    checkpoint = ModelCheckpoint(path + 'IBS_Ep{epoch:02d}',
                                 save_weights_only=False, save_freq='epoch')
    # form callback list
    call_backs = [
        checkpoint,
        early_stop,
        utils.PredictionCallback(scaled_val_images, val_labels)
    ]

    # train the model with the scaled training images
    t0 = time.time()
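utils.PredictionCallback is defined elsewhere; given the precision/recall collectors above, it plausibly evaluates the validation set at the end of each epoch. A sketch, assuming integer class labels and sklearn metrics:

import numpy as np
from sklearn.metrics import precision_score, recall_score
from tensorflow import keras

class PredictionCallback(keras.callbacks.Callback):
    # Hypothetical sketch: records macro precision/recall per epoch.
    def __init__(self, x_val, y_val):
        super().__init__()
        self.x_val, self.y_val = x_val, y_val
        self.precisions, self.recalls = [], []

    def on_epoch_end(self, epoch, logs=None):
        y_pred = np.argmax(self.model.predict(self.x_val, verbose=0), axis=-1)
        self.precisions.append(
            precision_score(self.y_val, y_pred, average='macro'))
        self.recalls.append(
            recall_score(self.y_val, y_pred, average='macro'))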
Example No. 9
    if FINE_TUNE:
        print("fine-tuning is on...")
    # load image generators
    train_gen, valid_gen = load_generators(TRAIN_DIR,
                                           batch_size=BATCH_SIZE,
                                           val_split=0.3)

    N_STEPS = train_gen.samples // BATCH_SIZE
    N_VAL_STEPS = valid_gen.samples // BATCH_SIZE

    # optimizer
    optim = Adagrad(learning_rate=args.learn_rate)

    # compile the model
    print("compiling..")
    model = compile_model(INPUT_SHAPE, NUM_CLASSES, optim, fine_tune=None)
    model.summary()

    print("loading model callbacks..")
    MODEL_WEIGHTS = '\\'.join(
        SAVE_PATH.split('\\')[:-1]
    ) + '\\model.weights.best.hdf5'  # use model's save path to also save weights
    checkpoint = ModelCheckpoint(
        filepath=MODEL_WEIGHTS,  #save weights in same loc as model
        monitor='val_categorical_accuracy',
        save_best_only=True,
        verbose=1)

    early_stop = EarlyStopping(monitor='val_categorical_accuracy',
                               patience=10,
                               restore_best_weights=True)
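compile_model(INPUT_SHAPE, NUM_CLASSES, optim, fine_tune=...) is defined elsewhere; the fine_tune argument suggests a pretrained backbone that can be partially unfrozen. A sketch, assuming a VGG16 backbone and a small dense head (both assumptions); the categorical_accuracy metric matches the val_categorical_accuracy monitor used by the callbacks above:

from tensorflow.keras import layers, models
from tensorflow.keras.applications import VGG16

def compile_model(input_shape, num_classes, optimizer, fine_tune=None):
    # Hypothetical sketch: freeze the whole base unless fine_tune gives
    # the index of the first trainable base layer.
    base = VGG16(include_top=False, weights='imagenet',
                 input_shape=input_shape)
    if fine_tune is None:
        base.trainable = False
    else:
        for layer in base.layers[:fine_tune]:
            layer.trainable = False
    model = models.Sequential([
        base,
        layers.Flatten(),
        layers.Dense(256, activation='relu'),
        layers.Dense(num_classes, activation='softmax'),
    ])
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['categorical_accuracy'])
    return model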
Example No. 10
x_test /= 255

y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

# Run the data through a few MLP models and save the activations from
# each layer into a Pandas DataFrame.
rows = []
sigmas = [0.10, 0.14, 0.28]
for stddev in sigmas:
    init = initializers.RandomNormal(mean=0.0, stddev=stddev, seed=seed)
    activation = 'relu'

    model = create_mlp_model(n_hidden_layers, dim_layer, (data_dim, ),
                             n_classes, init, 'zeros', activation)
    compile_model(model)
    output_elts = get_activations(model, x_test)
    n_layers = len(model.layers)
    i_output_layer = n_layers - 1

    # Keep hidden-layer activations only (skip the first layer and the
    # output layer) and subsample every 20th unit to keep the frame small.
    for i, out in enumerate(output_elts[:-1]):
        if i > 0 and i != i_output_layer:
            for out_i in out.ravel()[::20]:
                rows.append([i, stddev, out_i])

df = pd.DataFrame(rows,
                  columns=['Hidden Layer', 'Standard Deviation', 'Output'])

# Plot the previously saved activations from the 5 hidden layers
# for RandomNormal initializers with different standard deviations.
fig = plt.figure(figsize=(12, 6))
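create_mlp_model and get_activations are defined elsewhere; get_activations plausibly probes every layer with a functional model, a common tf.keras pattern. A sketch (purely an assumption about the helper's shape):

from tensorflow import keras

def get_activations(model, x):
    # Hypothetical helper: one output array per layer, in layer order.
    probe = keras.Model(inputs=model.input,
                        outputs=[layer.output for layer in model.layers])
    return probe.predict(x, verbose=0)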