Example #1
def _test_episode(self):
    neuralnet = gen_model()
    train = TrainNeuralNet(neuralnet)
    output = train.episode()
    # Print every positive entry from the second element of each result
    for out in output:
        for e in out[1]:
            if e > 0:
                print(e)
Example #2
def simulate(n, m, base, c=None, verbose=False):
    population = gen_population(n, data.income_distribution)
    threshold = gen_threshold(n, m, population, data.audits_distribution, base)

    if c is None:
        if verbose: print("Generating model...")
        c = model.gen_model(population, m, verbose=verbose)

    avg_u = 0
    avg_evaders = 0
    for _ in range(data.simulation_iteration_number):
        audits = draw_from_c(c, m)
        evaders = draw_from_threshold(c, threshold, population)
        avg_evaders += len(evaders)
        avg_u += deterministic_defender_utility(audits, evaders, population)
    avg_u /= data.simulation_iteration_number
    avg_evaders /= data.simulation_iteration_number

    if verbose: print("Average number of evaders: " + str(avg_evaders))

    return avg_u
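
For context, a call might look like the sketch below. The gen_population, gen_threshold, and draw_* helpers plus the data module's fields are assumed to be defined as in the snippet above; the concrete numbers are hypothetical.

if __name__ == '__main__':
    # Hypothetical parameters: population size, number of audits, base rate
    n, m, base = 1000, 50, 0.1
    avg_utility = simulate(n, m, base, verbose=True)
    print("Average defender utility:", avg_utility)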
Example #3
#!/usr/bin/python
# -*- coding: utf-8 -*-
import warnings
warnings.filterwarnings('ignore')
import cv2
import numpy as np
from PIL import Image
from keras.applications.mobilenet_v2 import preprocess_input
import model

confidence = 0.5
WIDTH, HEIGHT = 224, 224

if __name__ == '__main__':
    # Load the saved model (bound to `net` so the `model` module is not shadowed)
    net = model.gen_model()
    video = cv2.VideoCapture(0)
    video.set(cv2.CAP_PROP_FRAME_WIDTH, 1000)
    video.set(cv2.CAP_PROP_FRAME_HEIGHT, 1000)

    while True:
        ret, frame = video.read()
        if not ret:
            break
        # OpenCV captures frames in BGR order; convert to RGB for PIL/Keras
        im = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        origin_width, origin_height = im.size
        rescale_factor_width = origin_width / WIDTH
        rescale_factor_height = origin_height / HEIGHT
        # Resize to 224x224, the input size the model was trained with
        im = im.resize((WIDTH, HEIGHT))
        img_array = np.array(im)
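
The snippet is cut off here. A typical continuation (a sketch, not the original code: the preprocessing call, the use of the confidence threshold, and the display loop are assumptions based on the imports above) would be:

        # --- hypothetical continuation of the loop above ---
        img_array = preprocess_input(img_array)  # MobileNetV2 preprocessing
        pred = net.predict(np.expand_dims(img_array, axis=0))[0]
        if pred.max() > confidence:
            print("Detected class:", int(pred.argmax()))
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):  # quit on 'q'
            break
    video.release()
    cv2.destroyAllWindows()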
Example #4
def test_and_train(model_name='speech2speech', retrain=True):
    """ 
    Test and/or train on given dataset 

    @param model_name name of model to save.
    @param retrain True if retrain, False if load from pretrained model
  """

    (X_train, y_train), (X_val, y_val), (X_test, y_test) = load_dataset(
        'raw',
        nfft=NFFT,
        hop_len=HOP_LENGTH,
        fs=FS,
        stacked_frames=STACKED_FRAMES,
        chunk=CHUNK)
    model = None

    X_train_norm = normalize_sample(X_train)
    X_val_norm = normalize_sample(X_val)
    X_test_norm = normalize_sample(X_test)

    y_train_norm = normalize_sample(y_train)
    y_val_norm = normalize_sample(y_val)
    y_test_norm = normalize_sample(y_test)

    print("X shape:", X_train_norm.shape, "y shape:", y_train_norm.shape)

    # Check if model already exists and retrain is not being called again
    if (os.path.isfile(os.path.join(MODEL_DIR, model_name, 'model.json'))
            and not retrain):
        model = model_load(model_name)
        # Compile the model
        model.compile(loss=LOSS, optimizer=OPTIMIZER, metrics=METRICS)
    else:
        if not os.path.isdir(os.path.join(MODEL_DIR, model_name)):
            create_model_directory(model_name)

        baseline_val_loss = None

        model = gen_model(tuple(X_train_norm.shape[1:]))
        print('Created Model...')

        model.compile(loss=LOSS, optimizer=OPTIMIZER, metrics=METRICS)
        print('Compiled model...')

        tf.keras.utils.plot_model(model,
                                  show_shapes=True,
                                  dpi=96,
                                  to_file=os.path.join(MODEL_DIR, model_name,
                                                       'model.png'))
        print(model.metrics_names)

        early_stopping_callback = tf.keras.callbacks.EarlyStopping(
            monitor='val_loss', patience=5, restore_best_weights=True)

        if os.path.isfile(path(MODEL_DIR, model_name)):
            model.load_weights(path(MODEL_DIR, model_name))
            baseline_val_loss = model.evaluate(X_val_norm, y_val_norm)[0]
            print(baseline_val_loss)
            early_stopping_callback = tf.keras.callbacks.EarlyStopping(
                monitor='val_loss',
                patience=5,
                restore_best_weights=True,
                baseline=baseline_val_loss)

        log_dir = os.path.join(
            LOGS_DIR, 'files',
            datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))

        tensorboard_callback = tf.keras.callbacks.TensorBoard(
            log_dir=log_dir, update_freq='batch')

        model_checkpoint_callback = ModelCheckpoint(monitor='val_loss',
                                                    filepath=os.path.join(
                                                        MODEL_DIR, model_name,
                                                        'model.h5'),
                                                    save_best_only=True,
                                                    save_weights_only=False,
                                                    mode='min')

        # Fit the Keras model on the dataset (tensorboard_callback defined above but unused)
        cbs = [early_stopping_callback, model_checkpoint_callback]

        model.fit(X_train_norm,
                  y_train_norm,
                  epochs=EPOCHS,
                  validation_data=(X_val_norm, y_val_norm),
                  verbose=1,
                  callbacks=cbs,
                  batch_size=BATCH_SIZE)
        print('Model Fit...')

        model = save_model(model, model_name)

    loss, mse, accuracy, rmse = model.evaluate(X_test_norm,
                                               y_test_norm,
                                               verbose=0)
    print(
        'Testing accuracy: {}, Testing MSE: {}, Testing Loss: {}, Testing RMSE: {}'
        .format(accuracy * 100, mse, loss, rmse))

    # Pick one test sample (fixed index)
    idx = 32
    print(idx)
    X = X_test_norm[idx]
    y = y_test_norm[idx]

    X = np.expand_dims(X, axis=0)

    print(X.shape)

    y_pred = np.squeeze(model.predict(X), axis=0)

    print(y.shape)
    print(y_pred.shape)

    # Transpose to (frequency, time), the layout librosa expects
    y = y.T
    y_pred = y_pred.T
    X_test_norm = X_test_norm[idx].T

    # Griffin-Lim vocoder: reconstruct waveforms from magnitude spectrograms
    output_sound = librosa.core.griffinlim(y_pred)
    input_sound = librosa.core.griffinlim(X_test_norm)
    target_sound = librosa.core.griffinlim(y)

    # Play and plot all
    play_sound(input_sound, output_sound, target_sound, FS)

    if not os.path.isdir(os.path.join(MODEL_DIR, model_name, 'audio_output')):
        create_model_directory(os.path.join(model_name, 'audio_output'))

    librosa.output.write_wav(path(MODEL_DIR, model_name, 'audio_output',
                                  'input.wav'),
                             input_sound,
                             sr=FS,
                             norm=True)
    librosa.output.write_wav(path(MODEL_DIR, model_name, 'audio_output',
                                  'target.wav'),
                             target_sound,
                             sr=FS,
                             norm=True)
    librosa.output.write_wav(path(MODEL_DIR, model_name, 'audio_output',
                                  'predicted.wav'),
                             output_sound,
                             sr=FS,
                             norm=True)

    return
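
A minimal invocation, assuming the surrounding module defines the constants (NFFT, MODEL_DIR, EPOCHS, ...) and helpers (load_dataset, normalize_sample, gen_model, ...) used above:

if __name__ == '__main__':
    # Retrain from scratch; pass retrain=False to load the saved model instead
    test_and_train(model_name='speech2speech', retrain=True)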
Example #5
        return loss


class Dataset(chainer.dataset.DatasetMixin):
    def __init__(self, data):
        super(Dataset, self).__init__()
        self.data = data

    def __len__(self):
        return len(self.data)

    def get_example(self, i):
        return self.data[i]


deepCoder = M.gen_model()
model = Model(deepCoder)

with open(sys.argv[1], 'r') as f:
    x = json.load(f)
y = M.preprocess_json(x)

print(len(y))

# Hold out every 100th example for testing; the rest are training data
l1 = [e for e in range(len(y)) if e % 100 != 0]
l2 = [e for e in range(len(y)) if e % 100 == 0]

train = Dataset([y[e] for e in l1])
test = Dataset([y[e] for e in l2])

try:
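    # Hypothetical continuation: the snippet is cut off at `try:`, so this is
    # a sketch of a conventional Chainer training setup, assuming Model wraps
    # the loss computation as the `return loss` fragment above suggests.
    train_iter = chainer.iterators.SerialIterator(train, batch_size=100)
    test_iter = chainer.iterators.SerialIterator(test, batch_size=100,
                                                 repeat=False, shuffle=False)
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)
    updater = chainer.training.updaters.StandardUpdater(train_iter, optimizer)
    trainer = chainer.training.Trainer(updater, (20, 'epoch'), out='result')
    trainer.extend(chainer.training.extensions.Evaluator(test_iter, model))
    trainer.run()
except KeyboardInterrupt:
    pass  # hypothetical handler; the original except clause is not shown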
Example #6
def _test_train(self):
    neuralnet = gen_model()
    train = TrainNeuralNet(neuralnet)
    train.train()
Example #7
def _test_actionprob(self):
    game = Tafl()
    neuralnet = gen_model()
    mcts = MCTS(neuralnet)
    mcts.action_probability(game)
Example #8
def _test_prediction(self):
    game = Tafl()
    neuralnet = gen_model()
    mcts = MCTS(neuralnet)
    mcts.perform_search(game)
Example #9
# Build the NTM (model_ntm) with a Dense network as the controller
output_dim = 8
input_dim = output_dim + 2
batch_size = 100
read_head = 1
write_head = 1

lr = 5e-4
clipnorm = 10
optimizer = Adam(lr=lr, clipnorm=clipnorm)
sameInit = RandomNormal(seed=0)

controller = Sequential()
controller_name = "dense"

controller_input_dim, controller_output_dim = controller_shape(
    input_dim, output_dim, 20, 128, 3, read_head, write_head)
controller.add(
    Dense(units=controller_output_dim,
          kernel_initializer=sameInit,
          bias_initializer=sameInit,
          activation='linear',
          input_dim=controller_input_dim))

controller.compile(loss="binary_crossentropy",
                   optimizer=optimizer,
                   metrics=["binary_accuracy"],
                   sample_weight_mode="temporal")

ntm_model = model.gen_model(input_dim=input_dim,
                            output_dim=output_dim,
                            batch_size=batch_size,
                            controller_model=controller,
                            read_head=read_head,
                            write_head=write_head,
                            activation="sigmoid")
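
A quick smoke test on a copy-style batch (a sketch: the two extra input channels are assumed to be start/stop flags, a common NTM copy-task layout; the sequence length and flag convention here are hypothetical):

import numpy as np

seq_len = 10
x = np.zeros((batch_size, 2 * seq_len + 2, input_dim))
y = np.zeros((batch_size, 2 * seq_len + 2, output_dim))
bits = np.random.randint(0, 2, size=(batch_size, seq_len, output_dim))
x[:, 1:seq_len + 1, :output_dim] = bits  # presentation phase
x[:, 0, output_dim] = 1                  # start flag (assumed channel)
x[:, seq_len + 1, output_dim + 1] = 1    # stop flag (assumed channel)
y[:, seq_len + 2:, :] = bits             # recall-phase target
ntm_model.fit(x, y, batch_size=batch_size, epochs=1)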