from keras import layers, optimizers
from keras.models import Model
from keras_gradient_noise import add_gradient_noise


# dice_coef, recall_at_thresholds, precision_at_thresholds and auc_roc are
# custom metric functions assumed to be defined or imported elsewhere.
def D3GenerateModel(n_filter=16,
                    number_of_class=1,
                    input_shape=(16, 144, 144, 1),
                    activation_last='softmax',
                    metrics=[
                        'mse', 'acc', dice_coef, recall_at_thresholds,
                        precision_at_thresholds, auc_roc
                    ],
                    loss='binary_crossentropy',
                    lr=1e-4,  # assumed default; lr is used by the optimizer below
                    dropout=0.05,
                    init='glorot_uniform',
                    two_output=False):
    filter_size = n_filter
    input_x = layers.Input(shape=input_shape,
                           name='Input_layer',
                           dtype='float32')
    #1 level
    x = layers.Conv3D(filters=filter_size,
                      kernel_size=(3, 3, 3),
                      strides=(1, 1, 1),
                      padding='same')(input_x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv3D(filters=filter_size * 2,
                      kernel_size=(3, 3, 3),
                      strides=(1, 1, 1),
                      padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.MaxPooling3D(pool_size=(2, 2, 2), padding='same')(x)
    #2 level: stacked factorised convolutions; the running tensor is
    # snapshotted after each kernel pair and the three snapshots are summed below.
    conv_list = []
    counter = 0
    for index, kernel_sizes in enumerate([[(1, 3, 3), (1, 1, 3)],
                                          [(3, 3, 3), (3, 1, 3)],
                                          [(3, 3, 1), (1, 3, 1)]]):
        for kernel_size in kernel_sizes:
            x = layers.Conv3D(filters=(filter_size * 4),
                              kernel_size=kernel_size,
                              strides=(1, 1, 1),
                              padding='same',
                              name='Conv3D_%s' % (counter))(x)
            x = layers.BatchNormalization()(x)
            x = layers.LeakyReLU()(x)
            x = layers.SpatialDropout3D(dropout)(x)
            counter = counter + 1
        conv_list.append(x)
    x = layers.add(conv_list)
    x = layers.Conv3D(filters=filter_size * 8,
                      kernel_size=(3, 3, 3),
                      strides=(2, 2, 2),
                      padding='same')(x)
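    # Collapse the 3-D features to 2-D: with the default 144x144 input, the
    # pooling and the strided conv leave 4 x 36 x 36 voxels, i.e. 36*36 = 1296
    # positions per depth slice, matched by the (1, 1296) kernel below.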
    x = layers.Reshape(target_shape=[4, -1, filter_size * 8])(x)
    x = layers.Conv2D(filters=filter_size * 8,
                      kernel_size=(1, 1296),
                      strides=(1, 1296))(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    #x = layers.SpatialDropout2D(dropout)(x)
    #x = layers.Lambda(squash)(x)
    #x = layers.Softmax()(x)
    x = layers.Reshape(target_shape=[filter_size * 8, -1])(x)
    x = layers.Conv1D(filters=2,
                      kernel_size=filter_size * 8,
                      strides=filter_size * 8,
                      activation='softmax')(x)  #, kernel_regularizer=l2(0.001)
    y = layers.Flatten()(x)
    #Classification
    model = Model(inputs=input_x, outputs=y)
    #keras.optimizers.SGD(lr=lr, momentum=0.90, decay=decay, nesterov=False)
    opt_noise = add_gradient_noise(optimizers.Adam)
    optimizer = opt_noise(lr, amsgrad=True)
    model.compile(optimizer=optimizer, loss=loss,
                  metrics=metrics)  # or loss='categorical_crossentropy'
    return model
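

# Minimal usage sketch: the custom metrics are assumed to be importable, so
# plain 'acc' is passed here instead; the lr value matches the assumed default.
model = D3GenerateModel(n_filter=16,
                        input_shape=(16, 144, 144, 1),
                        metrics=['acc'],
                        lr=1e-4)
model.summary()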
import time
import pickle
from collections import defaultdict

import numpy
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable

from keras.layers import Dense
from keras.models import Model, Sequential
from keras.optimizers import Adam, SGD
from keras_gradient_noise import add_gradient_noise
from utils import *

NoisyAdam = add_gradient_noise(Adam)

label_dict = defaultdict(int, l=0, h=2)
int2label = {i: w for w, i in label_dict.items()}
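# e.g. label_dict['l'] == 0 and label_dict['h'] == 2, so
# int2label == {0: 'l', 2: 'h'}; unseen labels default to 0.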

write_intermediate_flag = 0


class arctic_dataset(Dataset):
    def __init__(self, tdd_file, feats_dir):

        self.tdd_file = tdd_file
        self.feats_dir = feats_dir
        self.labels_array = []
        self.feats_array = []
        # Each line of the tdd file is assumed to name a feature file and its
        # label ('l' or 'h'), e.g. "arctic_a0001 h".
        with open(self.tdd_file) as f:
            for line in f:
                fname, label = line.strip().split()[:2]
                self.feats_array.append(fname)
                self.labels_array.append(label_dict[label])
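    # Hypothetical sketch: a torch Dataset also needs __len__ and __getitem__;
    # features are assumed here to be stored as .npy files under feats_dir.
    def __len__(self):
        return len(self.labels_array)

    def __getitem__(self, index):
        feats = numpy.load(self.feats_dir + '/' + self.feats_array[index] + '.npy')
        return feats, self.labels_array[index]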
Example 3
from keras import models
from keras.layers import Dense, Dropout, BatchNormalization, LeakyReLU
from keras.optimizers import RMSprop
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras_gradient_noise import add_gradient_noise

model = models.Sequential()
model.add(
    Dense(128, kernel_initializer="glorot_normal", input_shape=(9 * 128, )))
model.add(LeakyReLU())
model.add(Dropout(.15))
model.add(BatchNormalization())
# model.add(GaussianNoise(1))
model.add(Dense(64))
model.add(LeakyReLU())
model.add(BatchNormalization())
model.add(Dense(6, activation='softmax'))

# In[85]:

# Let's use a different optimizer this time; a noisy RMSprop is prepared
# below and can be swapped in via the commented-out argument.
noisy = add_gradient_noise(RMSprop)
model.compile(
    optimizer="Adamax",
    # optimizer=noisy(),
    loss='categorical_crossentropy',
    metrics=['accuracy'])

# In[86]:

callbacks = [
    EarlyStopping(monitor='val_loss', patience=16),
    ModelCheckpoint(filepath='best_model.h5',
                    monitor='val_loss',
                    save_best_only=True)
]
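
# Hedged usage sketch: x_train/y_train, the epoch budget and batch size are
# assumptions; the callbacks above stop on val_loss and keep the best weights.
history = model.fit(x_train, y_train,
                    validation_split=0.2,
                    epochs=200,
                    batch_size=128,
                    callbacks=callbacks)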
Example 4
eval_results_normal, eval_results_noisy = [], []
history_normal_loss, history_noisy_loss = [], []
history_normal_acc, history_noisy_acc = [], []

# n_runs, get_model and the data arrays are assumed from the full script.
for run in range(n_runs):
    m = get_model((784, ), 10)
    normal = Adam()
    m.compile(optimizer=normal,
              loss="categorical_crossentropy",
              metrics=["accuracy"])
    n_epochs = 150
    batch_size = 256
    history = m.fit(xtrain,
                    ytrain,
                    batch_size=batch_size,
                    epochs=n_epochs,
                    verbose=1)

    res = m.evaluate(xtest, ytest)
    eval_results_normal.append(res)
    m = get_model((784, ), 10)
    np.random.seed(7)
    noisy = add_gradient_noise(Adam)
    m.compile(optimizer=noisy(),
              loss="categorical_crossentropy",
              metrics=["accuracy"])
    history_noisy = m.fit(xtrain,
                          ytrain,
                          batch_size=batch_size,
                          epochs=n_epochs,
                          verbose=1)
    res = m.evaluate(xtest, ytest)
    eval_results_noisy.append(res)

    history_normal_loss.append(history.history["loss"])
    history_noisy_loss.append(history_noisy.history["loss"])

    history_normal_acc.append(history.history["acc"])
    history_noisy_acc.append(history_noisy.history["acc"])
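
# Hedged follow-up: model.evaluate returns [loss, acc], so a quick average
# over the repeated runs compares the two optimizers.
print("plain Adam acc:", np.mean([r[1] for r in eval_results_normal]))
print("noisy Adam acc:", np.mean([r[1] for r in eval_results_noisy]))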
Example 5
    return m


import numpy as np
from keras_gradient_noise import add_gradient_noise

# data, window_size and k are assumed to be defined earlier in the script.
print(data.shape)
data_len = data.shape[0]
feature_len = data.shape[1]  # size of the input and the output


def data_gen(d, w_size, k):
    # Slide a w_size-long window over d and yield k consecutive windows
    # together with the sample that follows each window.
    length = d.shape[0]
    for i in range(w_size, length - w_size - k - 1):
        ret_X = []
        ret_y = []
        for idx in range(k):
            ret_X.append(d[i + idx:i + idx + w_size, :])
            ret_y.append(d[i + idx + w_size + 1])
        yield (np.array(ret_X), np.array(ret_y))


m = get_model((window_size, feature_len), feature_len)
m.summary()
from keras.optimizers import RMSprop
rms = add_gradient_noise(RMSprop)
m.compile(optimizer=rms(),
          loss="categorical_crossentropy",
          metrics=["accuracy"])

m.fit_generator(data_gen(data, window_size, k), steps_per_epoch=90, epochs=50)
m.save("model")
# fit the model and save the weights
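
# Quick shape check for the generator (illustrative): each batch stacks k
# windows of w_size rows plus k one-step-ahead targets.
X_batch, y_batch = next(data_gen(data, window_size, k))
print(X_batch.shape, y_batch.shape)  # (k, window_size, feature_len), (k, feature_len)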
Example 6
from keras.models import load_model
from keras import optimizers
from keras_gradient_noise import add_gradient_noise
noisy = add_gradient_noise(optimizers.RMSprop)
from sklearn.preprocessing import OneHotEncoder
from config import window_size, feature_len
import numpy as np

m = load_model("model", custom_objects={"NoisyRMSprop": noisy})
number_of_notes = 50
rand = np.random.randint(0, feature_len, size=[window_size])
ohe = OneHotEncoder(n_values=feature_len, sparse=False)

music = []
music.extend(list(rand))
for i in range(number_of_notes):
    # One-hot encode the current window and predict the next note index.
    a = np.array(music[i:i + window_size]).reshape([-1, 1])
    rand = ohe.fit_transform(a)
    pred = m.predict(rand.reshape([1, window_size, feature_len]))
    music.append(np.argmax(pred))

music = music[window_size:]
with open("classes.txt", "r") as f:
    classes = f.readlines()

# one-hot decode
# then label decode
# build a stream from the corresponding notes and chords
# write the stream to a MIDI file
# save it
labels = []
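# Hedged sketch of the decode steps listed above (classes.txt is assumed to
# hold one class name per line, indexed by the predicted ids):
for idx in music:
    labels.append(classes[idx].strip())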