# Example 1
""" Root Mean Squared Error
Accepts a list of predictions and a list of targets """


def rmse(preds, targets):
    """Return the root-mean-squared error between paired predictions and targets.

    Parameters
    ----------
    preds : sequence of numbers
        Predicted values.
    targets : sequence of numbers
        Ground-truth values; paired element-wise with ``preds``.

    Returns
    -------
    float
        ``sqrt(mean((p - t)^2))`` over the paired elements.

    Raises
    ------
    ValueError
        If ``targets`` is empty (the mean would be undefined; the original
        code raised ZeroDivisionError here).
    """
    if not targets:
        raise ValueError("rmse: targets must be non-empty")
    # zip pairs the sequences safely instead of indexing targets[i],
    # which crashed with IndexError when targets was shorter than preds.
    squared_errors = [(p - t) ** 2 for p, t in zip(preds, targets)]
    return math.sqrt(float(sum(squared_errors)) / len(targets))


# Record the wall-clock start time so total runtime can be measured later.
timestart = time.time()

# TODO: switch to split_dataset(dataset) once it shuffles before splitting.
# trainset, testset = split_dataset(dataset)
trainset, testset = load_imdb('datasets/dataset.pkl')


# Unpack the training split.
# NOTE(review): assumes trainset is a (features, labels) pair — confirm
# against load_imdb's return format.
train_data = trainset[0]
train_target = trainset[1]
# Number of distinct class labels; presumably `unique` returns the set of
# distinct values in train_target (e.g. numpy.unique) — verify.
class_num = len(unique(train_target))
print ("class num:", class_num)

# Layer sizes for the network; the original author noted a maximum of 8
# layers is supported.
layers = [13, 20, 10, 5, class_num]  # 13 inputs — presumably the MFCC feature dims; TODO confirm
# Pretrain with an autoencoder, then fine-tune as a classifier
# (sigmoid hidden layers, softmax output, dropout enabled).
autoencoder = dnn.AutoEncoder(train_data, train_data, train_target, layers, hidden_layer="SigmoidLayer",
                              final_layer="SoftmaxLayer", compression_epochs=1, bias=True, autoencoding_only=False,
                              dropout_on=True)
print("1here is okay")  # debug checkpoint: model construction finished
# Example 2
from keras.layers.convolutional import Convolution1D, MaxPooling1D


np.random.seed(1337)  # fixed seed for reproducibility

# Hyperparameters for the 1-D convolutional classifier.
max_features = 100      # vocabulary size passed to load_imdb as nb_words
maxlen = 60             # sequences padded/truncated to this length
batch_size = 32
embedding_dims = 60     # size of each learned embedding vector
nb_filter = 10          # number of convolution filters
filter_length = 3       # convolution window size
hidden_dims = 20
nb_epoch = 3

# Load the dataset with an 80/20 train/test split.
print("Loading data...")
(X_train, y_train), (X_test, y_test) = load_imdb(imdb_dataset="datasets/6singers_13mfcc_nolink.pkl",
                                                 nb_words=max_features,
                                                 test_split=0.2)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')

# Pad every sequence to a uniform length of `maxlen` timesteps.
print("Pad sequences (samples x time)")
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)

print('Build model...')
model = Sequential()
# Embedding layer maps vocabulary indices into dense vectors of
# `embedding_dims` dimensions.
model.add(Embedding(max_features, embedding_dims, input_length=maxlen))
model.add(Dropout(0.25))
# Next: a Convolution1D layer learning nb_filter filters (continues below).