Example #1
import numpy as np
from numpy.testing import assert_allclose
from keras.models import Sequential, clone_model
from keras.layers import Dense


def test_clone_sequential_model():
    seq = Sequential()
    seq.add(Dense(8, input_shape=(3,)))
    seq.compile(optimizer='sgd', loss='mse')

    clone = clone_model(seq)
    # clone_model rebuilds the architecture with freshly initialized weights,
    # so copy the trained weights over for the allclose assertion to hold
    clone.set_weights(seq.get_weights())
    clone.compile(optimizer='sgd', loss='mse')

    ins = np.random.random((4, 3))
    y_pred_seq = seq.predict_on_batch(ins)
    y_pred_clone = clone.predict_on_batch(ins)
    assert y_pred_seq.shape == y_pred_clone.shape
    assert_allclose(y_pred_seq, y_pred_clone)
Example #2
def opt_nn():
    from keras.models import Sequential
    from keras.layers import Dense, Activation, Flatten, Dropout
    from keras.layers.convolutional import Convolution2D, MaxPooling2D
    from keras.layers import Embedding
    from keras.layers.recurrent import LSTM
    # Code for a convolutional neural network (kept commented out: it doesn't
    # work as written, and it uses Convolution1D, which is not imported above)

    # First we have to initialize the neural network using Sequential()
    #cnn = Sequential()
    # process the data to fit in a keras CNN properly
    # input data needs to be (N, C, X) - shaped where
    # N - number of samples
    # C - number of channels per sample
    # X - sample size

    #cnn.add(Convolution1D(64, 4,
    #    border_mode="same",
    #    activation="relu",
    #    input_shape=(1, 4)))
    #cnn.add(Convolution1D(64, 2, border_mode="same"))
    #cnn.add(Convolution1D(64, 1, border_mode="same"))
    #cnn.add(Flatten())
    #cnn.add(Dense(256, activation="relu"))
    #cnn.add(Dropout(0.5))
    #cnn.add(Dense(32, activation="relu"))
    #cnn.add(Dense(1, activation="softmax"))
    #cnn.compile(loss="mse", optimizer="rmsprop")

    # Code for the LSTM RNN
    model = Sequential()
    model.add(LSTM(64, input_dim=4, return_sequences=False, activation='tanh'))
    model.add(Dense(128))
    model.add(Dense(64, init='normal', activation='tanh'))
    model.add(Dense(4, init='normal', activation='tanh'))
    model.add(Dense(1, init='normal'))
    model.compile(loss='mse', optimizer='rmsprop')

    colors = np.vstack([quasar_table['PSFMAG_%d' % f]-quasar_table['PSFMAG_%d' % (f+1)] for f in range(0,4)]).T
    color_train = colors[::5]
    color_test = colors[::18]
    batch_size = len(z_train)
    model.fit(color_train.reshape(-1,1,4), z_train, batch_size=batch_size, nb_epoch=300, verbose=0, validation_split=0.5)
    predicted_output = model.predict_on_batch(color_test.reshape(-1,1,4))
    rms_lstm = np.sqrt(np.mean((z_test - predicted_output)**2))  # RMS error: sqrt of the mean squared difference
    plt.scatter(z_test,predicted_output, color='k', s=0.1)
    plt.plot([-0.1, 6], [-0.1, 6], ':k')
    plt.text(0.04, 5, "rms = %.3f" % (rms_lstm))
    plt.xlabel('$z_{true}$')
    plt.ylabel('$z_{fit}$')
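
The snippet above depends on data that is not shown (quasar_table, z_train, z_test). A minimal self-contained sketch of the same LSTM regression on synthetic stand-in data (the Keras 1.x API from the snippet is assumed; all shapes and values are illustrative):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.recurrent import LSTM

rng = np.random.RandomState(0)
colors = rng.random_sample((500, 4))           # stand-in for the 4 color features
z = colors.sum(axis=1) + 0.1 * rng.randn(500)  # stand-in redshift target

model = Sequential()
model.add(LSTM(64, input_dim=4, return_sequences=False, activation='tanh'))
model.add(Dense(1, init='normal'))
model.compile(loss='mse', optimizer='rmsprop')
model.fit(colors.reshape(-1, 1, 4), z, batch_size=32, nb_epoch=5, verbose=0)

pred = model.predict_on_batch(colors[:10].reshape(-1, 1, 4))
print(np.sqrt(np.mean((z[:10] - pred.ravel()) ** 2)))  # RMS on the stand-in data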
Example #3
class LSTM_RNN:

    def __init__(self, look_back, dropout_probability=0.2, init='he_uniform', loss='mse', optimizer='rmsprop'):
        self.rnn = Sequential()
        self.look_back = look_back
        self.rnn.add(LSTM(10, stateful=True, batch_input_shape=(1, 1, 1), init=init))
        self.rnn.add(Dropout(dropout_probability))
        self.rnn.add(Dense(1, init=init))
        self.rnn.compile(loss=loss, optimizer=optimizer)

    def batch_train_test(self, trainX, trainY, testX, testY, nb_epoch=150):
        print('Training LSTM-RNN...')
        for epoch in range(nb_epoch):
            print('Epoch {}/{}'.format(epoch + 1, nb_epoch))
            training_losses = []
            testing_losses = []
            for i in range(len(trainX)):
                y_actual = trainY[i]
                for j in range(self.look_back):
                    # shape each scalar step into the (1, 1, 1) batch expected
                    # by the stateful LSTM (batch_input_shape=(1, 1, 1))
                    x_step = np.array(trainX[i][j], ndmin=3)
                    training_loss = self.rnn.train_on_batch(x_step, np.array([y_actual]))
                    training_losses.append(training_loss)
                self.rnn.reset_states()

            print('Mean training loss = {}'.format(np.mean(training_losses)))

            for i in range(len(testX)):
                for j in range(self.look_back):
                    x_step = np.array(testX[i][j], ndmin=3)
                    testing_loss = self.rnn.test_on_batch(x_step, np.array([testY[i]]))
                    testing_losses.append(testing_loss)
                self.rnn.reset_states()

                # run (and discard) predictions across the look-back window;
                # only the final y_pred is meaningful for the stateful model
                for j in range(self.look_back):
                    y_pred = self.rnn.predict_on_batch(np.array(testX[i][j], ndmin=3))
                self.rnn.reset_states()

            mean_testing_loss = np.mean(testing_losses)
            print('Mean testing loss = {}'.format(mean_testing_loss))
        return mean_testing_loss
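
A hypothetical usage sketch for the class above (one scalar feature per look-back step and one target per sample are assumptions; the data is synthetic):

import numpy as np

rnn = LSTM_RNN(look_back=3)
trainX = np.random.random((20, 3))
trainY = np.random.random(20)
testX = np.random.random((5, 3))
testY = np.random.random(5)
rnn.batch_train_test(trainX, trainY, testX, testY, nb_epoch=2)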
Example #4
class speechLSTM:
    # Initializing the LSTM model
    def __init__(self):
        self.prevData = 30
        self.batchsize = 200
        self.model = Sequential()

    def build_nnet(self):
        self.model.add(LSTM(300, return_sequences=True, stateful=True,
                       batch_input_shape=(self.batchsize, self.prevData, 2)))
        self.model.add(Activation("linear"))
        self.model.add(Dropout(0.5))
        # self.model.add(LSTM(400, return_sequences=True, stateful=True))
        # self.model.add(Activation("linear"))
        # self.model.add(Dropout(0.5))
        # self.model.add(LSTM(500, return_sequences=True, stateful=True))
        # self.model.add(Activation("linear"))
        # self.model.add(Dropout(0.5))
        self.model.add(LSTM(400, return_sequences=True, stateful=True))
        self.model.add(Activation("relu"))
        # self.model.add(Dropout(0.5))
        # self.model.add(LSTM(700, return_sequences=True, stateful=True))
        # self.model.add(Activation("linear"))
        self.model.add(LSTM(500, return_sequences=False, stateful=True))
        self.model.add(Activation("linear"))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(1, activation='sigmoid'))
        self.model.compile(loss='binary_crossentropy', optimizer='adadelta')


    def load_data_file(self):
        outputdata = []
        for f in gb.glob("/media/vyassu/OS/Users/vyas/Documents/Assigments/BigData/AudioData/KL/*.wav"):
            frate, inputdata = sc.read(f)
            pitch=lp.getPitch(f,frate)
            emotion = ""
            loudness = abs(an.loudness(inputdata))
            filename = f.split("/")[-1].split(".")[0]
            # hash the emotion tag at the start of the file name into a number
            emotion = filename[0:2] if filename[0] == "s" else filename[0]
            emotion = float(int(hashlib.md5(emotion).hexdigest(), 16))
            outputdata.append([loudness, pitch, emotion])

        return outputdata

    def get_train_test_data(self,data,percent_split):
        ftestList,ltestlist,fvalidList,lvalidList,ftrainList,ltrainList=[],[],[],[],[],[]
        noOfTrainSamples = len(data)*(1-percent_split)

        noOfTestSamples = len(data)-noOfTrainSamples
        self.batchsize = int(noOfTestSamples)

        noOfTrainSamples = int((noOfTrainSamples - self.prevData)/noOfTestSamples)

        for i in range(int(noOfTrainSamples)*self.batchsize):
            #ltrainList.append(data.iloc[i:i+self.prevData, 2:].as_matrix())
            ftrainList.append(data.iloc[i:i+self.prevData, 0:2].as_matrix())
        ltrainList = data.iloc[0:int(noOfTrainSamples)*self.batchsize, 2:].values

        for i in range(self.batchsize):
            fvalidList.append(data.iloc[i:i + self.prevData, 0:2].as_matrix())
        lvalidList = data.iloc[0:self.batchsize, 2:].values

        randNum = random.randint(0,noOfTrainSamples)

        for i in range(randNum,randNum+self.batchsize):
            ftestList.append(data.iloc[i:i + self.prevData, 0:2].as_matrix())
        ltestlist = data.iloc[randNum: randNum+self.batchsize, 2:].values

        return np.array(ftestList),np.array(ltestlist),np.array(ftrainList),np.array(ltrainList),np.array(fvalidList),np.array(lvalidList)

    def trainNNet(self,data_,label_,valid_data,valid_label):
        data = data_/data_.max(axis=0)
        label = label_/label_.max(axis=0)
        valid_data = valid_data/valid_data.max(axis=0)
        valid_label = valid_label/valid_label.max(axis=0)
        self.model.fit(data, label, batch_size=self.batchsize, nb_epoch=5,validation_data=(valid_data,valid_label),show_accuracy=True,shuffle=False)

    def predict(self, ftest_, ltest_):
        ltest = ltest_/ltest_.max(axis=0)
        ftest = ftest_/ftest_.max(axis=0)
        count = 0
        predicted_data = self.model.predict_on_batch(ftest)
        print ("Score:", self.model.evaluate(ftest, ltest, show_accuracy=True))
        for i in range(len(predicted_data)):
            # compare predictions to labels sample-wise
            if predicted_data[i][0] == ltest[i][0]:
                count += 1
        print ("No of elements matching:", count)
        print ("No of dissimilar elements:", len(predicted_data) - count)
        print predicted_data
        print ltest

    def saveModel(self):
        # save_weights expects a file path, not a directory (this file name is illustrative)
        self.model.save_weights("/media/vyassu/OS/Users/vyas/Documents/Assigments/BigData/speech_lstm_weights.h5", overwrite=False)

    def getIntermediateLayer(self):
        # build a function mapping the model input to the 4th layer's output
        get_3rd_layer_output = K.function([self.model.layers[0].input],
                                          [self.model.layers[3].get_output(train=False)])
        # the K.function must be called with actual input data, e.g.:
        # layer_output = get_3rd_layer_output([X])[0]
Example #5
		checkpoint = ModelCheckpoint("./saved/weights."+str(i)+"{epoch:02d}-{val_loss:.2f}.hdf5",
					verbose=2, monitor='loss', save_best_only=False, mode='auto')

		model.fit(x_train, y_train, batch_size=batch_size, nb_epoch=n_epoch, validation_split=0.2, 
					verbose=2, shuffle='batch', callbacks=[checkpoint])

# Generation mode
else:
	model.load_weights(savingFileName)

	matrices = getStateMatrices(getKeepActivated)

	x_comp, y_comp = getNextBatch(matrices, 1, n_timesteps)	# x_comp is 1 x n_timesteps x n_input

	for i in x_comp[0]:
		print i
	print 

	composition = np.copy(x_comp[0])						# Composition is n_timesteps x n_input

	for i in range(composition_size):
		pred = model.predict_on_batch(x_comp)				# Predict takes a None x n_timesteps x n_input and returns a n_input
		pred = tresholdActivation(pred)
		composition = np.vstack((composition, pred))		# Composition becomes len(composition)+1 x n_input
		del x_comp
		x_comp = np.asarray([composition[-n_timesteps:]])	# Keep only the n_timesteps last


	composition = unflattenStateMatrix(composition[n_timesteps:], getKeepActivated)
	noteStateMatrixToMidi(composition, output_file)			# Reconstitute midi file
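
The tresholdActivation helper called in the loop above is not shown in the snippet. A hypothetical stand-in, assuming its job is to binarize the network's outputs so a prediction can be appended to the composition and fed back in (the 0.5 threshold is an assumption):

import numpy as np

def tresholdActivation(pred, treshold=0.5):
	# binarize activations; the threshold value is an assumption
	return (np.asarray(pred) >= treshold).astype(np.float32)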
Example #6
    print("Training =>")
    train_pred_label = []
    avgLoss = 0
    bar = progressbar.ProgressBar(max_value=len(train_x))
    for n_batch, sent in bar(enumerate(train_x)):
        label = train_label[n_batch]
        label = np.eye(n_classes)[label][np.newaxis,:]
        sent = sent[np.newaxis,:]
        
        if sent.shape[1] > 1: #some bug in keras
            loss = model.train_on_batch(sent, label)
            avgLoss += loss

        pred = model.predict_on_batch(sent)
        pred = np.argmax(pred,-1)[0]
        train_pred_label.append(pred)

    avgLoss = avgLoss/(n_batch+1)  # enumerate is zero-based, so n_batch+1 batches were seen
    
    predword_train = [ list(map(lambda x: idx2la[x], y)) for y in train_pred_label]
    con_dict = conlleval(predword_train, groundtruth_train, words_train, 'r.txt')
    train_f_scores.append(con_dict['f1'])
    print('Loss = {}, Precision = {}, Recall = {}, F1 = {}'.format(avgLoss, con_dict['p'], con_dict['r'], con_dict['f1']))
    
    
    print("Validating =>")
    
    val_pred_label = []
    avgLoss = 0
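
The snippet is cut off here; a hedged sketch of how the validation pass presumably continues, mirroring the training loop above (val_x and val_label are assumed names for the validation inputs and labels):

    bar = progressbar.ProgressBar(max_value=len(val_x))
    for n_batch, sent in bar(enumerate(val_x)):
        label = val_label[n_batch]
        label = np.eye(n_classes)[label][np.newaxis,:]
        sent = sent[np.newaxis,:]

        loss = model.test_on_batch(sent, label)
        avgLoss += loss

        pred = model.predict_on_batch(sent)
        pred = np.argmax(pred,-1)[0]
        val_pred_label.append(pred)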
Example #7
model.add(Dense(8, activation='relu'))
#model.add(Dropout(0.5))
model.add(Dense(nb_classes, activation='softmax'))

batch_size = 32


#model.compile(loss='binary_crossentropy',
model.compile(loss='categorical_crossentropy',
      optimizer='rmsprop',
      metrics=['accuracy'])

model.fit(X_train, Y_train, nb_epoch=100, batch_size=batch_size, shuffle=True)
score = model.evaluate(X_test, Y_test, batch_size=batch_size)
print "score= ", score

y = model.predict_on_batch(X_test)
#y = y.reshape(np.prod(y.shape))

print y.shape, Y_test.shape
print "y[0], Y_test[0]= ", y[0], Y_test[0]

y1 = np.where(y < 0.5, 0, 1)
print "y1[0]= ", y1[0]

for i in xrange(len(y)):
	print y1[i], Y_test[i]

print "y-Y_test= ", (y1 - Y_test)
print "nb errors: ", (np.abs(y1 - Y_test)).sum()  # count errors with the thresholded predictions
Example #8
# Build the network model
model = Sequential()
model.add(Embedding(2*datadim, 256, input_length=None))
model.add(LSTM(output_dim=128, activation='tanh', inner_activation='sigmoid'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop')
model.summary()
model.fit(x_train_all, y_train_all, batch_size=128, nb_epoch=10)

file_name_weights = 'C:\\Python34\\Puzzle15\\utils\\Solutions\\weights'
file_name_model = 'C:\\Python34\\Puzzle15\\utils\\Solutions\\model'
model.save_weights(file_name_weights, overwrite=True)
json_string = model.to_json()
with open(file_name_model, 'w') as outfile:
    json.dump(json_string, outfile)  # the with block closes the file automatically

# Print the freshly built model's predictions to the screen
for i in range(20):
    X = x_val_all[i]
    X.shape = (-1, 2*datadim)
    print('X - ', X)
    prediction = model.predict_on_batch(X)
    print('Prediction', prediction)
    print('Y val - ', y_val_all[i])
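
A short reload sketch mirroring the save logic above (model_from_json and load_weights are the standard Keras counterparts; the file-name variables are reused from earlier in the snippet):

from keras.models import model_from_json

with open(file_name_model) as infile:
    model2 = model_from_json(json.load(infile))
model2.load_weights(file_name_weights)
model2.compile(loss='binary_crossentropy', optimizer='rmsprop')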

Example #9
    deep_NN.add( Activation('relu') )
# output layer
deep_NN.add( Dense(num_labels, W_regularizer=l2(0.01), init='glorot_uniform') )
deep_NN.add( Activation('linear') )

# initialize stochastic gradient descent
#sgd = SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)
#rms_prop = RMSprop(lr=0.0001, rho=0.9, epsilon=1e-06)
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08) 

deep_NN.compile(loss='mse', optimizer= adam, metrics=['accuracy'])
# train
hist = deep_NN.fit(xTrain, yTrain, batch_size=1000, nb_epoch=10, validation_data=(xTest, yTest), verbose=1, shuffle=True)

sys.stdout.write("xTrain type: " + str(type(xTrain))  + "\n")
y_pred = deep_NN.predict_on_batch(xTrain[1:1000])

'''
# saving history
sys.stdout.write('history: saving to file')
with open('../training/results:(2hl,200u,200ep).csv', 'wb') as f:
    t_steps = len(hist.history['acc'])
    writer = csv.writer(f, delimiter=',' )
    writer.writerow( ('loss', 'acc', 'val_loss', 'val_acc')  )
    for i in range(t_steps):
        loss = hist.history['loss'][i]
        acc = hist.history['acc'][i]
        val_loss = hist.history['val_loss'][i]
        val_acc = hist.history['val_acc'][i]
        writer.writerow( (loss, acc, val_loss, val_acc) )    
    f.close()
'''
Example #10
class CnnLstm(object):
    """docstring for cnn-lstm"""
    def __init__(self, conf):
        self.vs = conf["vocab_size"]
        self.ml = conf["maxlen"]
        self.bs = conf["batch_size"]
        self.ed = conf["embedding_dims"]
        self.nf = conf["nb_filter"]
        self.fl = conf["filter_length"]
        self.hs = conf["hidden_size"]
        self.ep = conf["nb_epoch"]
        self.pl = conf["pool_length"]
        self.sm = conf.get("save_model", "models/default.cnnlstm")
        self.lm = conf.get("load_model", "models/default.cnnlstm")
        self.do = conf.get("dropout",0.2)
        self.model = Sequential()

    def build_net(self):
        model_1 = Sequential()
        model_2 = Sequential()
        model_1.add(Embedding(self.vs,
                    self.ed,
                    input_length=self.ml,
                    dropout=self.do))
        model_1.add(Convolution1D(nb_filter=self.nf,
                        filter_length=self.fl,
                        border_mode='valid',
                        activation='relu',
                        subsample_length=1))
        model_1.add(Lambda(max_1d, output_shape=(self.nf, )))
        model_2.add(Embedding(self.vs,
                    self.ed,
                    input_length=self.ml,
                    dropout=self.do))
        model_2.add(Convolution1D(nb_filter=self.nf,
                        filter_length=self.fl,
                        border_mode='valid',
                        activation='relu',
                        subsample_length=1))
        model_2.add(Lambda(max_1d, output_shape=(self.nf, )))
        self.model.add(Merge([model_1, model_2],mode='concat'))
        print self.model.output_shape
        self.model.add(Reshape((2, self.nf), input_shape=(self.nf*2,)))
        self.model.add(LSTM(self.hs))
        self.model.add(Dense(self.hs))
        self.model.add(Dropout(self.do))
        self.model.add(Dense(12))
        self.model.add(Activation("softmax"))
        self.model.compile(loss='categorical_crossentropy',
                        optimizer='adam')
        print "Network compile completed..."

    def save(self):
        f = open(self.sm, "w")
        f.write(self.model.to_json())
        f.close()
        self.model.save_weights(self.sm+".weights", overwrite=True)

    def load(self):
        f = open(self.lm, "r")
        self.model = model_from_json(f.read())
        self.model.load_weights(self.sm+".weights")
        self.model.compile(loss='categorical_crossentropy',
                        optimizer='adam')
        print "Network compile completed..."


    def train(self, x, y, vx, vy, vvx=None, vvy=None):
        print "Begin to train ... training set: {0}, validation set: {1}".format(x[0].shape[0], vx[0].shape[0])
        ep = 0
        max_accuracy = 0
        while ep < self.ep:
            loss = 0
            cnt = 0
            accuracy = 0.0
            v_accuracy = 0.0
            v_im_accuracy = 0.0
            num_of_batch = int(len(y)/self.bs)
            idx_move = max(1, num_of_batch / 60)  # guard against modulo-by-zero on small datasets
            for i in xrange(0, len(y), self.bs):
                x_ = [x[0][i:i+self.bs], x[1][i:i+self.bs]]
                y_ = y[i:i+self.bs]
                loss_ = self.model.train_on_batch(x_, y_)
                pred_ = self.model.predict_on_batch(x_)
                acc_ = 0.0
                for j in xrange(len(pred_)):
                    max_p = np.argmax(pred_[j])
                    acc_ += (y_[j][max_p] > 0)

                acc_ /= len(pred_)
                accuracy += acc_
                # print acc_
                loss += loss_
                cnt += 1
                #sys.stdout.flush()
                if cnt % idx_move == 0:
                    sys.stderr.write("=>\b")
                    sys.stderr.flush()
            print ">"
            if vvx is not None:  # "!= None" misbehaves with numpy arrays
                vv_pred = self.model.predict_on_batch(vvx)
                for j in xrange(len(vv_pred)):
                    max_vp = np.argmax(vv_pred[j])
                    v_im_accuracy += (vvy[j][max_vp] > 0)
                v_im_accuracy /= len(vv_pred)

            v_pred = self.model.predict_on_batch(vx)
            for j in xrange(len(v_pred)):
                max_p = np.argmax(v_pred[j])
                v_accuracy += (vy[j][max_p] > 0)
            v_accuracy /= len(v_pred)

            if v_im_accuracy > max_accuracy:
                print "Model improved on validation set, save model ..."
                self.save()
                max_accuracy = v_im_accuracy
            ep += 1
            print "Epoch {0}, training loss {1}, train-accuracy {2}, valid-accuracy {3}, valid_im-accuracy {4}".format(
                ep, loss / cnt, accuracy / cnt, v_accuracy, v_im_accuracy)
            sys.stdout.flush()
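
A hypothetical configuration sketch for the class above (the keys are taken from __init__; the values are illustrative, and the max_1d helper used in build_net is assumed to be defined elsewhere, as in the original):

conf = {
    "vocab_size": 20000, "maxlen": 100, "batch_size": 32,
    "embedding_dims": 128, "nb_filter": 64, "filter_length": 3,
    "hidden_size": 70, "nb_epoch": 5, "pool_length": 2,
}
net = CnnLstm(conf)
net.build_net()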
Example #11
model_1.add(layer_a1)
model_2.add(layer_a2)



model_1.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model_2.compile(loss='categorical_crossentropy', optimizer='rmsprop')

'''
Starting testing
'''

import cPickle as pickle

print "Starting ..." 

ej_case = 'EJ_case.pkl'
question_input, ans1_input, ans2_input, ans3_input, ans4_input, ans5_input, image_input, solution = pickle.load(open(ej_case,'rb'))

print ans1_input.shape
print solution.shape

for n in range(10):
    print "model_2", model_2.predict_on_batch(ans1_input)
    print "model_1", model_1.predict_on_batch(ans1_input)
    print model_1.train_on_batch(ans1_input , solution)

print model_2.predict_on_batch(ans1_input)
print model_1.predict_on_batch(ans1_input)
Example #12
 gps.subscribe(stater.updategps)
 clock.subscribe(stater.updateclock)
 irL.subscribe(stater.updateirL)
 irR.subscribe(stater.updateirR)
 irC.subscribe(stater.updateirC)
 simu.sleep(1.1)
 init_time =  stater.currenttime
 prev_clock_tick = init_time
 currenttime = 0
 state = stater.state()
 prewards = []
 pactions = []
 pstates = np.zeros((0,state.size))
 pstates = np.concatenate((pstates, state))
 guess = model.predict_on_batch(pstates.reshape(1, pstates.shape[0], pstates.shape[1]))
 action, value = policy(guess[0][0, -1, :] * 300)
 pactions.append(action)
 
 truedonut = 0
 num_runs = 0
 niet = 0
 
 now = simu.time()
 
 while not award.doquit:
   niet += 1
   
   lel = False
   if currenttime >= 300:
     award.failed = True
Example #13
    test_batches = list(
        BatchIterator(X_test, Y_train, batch_size, IMAGE_SIZE)
    )  # we only use X_test and Y_train, Y_train is a  dummy arg
    progbar = generic_utils.Progbar(len(X_test))  # add progress bar since it takes a while
    preds = []
    for X_batch, Y_batch in test_batches:  # X_batch: filenames, Y_batch: annotations (a dummy here)
        X_batch_image = []
        for image_path in X_batch:
            # load pre-processed test images from filenames
            processed_img_arr = cv2.imread(DATA_DIR_PATH + "/" + image_path)
            X_batch_image.append(processed_img_arr.reshape(3, IMAGE_SIZE, IMAGE_SIZE))
        # convert to ndarray
        X_batch_image = np.array(X_batch_image)
        X_batch_image = X_batch_image.astype("float32")
        X_batch_image /= 255
        preds_batch = model.predict_on_batch(X_batch_image)
        progbar.add(batch_size, values=[])
        preds += list(preds_batch)
    preds = np.array(preds)
    print("Saving prediction result...")
    with open("bin/head_%dx%d_noda_preds.bin" % (IMAGE_SIZE, IMAGE_SIZE), "wb") as fid:  # binary mode for pickle
        pickle.dump(preds, fid)

    # create a submission file
    export_to_csv(preds, filenames, "data/head_%dx%d_noda.csv" % (IMAGE_SIZE, IMAGE_SIZE))


# ============================================
#  allocate memory for all images first [OLD]
# ============================================
else:
Example #14
'''
Starting testing
'''

import cPickle as pickle

print "Starting ..." 

ej_case = 'EJ_case.pkl'
question_input, ans1_input, ans2_input, ans3_input, ans4_input, ans5_input, image_input, solution = pickle.load(open(ej_case,'rb'))

image_v    = np.array([ image_input[0][0] ])
print solution.shape

temp = np.hstack(( question_input[0][0], ans1_input[0][0] , ans2_input[0][0] ,ans3_input[0][0] ,ans4_input[0][0] ,ans5_input[0][0] ))
temp = np.array([[temp]])
print temp[0].shape
print image_v.shape
input_data = np.hstack(( image_v , temp[0] ))
print input_data.shape


for n in range(100):
    #print "model_1", model.predict_on_batch( [ ans1_input , image_input] , solution )
    print model.train_on_batch( [ input_data , image_v] , solution )
    #print model_1.train_on_batch(ans1_input , solution)

print model.predict_on_batch( [ input_data , image_v ])
#print model_1.predict_on_batch(ans1_input)
Example #15
def main():
	srcImg = cv2.imread(image_training,cv2.IMREAD_GRAYSCALE)
	tgtImg = cv2.imread(image_expecting,cv2.IMREAD_GRAYSCALE)
	valImg = cv2.imread(image_validating,cv2.IMREAD_GRAYSCALE)
	rows = int(srcImg.shape[0] / img_size)
	columns = int(srcImg.shape[1] / img_size)
	losses = []
	metric = []
	accuracies = []
	num_of_epochs = []
	setTrain = None
	setTarget = None

	# Preparing training data.... 
	print ("Preparing training data....")
	for i in range(0, train_samples):
		r = random.randint(0, rows - 1)
		c = random.randint(0, columns - 1)
		
		y = r * img_size
		x = c * img_size
		h = img_size
		w = img_size
		
		srcTile = srcImg[y:y+h, x:x+w]
		tgtTile = tgtImg[y:y+h, x:x+w]
		
		trainIn = img_to_array(srcTile)    
		trainIn = trainIn.reshape(1,numNeurons)
		trainIn = np.apply_along_axis(prepareInput, 1, trainIn)

		trainOut = img_to_array(tgtTile)
		trainOut = trainOut.reshape(1,numNeurons)
		trainOut = np.apply_along_axis(prepareInput, 1, trainOut)
		
		if setTrain is None:
			setTrain = trainIn
		else:
			setTrain = np.vstack((setTrain, trainIn))
		
		if setTarget is None:
			setTarget = trainOut
		else:
			setTarget = np.vstack((setTarget, trainOut))

	# setting up the dnn model (fully connected feed forward dnn)
	model = Sequential()
	model.add(Dense(numNeurons, activation=activationFunction, input_shape=(numNeurons,), use_bias=True, bias_initializer='zeros', kernel_initializer=initializers.RandomUniform(minval=-0.5, maxval=0.5, seed=42)))
	# input_shape is only required on the first layer; Keras infers it for the rest
	model.add(Dense(numNeurons, activation=activationFunction, use_bias=True, bias_initializer='zeros', kernel_initializer=initializers.RandomUniform(minval=-0.5, maxval=0.5, seed=42)))
	model.add(Dense(numNeurons, activation=activationFunction, use_bias=True, bias_initializer='zeros', kernel_initializer=initializers.RandomUniform(minval=-0.5, maxval=0.5, seed=42)))
	model.add(Dense(numNeurons, activation=activationFunction, use_bias=True, bias_initializer='zeros', kernel_initializer=initializers.RandomUniform(minval=-0.5, maxval=0.5, seed=42)))
	model.summary()

	sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
	model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy', metrics.binary_accuracy])

	# initialization magic for the ui plot
	plt.ion()

	ls = DynamicPlot()
	ls()

	#let's train the model
	cnt = 0
	for i in range(0, num_iterations): 
		history = model.fit(setTrain, setTarget,
					batch_size=batch_size,
					epochs=epochs,
					verbose=0,
					validation_data=(setTrain, setTarget))

		score = model.evaluate(setTrain, setTarget, verbose=0)
		cnt = cnt + epochs
		
		customScore = 0
		p = model.predict_on_batch(setTrain)
		
		a = setTrain.flatten()
		b = p.flatten()
		
		for j in range(0, a.size):
			customScore = customScore + (1- abs(a[j] - b[j]))
		
		customAccuracy = float(customScore) / a.size
		
		num_of_epochs.append(cnt)
		losses.append(score[0])
		metric.append(score[2])
		accuracies.append(customAccuracy)
		ls.drawPlot(np.asarray(num_of_epochs), np.asarray(losses),  np.asarray(metric), np.asarray(accuracies))
		
		print('Loss:', score[0])
		print('Metrics:', score[2])
		print ('Accuracy', customAccuracy)
		print('evaluating next iteration: ', i)


	#let's run a final prediction on another image for validation purposes

	#  Preparing input data for validation prediction....
	print ("Preparing input data for validation prediction....")

	setResult = None
	rows = int(valImg.shape[0] / img_size)
	columns = int(valImg.shape[1] / img_size)

	print(rows, columns)
	 
	for r in range(0, rows) :
		for c in range(0, columns):
			y = r * img_size
			x = c * img_size
			h = img_size
			w = img_size
			
			srcTile = valImg[y:y+h, x:x+w]
			srcIn = img_to_array(srcTile)    
			srcIn = srcIn.reshape(1,numNeurons)
			srcIn = np.apply_along_axis(prepareInput, 1, srcIn)
			if setResult is None:
				setResult = srcIn
			else:
				setResult = np.vstack((setResult, srcIn))

	print('Predicting....')
	result = model.predict_on_batch(setResult)
	s = np.shape(result)
	print(s)

	# preparing image for display
	print ('Preparing image for display')
	i = 0
	for r in range(0, rows):
		print('processing row: ', r)
		for c in range(0, columns):
			resMat = np.asmatrix(result[i])
			resMat = resMat.reshape(img_size,img_size)
			for x in range(0, img_size):
				for y in range(0, img_size):
					valImg[x + r * img_size,y + c * img_size] = int(255 * resMat[x,y])
			i = i + 1
	print('Calculations complete! Result image might not be visible, see taskbar. Hit enter in image to terminate run.')
			
	cv2.imshow('Result',valImg)
	cv2.waitKey(0) & 0xFF # see https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_image_display/py_image_display.html

	st = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
	directory = output_path + st

	# store the parameters of the trained network for later purposes
	if not os.path.exists(directory):
		os.makedirs(directory)

	# save the validation image
	resImage = directory + '\\result.png'
	cv2.imwrite(resImage, valImg)
	cv2.destroyAllWindows()

	modelFile = directory + '\\model.json'

	modelJson =  model.to_json()
	f = open(modelFile, 'w')
	f.write(modelJson)
	f.close()

	modelH5 = directory + '\\model.h5'
	model.save(modelH5)
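
The prepareInput helper applied to every tile above is not included in the snippet. A hypothetical stand-in, consistent with the result later being rescaled by 255 for display (the exact preprocessing is an assumption):

def prepareInput(row):
	# scale 8-bit grayscale values into [0, 1]
	return row / 255.0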
Example #16
model.add(Activation('sigmoid'))
model.add(Dense(output_dim=1))


# Compile the model
adagrad = Adagrad(lr=0.3, epsilon=1e-06)
model.compile(loss='mean_squared_error',
              optimizer=adagrad)

# Prepare the data
xvalue=numpy.asarray([numpy.arange(0,2,0.0002)]).T
yvalue=numpy.sin(xvalue*2*numpy.pi)*0.8
num=len(xvalue)
trainset= (numpy.array([xvalue[i] for i in range(0,num,3)]),numpy.array([yvalue[i] for i in range(0,num,3)]))
validset= (numpy.array([xvalue[i] for i in range(1,num,9)]),numpy.array([yvalue[i] for i in range(1,num,9)]))

# Train the model
info=model.fit(trainset[0], trainset[1],
          nb_epoch=400,
          batch_size=16,
          validation_data=validset,
          callbacks=[EarlyStopping(patience=10,verbose=1)],
          show_accuracy=True)

# Plot the results
r_val = model.predict_on_batch(xvalue)
fig, ax1 = plt.subplots(1, 1, sharex=True, sharey=True)
ax1.plot(xvalue, r_val)
ax1.plot(xvalue.reshape((xvalue.size,)), yvalue.reshape((yvalue.size,)))
plt.show()
Example #17
class SCNN(object):
    """docstring for CNN"""
    def __init__(self, conf):
        self.vs = conf["vocab_size"]
        self.ml = conf["maxlen"]
        self.bs = conf["batch_size"]
        self.ed = conf["embedding_dims"]
        self.nf = conf["nb_filter"]
        self.fl = conf["filter_length"]
        self.hs = conf["hidden_size"]
        self.ep = conf["nb_epoch"]
        self.sm = conf.get("save_model", "models/default.scnn")
        self.lm = conf.get("load_model", "models/default.scnn")
        self.do = conf.get("dropout",0.2)
        self.model = Sequential()

    def build_net(self):
        word_model_1 = Sequential()
        word_model_2 = Sequential()
        pos_model_1 = Sequential()
        pos_model_2 = Sequential()
        model_1 = Sequential()
        model_2 = Sequential()
        embedding_1 = Embedding(self.vs,
                    self.ed,
                    input_length=self.ml,
                    dropout=self.do)
        embedding_p1 = Embedding(44,
                    10,
                    input_length=self.ml,
                    dropout=self.do)        
        embedding_2 = copy.deepcopy(embedding_1)
        embedding_p2 = copy.deepcopy(embedding_p1)
        word_model_1.add(embedding_1)
        # print model_1.output_shape
        word_model_1.add(Lambda(ctn, output_shape=(self.ed * 3, )))
        pos_model_1.add(embedding_p1)
        pos_model_1.add(Lambda(ctn, output_shape=(10 * 3, )))
        model_1.add(Merge([word_model_1, pos_model_1], mode='concat'))
        # print model_1.output_shape
        word_model_2.add(embedding_2)
        word_model_2.add(Lambda(ctn, output_shape=(self.ed * 3, )))
        pos_model_2.add(embedding_p2)
        pos_model_2.add(Lambda(ctn, output_shape=(10 * 3, )))
        model_2.add(Merge([word_model_2, pos_model_2], mode='concat'))
        self.model.add(Merge([model_1, model_2], mode='concat'))
        # print self.model.output_shape
        # print self.model.output_shape
        self.model.add(Dropout(self.do))
        self.model.add(Dense(12))
        self.model.add(Activation("softmax"))
        self.model.compile(loss='categorical_crossentropy',
                        optimizer=Adam(
                            lr=0.1))
        print "Network compile completed..."

    def save(self):
        f = open(self.sm, "w")
        f.write(self.model.to_json())
        f.close()
        self.model.save_weights(self.sm+".weights", overwrite=True)

    def load(self):
        f = open(self.lm, "r")
        self.model = model_from_json(f.read())
        self.model.load_weights(self.sm+".weights")
        self.model.compile(loss='categorical_crossentropy',
                        optimizer='adam')
        print "Network compile completed..."

    def test(self, vx, vy, vvx=None, vvy=None, v_id=None, v_im_id=None):
        v_accuracy = 0.0
        v_im_accuracy = 0.0
        if vvx is not None:  # "!= None" misbehaves with numpy arrays
            vv_pred = self.model.predict_on_batch(vvx)
            for j in xrange(len(vv_pred)):
                max_vp = np.argmax(vv_pred[j])
                v_im_accuracy += (vvy[j][max_vp] > 0)
            v_im_accuracy /= len(vv_pred)

        v_pred = self.model.predict_on_batch(vx)
        for j in xrange(len(v_pred)):
            max_p = np.argmax(v_pred[j])
            if vy[j][max_p] > 0:
                v_accuracy += 1
            else:
                #print v_id[j], max_p, vy[j]
                pass
        v_accuracy /= len(v_pred)
        print "{0} valid-accuracy {1}, valid_im-accuracy {2}".format("Testing", v_accuracy, v_im_accuracy)

    def train(self, x, y, vx, vy, vvx=None, vvy=None, v_id=None, v_im_id=None):
        try:
            print "Begin to train ... training set: {0}, validation set: {1}".format(x[0].shape[0], vx[0].shape[0])
        except:
            print "Begin to train ... training set: {0}, validation set: {1}".format(x[0][0].shape[0], vx[0][0].shape[0])
        if vvx is not None:
            try:
                print "Implicit validation set: {0}".format(vvx[0].shape[0])
            except:
                print "Implicit validation set: {0}".format(vvx[0][0].shape[0])
        ep = 0
        max_accuracy = 0
        while ep < self.ep:
            loss = 0
            cnt = 0
            accuracy = 0.0
            v_accuracy = 0.0
            v_im_accuracy = 0.0
            num_of_batch = int(len(y)/self.bs)
            idx_move = max(1, num_of_batch / 60)  # guard against modulo-by-zero on small datasets
            for i in xrange(0, len(y), self.bs):
                x_ = [x[0][i:i+self.bs], x[1][i:i+self.bs], 
                    x[2][i:i+self.bs], x[3][i:i+self.bs]]
                y_ = y[i:i+self.bs]
                loss_ = self.model.train_on_batch(x_, y_)
                pred_ = self.model.predict_on_batch(x_)
                acc_ = 0.0
                for j in xrange(len(pred_)):
                    max_p = np.argmax(pred_[j])
                    acc_ += (y_[j][max_p] > 0)

                acc_ /= len(pred_)
                accuracy += acc_
                # print acc_
                loss += loss_
                cnt += 1
                if cnt % idx_move == 0:
                    sys.stderr.write("=>\b")
                    sys.stderr.flush()
            print ">"
            if vvx is not None:
                vv_pred = self.model.predict_on_batch(vvx)
                for j in xrange(len(vv_pred)):
                    max_vp = np.argmax(vv_pred[j])
                    v_im_accuracy += (vvy[j][max_vp] > 0)
                v_im_accuracy /= len(vv_pred)

            v_pred = self.model.predict_on_batch(vx)
            for j in xrange(len(v_pred)):
                max_p = np.argmax(v_pred[j])
                if vy[j][max_p] > 0:
                    v_accuracy += 1
                else:
                    #print v_id[j], v_pred[j], vy[j]
                    pass
            v_accuracy /= len(v_pred)

            ep += 1
            print "Epoch {0}, training loss {1}, train-accuracy {2}, valid-accuracy {3}, valid_im-accuracy {4}".format(
                ep, loss / cnt, accuracy / cnt, v_accuracy, v_im_accuracy)
            
            if v_im_accuracy > max_accuracy:
                print "Model improved on validation set, save model ..."
                #self.save()
                max_accuracy = v_im_accuracy
Example #18
def generate_lstm_gmm(seq, maxlen=1, bs=500, ep=2, output_iterations=10, num_mixture_components=3):
    # seq is a single sample, in the format (timesteps, features) !
    # TODO: expand code to support multiple samples, fed into model together as a batch
    # Cut the timeseries data (variable name 'seq') into semi-redundant sequence chunks of maxlen

    X = []
    y = []

    for i in range(0, len(seq) - maxlen):
        X.append(seq[i:i+maxlen])
        y.append(seq[i+maxlen])

    dim = len((X[0][0]))

    print("sequence chunks:", len(X))
    print("chunk width:", len(X[0]))
    print("vector dimension:", dim)
    print("number of mixture components:", num_mixture_components)
    print("batch size:", bs)

    X = np.array(X)
    y = np.array(y)
    
    # build the model: an LSTM feeding a Dense layer with a GMM activation
    print('Build model...')
    model = Sequential()
    model.add(LSTM((dim+2) * num_mixture_components, return_sequences=False, input_shape=(maxlen, dim)))
    model.add(Dense((dim+2) * num_mixture_components))
    
    model.add(GMMActivation(num_mixture_components))

    model.compile(loss=gmm_loss, optimizer=RMSprop(lr=0.001))

    # Train the model
    model.fit(X, y, batch_size=bs, nb_epoch=ep)

    # Generate timeseries
    x_seed = X[-1]  # seed generation with the final in-sample chunk
    x_array = []
    x_array.append(x_seed)
    x = np.array(x_array)

    predicted = []
    for i in range(output_iterations):
        pred_parameters = model.predict_on_batch(x)[0]

        means = pred_parameters[:num_mixture_components * dim]
        sds = pred_parameters[(num_mixture_components * dim):(num_mixture_components * (dim+1))]
        weights = pred_parameters[(num_mixture_components * (dim + 1)):]

        print(means)
        print(sds)
        print(weights)

        means = means.reshape(num_mixture_components, dim)
        sds = sds[:, np.newaxis]
        weights = weights[:, np.newaxis]
        
        pred = weights * np.random.normal(means, sds)
        pred = np.sum(pred, axis=0)
        predicted.append(pred)

    return predicted
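
A minimal sketch of the mixture-sampling step performed inside the loop above, with made-up parameter values (K components, dim features). Note it mirrors the snippet's weighted sum over all components, rather than first drawing one component by weight as a strict GMM sample would:

import numpy as np

K, dim = 3, 2
means = np.random.random((K, dim))
sds = np.random.random((K, 1)) + 0.1
weights = np.random.dirichlet(np.ones(K))[:, np.newaxis]

pred = np.sum(weights * np.random.normal(means, sds), axis=0)
print(pred.shape)  # (dim,)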
Example #19
    exit(0)

outfile = sys.argv[4]

if mode == "synth":
    print("loading audio input...")
    data, sample_rate = sf.read(audiofile)
    data = data[:,0]
    sf.write("test.wav", data, sample_rate)
    
    outbuf = []        
    
    print("synthesizing audio...")
    inp = data[:chunk_size * chunk_history]
    for i in range(10000):
        outp = model.predict_on_batch(np.array([inp]))
        outp = outp[0][0]
        outbuf += list(outp)
        #inp = data[i * chunk_size :\
        #    (i * chunk_size) + (chunk_size * chunk_history)]
        for j in range(chunk_size * (chunk_history - 1)):
            inp[j] = inp[j + chunk_size]
        for j in range(chunk_size):
            inp[j + (chunk_size * (chunk_history - 1))] = outp[j]
    
    print("writing audio output...")
    sf.write(outfile, np.array(outbuf), sample_rate)

elif mode == "test":
    print("not yet implemented")
else:
#            recs  = [r[-maxlen:] for r in recipes]
        
        #print(maxlen, maxWord,len(mh.char_indices),len(mh.word_indices))
        Xchar,Xword,Xcon,dummy    = helper.getCharAndWordNoState(recipes,contextVec,maxlen,maxWord,mh.char_indices,mh.word_indices,step=1,predict=True)
        newLength   = Xchar.shape[0] // 4  # integer division so the indices below stay ints
        
        inds        = [(newLength*(divind+1))-1 for divind in range(0,batchSize)]
        #helper.checkExampleWords(Xword[inds[1]],mh.vocab)

        Xchar   = Xchar[inds]
        Xword   = Xword[inds]
        Xcon    = Xcon[inds]        
        
        

        preds       = model.predict_on_batch([Xchar,Xword,Xcon])[0]
        for d,pred in enumerate(preds):
            #print(d,pred)
            next_index  = helper.sample(pred, div[d])
            next_char   = mh.indices_char[next_index]
            
    
            if recipes[d][-1]  != "$":
                recipes[d] += next_char
        
        dropIt  = [(r[-1] == '$')*1. for r in recipes]
        if int(np.sum(dropIt)) == batchSize:
            break
    with open("../somerecipesfriend.txt",'a') as f:
        for d,rec in enumerate(recipes):
            print("with diversity:",div[d],"\n\n",rec,'\n\n')