def evaluate_timeseries_with_label(timeseries, labels, window_size):
    """Train a conv regressor on ``timeseries`` -> ``labels`` and evaluate it.

    Builds the model via make_timeseries_regressor, fits for 30 epochs,
    saves the weights to 'try5_keras2.hd5', and writes a prediction plot
    via save_plot.

    :param timeseries: array of input samples (first axis = samples).
    :param labels: target values aligned with ``timeseries``.
    :param window_size: input window length passed to the model builder.
    """
    filter_length = 128
    nb_filter = 64
    nb_series = 1
    nb_samples = timeseries.shape[0]
    print('\n\nTimeseries ({} samples by {} series):\n'.format(nb_samples, nb_series))

    model = make_timeseries_regressor(window_size=window_size, filter_length=filter_length,
                                      nb_input_series=nb_series, nb_outputs=nb_series,
                                      nb_filter=nb_filter)
    print('\n\nModel with input size {}, output size {}, {} conv filters of length {}'.format(
        model.input_shape, model.output_shape, nb_filter, filter_length))
    model.summary()

    test_size = int(0.2 * len(timeseries))
    X = np.atleast_3d(timeseries)
    y = np.atleast_3d(labels)
    print('\nShape: {}: y:{}'.format(X.shape, y.shape))

    # BUG FIX: the previous code trained on the full dataset (X[:]) while
    # "validating" on its last 20% -- the validation samples were also seen
    # during training, so the reported validation loss was meaningless.
    # Hold the last `test_size` samples out of training instead.
    X_train, X_test, y_train, y_test = X[:-test_size], X[-test_size:], y[:-test_size], y[-test_size:]

    model.fit(X_train, y_train, epochs=30, batch_size=8, validation_data=(X_test, y_test))
    model.save_weights('try5_keras2.hd5')
    # To reload the trained weights later:
    # model.load_weights('try5_keras2.hd5')

    pred = model.predict(X_test)
    save_plot(X_test, y_test, pred, 'try5_keras2.out', style='keras')
def evaluate_timeseries_with_label(timeseries, labels, window_size):
    """Evaluate a pre-trained conv regressor on a random 10% test split.

    Builds the model, loads weights from 'try5_keras3.hd5', predicts on a
    randomly-selected 10% of the data, and writes a de-scaled prediction
    plot via save_plot. Relies on the module-level data_scale / label_scale
    factors for de-normalization.

    NOTE(review): the split uses an unseeded RNG, so the test subset
    differs between runs -- seed np.random for reproducibility.

    :param timeseries: array of input samples (first axis = samples).
    :param labels: target values aligned with ``timeseries``.
    :param window_size: input window length passed to the model builder.
    """
    filter_length = 128
    nb_filter = 64
    nb_series = 1
    nb_samples = timeseries.shape[0]
    print('\n\nTimeseries ({} samples by {} series):\n'.format(nb_samples, nb_series))

    model = make_timeseries_regressor(window_size=window_size, filter_length=filter_length,
                                      nb_input_series=nb_series, nb_outputs=nb_series,
                                      nb_filter=nb_filter)
    print('\n\nModel with input size {}, output size {}, {} conv filters of length {}'.format(
        model.input_shape, model.output_shape, nb_filter, filter_length))
    model.summary()

    X = np.atleast_3d(timeseries)
    y = np.atleast_3d(labels)
    print('\nShape: {}: y:{}'.format(X.shape, y.shape))

    # Random 90/10 split (the older sequential split is kept for reference):
    # X_train, X_test, y_train, y_test = X[:-test_size], X[-test_size:], y[:-test_size], y[-test_size:]
    train_size = int(0.9 * len(timeseries))
    idx = np.random.permutation(len(timeseries))
    train_idx = idx[:train_size]
    test_idx = idx[train_size:]
    X_train, X_test, y_train, y_test = X[train_idx, :], X[test_idx, :], y[train_idx, :], y[test_idx, :]

    # Load previously-trained weights instead of retraining.
    model.load_weights('try5_keras3.hd5')
    # To retrain from scratch:
    # model.fit(X_train, y_train, epochs=30, batch_size=8, validation_data=(X_test, y_test))
    # model.save_weights('try5_keras3.hd5')

    print("Begin predicting")  # BUG FIX: log message typo, was "Beging predicting"
    pred = model.predict(X_test)
    # De-normalize before plotting.
    save_plot(X_test * data_scale, y_test * label_scale, pred * label_scale,
              'try5_keras3_load.out', style='keras')
def evaluate(x, y, save=False):
    """Compute the MSE test loss of the global ``model`` on (x, y).

    :param x: input tensor.
    :param y: target tensor.
    :param save: if True, also dump a de-scaled prediction plot to 'try9.out'
        via save_plot (using module-level data_scale / label_scale).
    :return: scalar test loss as a Python float.
    """
    import torch  # local import: only no_grad is needed here

    model.eval()
    # IMPROVEMENT: run inference under no_grad -- evaluation does not need
    # the autograd graph, so skipping it saves memory and time without
    # changing the computed loss.
    with torch.no_grad():
        predicted = model(x)
        test_loss = F.mse_loss(predicted, y)
    print('\nTest set: Average loss: {:.6f}\n'.format(test_loss.item()))
    if save:
        x = x.cpu().numpy()
        y = y.cpu().numpy()
        predicted = predicted.detach().cpu().numpy()
        save_plot(x * data_scale, y * label_scale, predicted * label_scale,
                  'try9.out', style="keras")
    return test_loss.item()
plt.ylabel('average loss', fontsize=14)
plt.show()

# Persist the trained parameters for later reuse.
params = net.collect_params()
params.save('try3_ivi.params')


def model_predict(net, data_iter):
    """Run ``net`` over every (data, label) pair in ``data_iter``.

    Each batch is reshaped to (1, sequence_length, 1) before the forward
    pass; the shapes of the first batch are printed for inspection.

    :return: three parallel lists of numpy arrays -- inputs, labels and
        predictions (one entry per sample, batch size 1).
    """
    inputs, targets, outputs = [], [], []
    for batch_no, (data, label) in enumerate(data_iter):
        data = data.as_in_context(ctx,)
        data = data.reshape((1, sequence_length, 1))
        prediction = net(data)
        data = data.reshape((1, sequence_length))
        inputs.append(data[0].asnumpy())
        targets.append(label[0].asnumpy())
        outputs.append(prediction[0].asnumpy())
        if batch_no == 0:
            print(data.shape, label.shape, prediction.shape)
    return inputs, targets, outputs


X, y, predicted = model_predict(net, eval_iter)
# De-normalize element-wise before plotting.
X = [a * data_scale for a in X]
y = [a * label_scale for a in y]
predicted = [a * label_scale for a in predicted]
save_plot(X, y, predicted, 'try3_ivi.out')
output_list = ['v(pad)']
train_iter, val_iter, test_iter = build_iters(filename, input_list, output_list,
                                              args.splits, args.batch_size)
# provide_data[0] is (name, shape); take the shape of the first input.
input_shape = train_iter.provide_data[0][1]

# Define the network.
net = get_net(input_shape, args.filter_list, args.num_filters, args.dropout)
print(net)

# BUG FIX: `args.gpus is ''` tested string *identity*, which relies on
# interpreter interning (and raises a SyntaxWarning on modern Python);
# use equality instead.
ctx = mx.cpu() if args.gpus is None or args.gpus == '' else [
    mx.gpu(int(i)) for i in args.gpus.split(',')
]

params = net.collect_params()
# print(params)
params.initialize(mx.initializer.Uniform(0.01), ctx=ctx)

loss = gluon.loss.HuberLoss(rho=0.1)
# trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': 0.005})
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.005})

if args.model_file:
    # BUG FIX: `assert` is stripped under `python -O`; raise explicitly so a
    # missing weights file always fails loudly.
    if not os.path.exists(args.model_file):
        raise FileNotFoundError('model file not found: {}'.format(args.model_file))
    net = load(net, model_file=args.model_file)
else:
    train(train_iter, val_iter, net, loss, trainer, ctx, args.num_epochs)

# Predict on the held-out test split and plot.
x_test, y_test, pred = predict(net, test_iter)
save_plot(x_test, y_test, pred, 'try8_gluon2.out')
def model_predict(net, data_iter, batch_size, num_channel=1):
    """Run ``net`` over ``data_iter``, accumulating the square loss.

    :param net: trained network, called as ``net(data)`` -> (n, w).
    :param data_iter: iterable of (data, label) batches.
    :param batch_size: kept for interface compatibility (unused in the body).
    :param num_channel: kept for interface compatibility (unused in the body).
    :return: (X, y, p) -- parallel lists holding the first sample of each
        batch as numpy arrays: input, label, prediction.
    """
    i = 0
    X = []
    y = []
    p = []
    cumulative_loss = 0
    for data, label in data_iter:
        data = data.as_in_context(ctx, )
        label = label.as_in_context(ctx, )
        output = net(data)  # (n, w)
        loss = square_loss(output, label[:, :, 0])
        batch_loss = nd.mean(loss).asscalar()
        cumulative_loss += batch_loss
        # BUG FIX: previously this printed the *cumulative* loss but labelled
        # it "loss", making per-iteration numbers look like they diverged.
        # Report the per-batch loss (as the sibling predict loop does).
        print("iter %d, loss: %e" % (i, batch_loss))
        X.append(np.squeeze(data[0].asnumpy()))
        y.append(np.squeeze(label[0].asnumpy()))
        p.append(output[0].asnumpy())
        if i == 0:
            print(data.shape, label.shape, output.shape)
        i += 1
    print("predict_loss: %e," % (cumulative_loss))
    return X, y, p


X, y, predicted = model_predict(net, eval_iter, batch_size)
# De-normalize element-wise before plotting.
X = [a * data_scale for a in X]
y = [a * label_scale for a in y]
predicted = [a * label_scale for a in predicted]
save_plot(X, y, predicted, 'try4.out')
# NOTE(review): this chunk is the tail of a model_predict-style function --
# its `def` header and the initialisation of X, y, p and cumulative_loss sit
# outside this view, so the loop below is kept token-for-token.
    for i, (data, label) in enumerate(data_iter):
        data = data.as_in_context(ctx)
        label = label.as_in_context(ctx)
        # Skip the final ragged batch: the scale slices below assume
        # exactly `batch_size` rows per iteration.
        if data.shape[0] < batch_size:
            continue
        output = net(data)  # (n,w)
        output = output.reshape((batch_size, sequence_length, 1))
        loss = square_loss(output, label)
        cumulative_loss += nd.mean(loss).asscalar()
        print("iter %d, loss: %e" % (i, nd.mean(loss).asscalar()))
        # De-normalize -- presumably denormalize mutates its first argument
        # in place, since the return values are discarded. TODO(review):
        # confirm against denormalize's definition.
        denormalize(data, eval_data_scale[i * batch_size:(i + 1) * batch_size, :])
        denormalize(label, eval_label_scale[i * batch_size:(i + 1) * batch_size, :])
        denormalize(output, eval_label_scale[i * batch_size:(i + 1) * batch_size, :])
        # Keep only the first sample of each batch for plotting.
        X.append(np.squeeze(data[0].asnumpy()))
        y.append(np.squeeze(label[0].asnumpy()))
        p.append(output[0].asnumpy())
        if i == 0:
            print(data.shape, label.shape, output.shape)
        # NOTE(review): `i` comes from enumerate, so this increment is dead
        # code -- it is rebound at the top of every iteration.
        i += 1
    print("cumulated_loss: %e," % (cumulative_loss))
    return X, y, p


X, y, predicted = model_predict(net, eval_iter, batch_size)  # X: (n, w, c)
save_plot(X, y, predicted, 'try4_ivpad.out')
plt.grid(True, which="both")
plt.xlabel('epoch', fontsize=14)
plt.ylabel('average loss', fontsize=14)
plt.show()

# Persist the trained parameters.
# net.save_params('try2_load2.params')
params = net.collect_params()
params.save('try2.params')


def model_predict(net, data_iter):
    """Run ``net`` over ``data_iter`` (batch size 1) and collect numpy results.

    Each batch is reshaped to (1, sequence_length, 1) before the forward
    pass; the shapes of the first batch are printed for inspection.

    :return: (X, y, p) -- parallel lists of input, label and prediction
        arrays, one entry per sample.
    """
    i = 0
    X = []
    y = []
    p = []
    for data, label in data_iter:
        data = data.as_in_context(ctx,)
        data = data.reshape((1, sequence_length, 1))
        output = net(data)
        data = data.reshape((1, sequence_length))
        X.append(data[0].asnumpy())
        y.append(label[0].asnumpy())
        p.append(output[0].asnumpy())
        if i == 0:
            print(data.shape, label.shape, output.shape)
        i += 1
    return X, y, p


X, y, predicted = model_predict(net, eval_iter)
# BUG FIX: X, y and predicted are Python *lists*, so `X * data_scale`
# either raises TypeError (float scale) or repeats the list (int scale)
# instead of scaling the values. Scale each array element-wise, exactly
# as the sibling scripts do.
X = [a * data_scale for a in X]
y = [a * label_scale for a in y]
predicted = [a * label_scale for a in predicted]
save_plot(X, y, predicted, 'try2.out')