str(Data.train[1].shape)) log.info("Validation shape: X:%s Y:%s", str(Data.valid[0].shape), str(Data.valid[1].shape)) log.info("Testing shape: X:%s Y:%s", str(Data.test[0].shape), str(Data.test[1].shape)) if lstnet_init.plot == True and lstnet_init.autocorrelation is not None: AutoCorrelationPlot(Data, lstnet_init) # If --load is set, load model from file, otherwise create model if lstnet_init.load is not None: log.info("Load model from %s", lstnet_init.load) lstnet = LoadModel(lstnet_init.load, custom_objects) else: log.info("Creating model") lstnet = LSTNetModel(lstnet_init, Data.train[0].shape) if lstnet is None: log.critical("Model could not be loaded or created ... exiting!!") exit(1) # Compile model lstnet_tensorboard = ModelCompile(lstnet, lstnet_init) if lstnet_tensorboard is not None: log.info( "Model compiled ... Open tensorboard in order to visualise it!") else: log.info( "Model compiled ... No tensorboard visualisation is available") # Model Training
def optimize_model(hyperparameters):
    """Train and evaluate one LSTNet configuration for hyper-parameter search.

    Intended as the objective function of a hyperopt-style search: applies the
    sampled hyper-parameters to the shared ``lstnet_init`` configuration,
    builds/compiles/trains a fresh model, logs per-epoch metrics to ``out_file``
    (CSV), and returns a result dict whose ``'loss'`` key (ND on the validation
    set) is what the optimiser minimises.

    Parameters
    ----------
    hyperparameters : dict
        Sampled values with keys 'lr', 'dropout', 'GRUUnits', 'batchsize'.

    Returns
    -------
    dict
        {'loss', 'rse', 'corr', 'nrmse', 'nd', 'val_loss', 'space',
         'iteration', 'eval_time', 'status'} — 'status' is STATUS_OK.

    Side effects: increments the global ITERATION counter, mutates the global
    lstnet_init configuration, and appends rows to out_file.
    """
    global ITERATION, lstnet_init, Data

    ITERATION += 1

    # Apply the sampled hyper-parameters to the shared model configuration.
    lstnet_init.lr = hyperparameters['lr']
    lstnet_init.dropout = round(hyperparameters['dropout'], 1)
    lstnet_init.GRUUnits = int(hyperparameters['GRUUnits'])
    lstnet_init.batchsize = int(hyperparameters['batchsize'])
    log.info("Hyper-parameters: Learning rate:%f, Batch_size:%f, Dropout:%f, GRUUnits:%f",
             lstnet_init.lr, lstnet_init.batchsize, lstnet_init.dropout, lstnet_init.GRUUnits)

    log.info("Creating model")
    lstnet = LSTNetModel(lstnet_init, Data.train[0].shape)

    log.info("Compile model")
    lstnet_tensorboard = ModelCompile(lstnet, lstnet_init)
    if lstnet_tensorboard is not None:
        log.info("Model compiled ... Open tensorboard in order to visualise it!")
    else:
        log.info("Model compiled ... No tensorboard visualisation is available")

    log.info("Training model ... ")
    start_time = datetime.now()
    h = train(lstnet, Data, lstnet_init, lstnet_init.batchsize, lstnet_init.epochs,
              lstnet_tensorboard)
    time = datetime.now() - start_time

    # Evaluate on the validation set.
    loss, rse, corr, nrmse, nd = lstnet.evaluate(Data.valid[0], Data.valid[1])

    # Append one CSV row per training epoch for this trial.
    # FIX: use a context manager so the file is closed even if a row fails to
    # serialise (original opened/closed manually); newline='' is the documented
    # way to open files for the csv module.
    with open(out_file, 'a', newline='') as of_connection:
        writer = csv.writer(of_connection)
        for epoch in range(1, len(h.history['loss']) + 1):
            train_metric = {'train_mae': h.history['loss'][epoch - 1],
                            'train_rse': h.history['rse'][epoch - 1],
                            'train_corr': h.history['corr'][epoch - 1],
                            'train_nrmse': h.history['nrmse'][epoch - 1],
                            'train_nd': h.history['nd'][epoch - 1]}
            test_metric = {'test_mae': h.history['val_loss'][epoch - 1],
                           'test_rse': h.history['val_rse'][epoch - 1],
                           'test_corr': h.history['val_corr'][epoch - 1],
                           'test_nrmse': h.history['val_nrmse'][epoch - 1],
                           'test_nd': h.history['val_nd'][epoch - 1]}
            writer.writerow([ITERATION, epoch, train_metric, test_metric,
                             hyperparameters, time])

    eval_time = str(datetime.now() - start_time)

    # Guard the objective against degenerate ND values so the optimiser gets a
    # finite loss. FIX: the original chain tested `nd == np.nan` (always False,
    # since NaN never equals itself) and `nd == 'nan'` (float vs. str, always
    # False), so NaN slipped through; np.isfinite covers inf, -inf and NaN.
    if not np.isfinite(nd):
        nd = 1.

    # FIX: the original dict repeated the key 'rse' ({'rse': rse, ...,
    # 'rse': nrmse}), silently overwriting RSE with NRMSE and omitting an
    # 'nrmse' key entirely.
    result = {'loss': nd,
              'rse': rse,
              'corr': corr,
              'nrmse': nrmse,
              'nd': nd,
              'val_loss': loss,
              'space': hyperparameters,
              'iteration': ITERATION,
              'eval_time': eval_time,
              'status': STATUS_OK}

    return result