# --- tail of load_dataset(): the `def` starts before this chunk (not visible here) ---
    print('testset path:{}'.format(app + '_test_x.npy'))
    print('testset path:{}'.format(app + '_test_y.npy'))
    # returns test inputs, test targets and the ground-truth series
    return test_set_x, test_set_y, ground_truth


# --- script-level evaluation setup ---
test_set_x, test_set_y, ground_truth = load_dataset()
shuffle = False
# sliding-window length for the selected appliance; params_appliance and args
# are defined elsewhere in the file
windowlength = params_appliance[args.appliance_name]['windowlength']

sess = tf.InteractiveSession()

test_kwag = {'inputs': test_set_x, 'targets': test_set_y}
# batchsize=-1: the provider feeds the whole test set in a single batch;
# no shuffling for evaluation
test_provider = DataProvider.DoubleSourceProvider(batchsize=-1, shuffle=False)

# input placeholder: (batch, 1, windowlength); target placeholder: (batch, 1)
x = tf.placeholder(tf.float32, shape=[None, 1, windowlength], name='x')
y_ = tf.placeholder(tf.int64, shape=[None, 1], name='y_')

# TensorLayer CNN; Conv2dLayer `shape` is [filter_h, filter_w, in_ch, out_ch]
# (presumably a 1-D convolution over the window axis — confirm against
# the TensorLayer version in use)
network = tl.layers.InputLayer(x, name='input_layer')
network = tl.layers.ReshapeLayer(network, shape=(-1, windowlength, 1, 1))
network = tl.layers.Conv2dLayer(network, act=tf.nn.relu, shape=[10, 1, 1, 30],
                                strides=[1, 1, 1, 1], padding='SAME',
                                name='cnn1')
# NOTE(review): statement truncated at the chunk boundary — it continues
# past this view
network = tl.layers.Conv2dLayer(network, act=tf.nn.relu, shape=[8, 1, 30, 30],
def Ouroborosfit(sess, network, cost, dataset, train_op, batchsize, input_size,
                 x, y_, pad, n_epoch=50, val_provider=None, save_model=-1,
                 val_kwag=None, save_path=None, epoch_identifier=None, mean=0,
                 std=1, shuffle=True, print_frame_loss=True):
    """Train `network` frame-by-frame on a window of its own predictions.

    For every time step past the initial `input_size[-1]`-long window, the
    network's prediction for the previous step is de-normalised and appended
    to the input window (oldest sample dropped), then the network is fitted
    for one epoch against the true value of the current step — hence
    "Ouroboros": the model is fed its own output.

    :param sess: TensorFlow session, e.g. tf.InteractiveSession()
    :param network: TensorLayer network to be trained
    :param cost: cost function/tensor passed through to easyfit/customtest
    :param dataset: raw dataset, indexed as a rank-3 array (axis 2 is time)
    :param train_op: training optimiser
    :param batchsize: training batch size
    :param input_size: network input size; input_size[-1] is the window length
    :param x: input placeholder
    :param y_: target placeholder
    :param pad: padding passed to the DataProvider
    :param n_epoch: number of epochs over the whole sequence
    :param val_provider: DataProvider for validation (None disables it)
    :param save_model: -1 = save once at the end; k > 0 = save every k epochs
    :param val_kwag: keyword dict fed to val_provider
    :param save_path: model save path
    :param epoch_identifier: if truthy, tag periodic saves with the epoch number
    :param mean: normalisation constant (mean)
    :param std: normalisation constant (std)
    :param shuffle: shuffle training minibatches or not
    :param print_frame_loss: print per-frame progress/loss or not
    :return: None
    """
    for epoch in range(n_epoch):
        start_time = time.time()
        # one frame == one time step beyond the initial input window
        for frame in xrange(dataset.shape[-1] - input_size[-1]):
            # NOTE(review): this construction is loop-invariant (all arguments
            # are constant across frames) and could be hoisted above the loop;
            # left in place to preserve behavior exactly.
            output_provider = DataProvider.Provider(stride=(1, 1),
                                                    input_size=input_size,
                                                    output_size=(1, 1, 1),
                                                    prediction_gap=0,
                                                    batchsize=-1,
                                                    pad=pad,
                                                    pad_value=0,
                                                    shuffle=False)
            if frame == 0:
                # seed the rolling window with the first input_size[-1] steps
                # of real data
                input_source = dataset[:, :, :input_size[-1]]
            else:
                out_kwag = {
                    'inputs': input_source,
                    'framebatch': 1,
                    'mean': mean,
                    'std': std,
                    'norm_tar': True
                }
                # predict the next step from the current, partly
                # self-generated, window
                frame_prediction = custompredict(sess=sess,
                                                 network=network,
                                                 output_provider=output_provider,
                                                 x=x,
                                                 fragment_size=1000,
                                                 output_length=1,
                                                 y_op=None,
                                                 out_kwag=out_kwag)
                # de-normalise the prediction back to data units
                frame_prediction = frame_prediction[0].reshape(
                    dataset.shape[0], dataset.shape[1], 1) * std + mean
                # slide the window: drop the oldest step, append the prediction
                input_source = np.concatenate(
                    [input_source[:, :, 1:], frame_prediction], axis=2)
            # `net_input,` unpacks a single-element return from feed()
            net_input, = output_provider.feed(inputs=input_source,
                                              framebatch=1,
                                              mean=mean,
                                              std=std,
                                              norm_tar=True)
            tra_provider = DataProvider.DoubleSourceProvider(
                batchsize=batchsize, shuffle=shuffle)
            # true value of the step being learned, as a column vector
            ground_truth = dataset[:, :, input_size[-1] + frame].reshape(-1, 1)
            tra_kwag = {
                'inputs': net_input[0],
                # targets normalised with the same (mean, std) as the inputs
                'targets': (ground_truth - mean) / std
            }
            # Python 2 print statement: mean over the newest window column vs.
            # ground truth of the previous frame (progress diagnostics)
            print 'prediction:', np.mean(
                np.mean(input_source, axis=0),
                axis=0)[-1], 'GT:', dataset[:, :, input_size[-1] + frame - 1].mean()
            if print_frame_loss:
                # trailing comma suppresses the newline (Python 2)
                print("Epoch %d, frame %d of %d" % (epoch + 1, frame, dataset.shape[-1] - input_size[-1])),
            # single training pass (n_epoch=1) on this frame only
            easyfit(sess=sess, network=network, cost=cost, train_op=train_op,
                    tra_provider=tra_provider, x=x, y_=y_, n_epoch=1,
                    tra_kwag=tra_kwag, print_loss=print_frame_loss)
        if val_provider is not None:
            # per-epoch validation pass; acc=None -> cost-only evaluation
            customtest(sess=sess, network=network, acc=None,
                       test_provider=val_provider, x=x, y_=y_, cost=cost,
                       test_kwag=val_kwag)
        # periodic checkpointing every `save_model` epochs (also fires at
        # epoch 0, since 0 % k == 0)
        if save_model > 0 and epoch % save_model == 0:
            if epoch_identifier:
                modelsaver(network=network, path=save_path,
                           epoch_identifier=epoch)
            else:
                modelsaver(network=network, path=save_path,
                           epoch_identifier=None)
        print 'Epoch took:', time.time() - start_time, 's'
    # save_model == -1: save exactly once, after all epochs complete
    if save_model == -1:
        modelsaver(network=network, path=save_path, epoch_identifier=None)