def evaluate(model, x, y, args, sess, ite=None, writer=None, name=''):
    """Run the model over (x, y) in fixed-size batches without training.

    The last batch is zero-padded up to ``args.batch_size``; ``data_len``
    tells the model how many rows are real, and padded outputs are dropped
    before accumulation.  If ``writer`` is given, the average loss is logged
    under '<name> loss' at iteration ``ite``.

    Returns:
        (average loss across batches, stacked model outputs of shape (N, 1))
    """
    loss_meter = AverageMeter()
    output_all = np.array([]).reshape((-1, 1))

    for start in range(0, len(y), args.batch_size):
        feat_batch = x[start:start + args.batch_size]
        label_batch = y[start:start + args.batch_size]
        n_flows = len(label_batch)

        # Zero-pad a short final batch so the graph always sees a full batch.
        if n_flows != args.batch_size:
            pad = args.batch_size - n_flows
            feat_batch = np.concatenate(
                (feat_batch, np.zeros((pad, feat_batch.shape[1]))), axis=0)
            label_batch = np.concatenate((label_batch, np.zeros(pad)), axis=0)

        feed = {
            model['feat']: feat_batch,
            model['labels']: label_batch,
            model['learning_rate']: args.lr,
            model['data_len']: n_flows,
            # Evaluation: disable dropout by feeding keep-prob 1 everywhere.
            model['keep_probs']: np.ones(len(args.keep_probs)),
        }
        loss_b, output_b = sess.run([model['loss'], model['output']],
                                    feed_dict=feed)
        loss_meter.update(loss_b)
        # Keep only the real (unpadded) rows of this batch's output.
        output_all = np.concatenate(
            (output_all, output_b[:n_flows].reshape((-1, 1))))

    if writer is not None:
        write_summary(writer, '%s loss' % name, loss_meter.avg, ite)
    return loss_meter.avg, output_all
def train(model, x, y, args, sess, ite, writer, idx=None):
    """Run one training epoch over (x, y) in shuffled fixed-size batches.

    ``idx`` supplies the visiting order; when omitted, a fresh index list is
    built.  Note the order is shuffled in place either way (a caller-supplied
    ``idx`` is mutated).  The final short batch is zero-padded, with
    ``data_len`` marking the real row count.  Train loss is written to the
    summary writer every 100 iterations.

    Returns:
        (average loss across batches, updated iteration counter)
    """
    if idx is None:
        idx = [i for i in range(len(y))]
    random.shuffle(idx)
    assert len(y) == len(idx), "idx (order) needs to have the same length as y"

    loss_meter = AverageMeter()
    # prepare the indices in advance
    for start in range(0, len(y), args.batch_size):
        order = idx[start:start + args.batch_size]
        feat_batch = x[order]
        label_batch = y[order]
        n_flows = len(label_batch)

        # Zero-pad a short final batch so the graph always sees a full batch.
        if n_flows != args.batch_size:
            pad = args.batch_size - n_flows
            feat_batch = np.concatenate(
                (feat_batch, np.zeros((pad, feat_batch.shape[1]))), axis=0)
            label_batch = np.concatenate((label_batch, np.zeros(pad)), axis=0)

        feed = {
            model['feat']: feat_batch,
            model['labels']: label_batch,
            model['learning_rate']: args.lr,
            model['data_len']: n_flows,
            # Training: use the configured dropout keep probabilities.
            model['keep_probs']: args.keep_probs,
        }
        _, loss_b = sess.run([model['update_op'], model['loss']],
                             feed_dict=feed)
        loss_meter.update(loss_b)

        if ite % 100 == 0:
            write_summary(writer, 'train loss', loss_b, ite)
        ite = ite + 1

    return loss_meter.avg, ite