Example #1
import torch
import torchvision
import torchvision.transforms as transforms


def main():
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    trainset = torchvision.datasets.CIFAR10(root='./data',
                                            train=True,
                                            download=True,
                                            transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=4,
                                              shuffle=True,
                                              num_workers=2)

    testset = torchvision.datasets.CIFAR10(root='./data',
                                           train=False,
                                           download=True,
                                           transform=transform)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=4,
                                             shuffle=False,
                                             num_workers=2)

    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
               'ship', 'truck')

    # get some random training images
    dataiter = iter(trainloader)
    images, labels = next(dataiter)  # .next() was removed from DataLoader iterators

    print(' '.join('%5s' % classes[labels[j]] for j in range(4)))

    # train() and inference() are helper functions defined elsewhere in this project
    model = train(trainloader)
    inference(testloader, classes, model)
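
Because the DataLoaders use num_workers=2, the script needs the usual entry-point guard on platforms that spawn worker processes (Windows/macOS); a minimal sketch:

if __name__ == '__main__':
    main()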
Example #2
def M_test(FLAGS):

    CR_Test = ct.Read_TFrecord(FLAGS, FLAGS.test_dir)  ## test data
    next_element_test = CR_Test.Dataset_read()

    ##test = next_element[0].shape

    logits = inference(FLAGS, next_element_test[0])

    total_loss, cross_entropy_mean, accuracy = loss(logits,
                                                    next_element_test[1],
                                                    reuse=True)
    ####################################

    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()

    saver = tf.train.Saver(variables_to_restore)

    with tf.Session() as sess:
        # Load the checkpoint to evaluate (test_ckpt: checkpoint path defined elsewhere)
        saver.restore(sess, test_ckpt)

        total_loss_sum = 0.0
        total_acc = 0.0
        for i in range(FLAGS.batch_size):
            # each run pulls the next batch from the dataset iterator
            loss_value, acc_value = sess.run([total_loss, accuracy])
            total_loss_sum += loss_value
            total_acc += acc_value

        print("avg_loss: ", total_loss_sum / FLAGS.batch_size)
        print("avg_accuracy:", total_acc / FLAGS.batch_size)
Example #3
def __init__(self, *args):
    if len(args) == 2:
        if args[1]:
            # build a fresh model from the data and initialise the optimizer
            self.X = args[0]
            self.FGene = featureGenerator(self.X)
            self.Model = model(self.X, self.FGene)
            self.Optim = None
            self.Grad = None
            self.Inf = inference(self)
            self.Grad = gradient(self)
            self.initOptimizer()
        else:
            # load an existing model from config.fModel; no optimizer is set up
            self.X = args[0]
            self.FGene = featureGenerator(self.X)
            self.Model = model(config.fModel)
            self.Optim = None
            self.Grad = None
            self.Inf = inference(self)
            self.Grad = gradient(self)
Example #4
def evaluate():

    X = tf.placeholder(tf.float32, shape=[None, INPUT_HEIGHT, INPUT_WIDTH])
    Y = tf.placeholder(tf.float32, shape=[None, CODE_LEN * CHAR_SET_LEN])  # matches the label layout used below
    keep_prob = tf.placeholder(tf.float32)  # dropout
    # whether we are in the training phase
    is_train = tf.placeholder(tf.bool)

    y = inference(X, None, keep_prob, is_train)

    predict = tf.reshape(y, [-1, CODE_LEN, CHAR_SET_LEN])
    max_idx_pre = tf.argmax(predict, 2)
    max_idx_label = tf.argmax(tf.reshape(Y, [-1, CODE_LEN, CHAR_SET_LEN]), 2)
    correction = tf.equal(max_idx_pre, max_idx_label)
    accuracy = tf.reduce_mean(tf.cast(correction, tf.float32))

    # load the saved model
    saver = tf.train.Saver()
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False)
    with tf.Session(config=config) as sess:
        imgs, labels, txts = get_test_data()
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                '-')[-1]

            for i in range(imgs.shape[0]):
                img, label, txt = imgs[i], labels[i], txts[i]
                img = np.reshape(img, [1, INPUT_HEIGHT, INPUT_WIDTH])
                label = np.reshape(label, [1, CODE_LEN * CHAR_SET_LEN])
                pred = sess.run(y,
                                feed_dict={
                                    X: img,
                                    Y: label,
                                    keep_prob: 1.0,
                                    is_train: False
                                })
                print("The label is {}, the prediction is {}".format(
                    vec2text(label), vec2text(pred)))
            accuracy_score, preds = sess.run([accuracy, y],
                                             feed_dict={
                                                 X: imgs,
                                                 Y: labels,
                                                 keep_prob: 1.0,
                                                 is_train: False
                                             })
            print("After %s step(s), the accuracy on test data is %f" %
                  (global_step, accuracy_score))
        else:
            print("No checkpoint found")
Example #5
def train():
    X = tf.placeholder(tf.float32, shape=[None, INPUT_HEIGHT, INPUT_WIDTH])
    Y = tf.placeholder(tf.float32, shape=[None, CODE_LEN * CHAR_SET_LEN])
    # Y = tf.placeholder(tf.float32, shape=[None, CHAR_SET_LEN])
    keep_prob = tf.placeholder(tf.float32)  # dropout
    global_step = tf.Variable(0, trainable=False)
    # whether we are in the training phase
    is_train = tf.placeholder(tf.bool)

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)  # L2 regularization term
    output = inference(X, regularizer, keep_prob, is_train)

    # cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=Y, logits=output)
    y_ = tf.reshape(Y, shape=[-1, CHAR_SET_LEN])   # flatten to one row per character
    y = tf.reshape(output, shape=[-1, CHAR_SET_LEN])
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(y_, axis=1), logits=y)
    loss = tf.reduce_mean(cross_entropy) + tf.add_n(tf.get_collection("losses"))

    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, DATA_SIZE / BATCH_SIZE, LEARNING_RATE_DECAY)
    # pass global_step to minimize() so it is incremented and the learning rate actually decays
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)

    predict = tf.reshape(output, [-1, CODE_LEN, CHAR_SET_LEN])
    max_idx_pred = tf.argmax(predict, 2)
    max_idx_label = tf.argmax(tf.reshape(Y, [-1, CODE_LEN, CHAR_SET_LEN]), 2)
    correction = tf.equal(max_idx_pred, max_idx_label)
    accuracy = tf.reduce_mean(tf.cast(correction, tf.float32))

    saver = tf.train.Saver()
    config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        itr = get_train_batch()
        for step in range(NUM_EPOCHS):
            batch_x, batch_y = next(itr)
            _, loss_ = sess.run([optimizer, loss], feed_dict={X: batch_x, Y: batch_y, keep_prob: 1.0, is_train: True})
            print('Epoch %s/%s, loss=%.2f' % (step, NUM_EPOCHS, loss_))

            # evaluate on the validation set every 100 steps
            if step % 100 == 0 and step != 0:
                # batch_x_val, batch_y_val = next(itr)
                batch_x_val, batch_y_val, txts = get_val_data()
                acc, loss_ = sess.run([accuracy,loss], feed_dict={X: batch_x_val, Y: batch_y_val, keep_prob: 1., is_train: False})

                # for i in range(batch_y_val.shape[0]):
                #     print("The label is {}, the prediction is {}".format(vec2text(batch_y_val[i]), vec2text(pred[i])))

                print("Epoch %s, on validation, loss = %s, accuracy = %s" % (step, loss_, acc))
                # if validation accuracy exceeds 80%, save the model
                if acc > 0.8:
                    saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=step)
Example #6
def main(
    epoch: int = 1000,
    k: int = 60,
    batch_size: int = 1,
    frequency: int = 100,
    training_length = 48,
    forecast_window = 24,
    train_csv = "train_dataset.csv",
    test_csv = "test_dataset.csv",
    path_to_save_model = "save_model/",
    path_to_save_loss = "save_loss/", 
    path_to_save_predictions = "save_predictions/", 
    device = "cpu"
):

    clean_directory()

    # note: the loaders are fixed at batch_size=1; the batch_size argument above is not forwarded
    train_dataset = SensorDataset(csv_name=train_csv, root_dir="Data/", training_length=training_length, forecast_window=forecast_window)
    train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=True)
    test_dataset = SensorDataset(csv_name=test_csv, root_dir="Data/", training_length=training_length, forecast_window=forecast_window)
    test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=True)

    best_model = transformer(train_dataloader, epoch, k, frequency, path_to_save_model, path_to_save_loss, path_to_save_predictions, device)
    inference(path_to_save_predictions, forecast_window, test_dataloader, device, path_to_save_model, best_model)
Example #7
def show_heatmap(config_path, test_path):
    config = yaml.safe_load(open(config_path))
    device = torch.device('cpu' if config['gpu'] < 0 else 'cuda:%s' %
                          config['gpu'])
    net = load_model(config, device)
    net.eval()

    img_list = os.listdir(test_path)
    K = len(img_list)
    fig = plt.figure(figsize=(20, 10 * K))
    for idx, img_name in enumerate(img_list):
        img_path = os.path.join(test_path, img_name)
        img = get_img(img_path)

        img_paded, info = preprocess_img(
            img, (config['img_size'], config['img_size']))
        imgs = [img]
        infos = [info]

        img_tensor = transforms.ToTensor()(img_paded)
        img_tensor = transforms.Normalize([0.485, 0.456, 0.406],
                                          [0.229, 0.224, 0.225])(img_tensor)
        inputs = img_tensor.unsqueeze(0).to(device)

        detects = inference(config,
                            net,
                            inputs,
                            infos,
                            topK=50,
                            return_hm=True,
                            th=0.2)
        detect = detects[0]
        # detect[3]: the predicted heatmap (requested via return_hm=True)
        hm = detect[3].permute(1, 2, 0).cpu().detach().numpy()[:, :, 0]
        hm = cv2.resize(hm, (config['img_size'], config['img_size']))
        ax = plt.subplot(K, 1, idx + 1)
        plt.xticks([])
        plt.yticks([])
        # plt.tight_layout(pad = 1.0)

        plt.imshow(img_paded)
        plt.imshow(hm, alpha=0.3)
    plt.show()
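
A hypothetical invocation, with placeholder paths for the YAML config and the folder of test images:

if __name__ == '__main__':
    # both arguments are placeholders; point them at a real config file and image directory
    show_heatmap('config.yaml', 'test_images/')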
Example #8
x_s_train, x_s_test, y_s_train_for_cnn, y_s_test_for_cnn, y_s_train_for_lstm, y_s_test_for_lstm = cutting(X_s, One_hot_label_for_cnn_s, One_hot_label_for_lstm_s, training_percentage)
x_s_train, x_s_test, x_s_train_mean, x_s_train_var = standardize(x_s_train, x_s_test)
train_s_dataset = Dataset(x_s_train, y_s_train_for_cnn, y_s_train_for_lstm, x_s_test, y_s_test_for_cnn, y_s_test_for_lstm, False)

X_t, One_hot_label_for_cnn_t, One_hot_label_for_lstm_t = load_data(args.target_filename, seq_len, nb_classes_for_cnn, nb_classes_for_lstm, n_channels, False)
x_t_train, x_t_test, y_t_train_for_cnn, y_t_test_for_cnn, y_t_train_for_lstm, y_t_test_for_lstm = cutting(X_t, One_hot_label_for_cnn_t, One_hot_label_for_lstm_t, training_percentage)
x_t_train, x_t_test, x_t_train_mean, x_t_train_var = standardize(x_t_train, x_t_test)
train_t_dataset = Dataset(x_t_train, y_t_train_for_cnn, y_t_train_for_lstm, x_t_test, y_t_test_for_cnn, y_t_test_for_lstm, False)

X_st, One_hot_label_for_cnn_st, One_hot_label_for_lstm_st = np.concatenate((X_s, X_t), axis=0), np.concatenate((One_hot_label_for_cnn_s, One_hot_label_for_cnn_t), axis=0), np.concatenate((One_hot_label_for_lstm_s, One_hot_label_for_lstm_t), axis=0)
x_st_train, x_st_test, y_st_train_for_cnn, y_st_test_for_cnn, y_st_train_for_lstm, y_st_test_for_lstm = cutting(X_st, One_hot_label_for_cnn_st, One_hot_label_for_lstm_st, training_percentage)
x_st_train, x_st_test, x_st_train_mean, x_st_train_var = standardize(x_st_train, x_st_test)
train_st_dataset = Dataset(x_st_train, y_st_train_for_cnn, y_st_train_for_lstm, x_st_test, y_st_test_for_cnn, y_st_test_for_lstm, True)
print("Data loaded")
# ================================================================================================
net = inference(seq_len, nb_classes_for_cnn, nb_classes_for_lstm, n_channels, learning_rate,
                gamma, alpha, lambda_c, lambda_r, lambda_g, lambda_d, lambda_lstm, keep_prob)
g_optimizer = tf.train.AdamOptimizer(net.learning_rate_).minimize(
    net.loss, var_list=[net.var_n, net.var_g, net.var_c, net.var_r, net.var_l])
d_optimizer = tf.train.AdamOptimizer(net.learning_rate_).minimize(
    net.loss_d, var_list=[net.var_d])
# ================================================================================================

if args.mode == "train":

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print("Setting up Saver...")
        saver = tf.train.Saver()

        x_s_test_stack, y_s_test_for_cnn_stack, y_s_test_for_lstm_stack = train_s_dataset.test_stack()
        x_t_test_stack, y_t_test_for_cnn_stack, y_t_test_for_lstm_stack = train_t_dataset.test_stack()
        x_st_test_stack, y_st_test_for_cnn_stack, y_st_test_for_lstm_stack = train_st_dataset.test_stack()
        feed_test_onSource = {
            net.x1_t_0: x_s_test_stack[:, 0:80],
            net.x1_t_1: x_s_test_stack[:, 80:160],
            net.x1_t_2: x_s_test_stack[:, 160:],
            net.x2_: x_st_test_stack[:, :80],
            net.x3_: x_s_test_stack[:, :80],
            net.labels_: y_s_test_for_cnn_stack,
            net.lstm_label_: y_s_test_for_lstm_stack,
        }
Example #9
import time
import datetime
import json
import inference
import awsiot.greengrasscoreipc
from awsiot.greengrasscoreipc.model import (PublishToTopicRequest,
                                            PublishMessage, JsonMessage)

# from edge_deploy.component_source.src.inference import Inference

# TIMEOUT = 10
# ipc_client = awsiot.greengrasscoreipc.connect()

# `import inference` above binds the module object, so calling it directly would raise a
# TypeError; we assume the module exposes an `inference()` entry point of the same name.
inference.inference()

# topic = "mlops/inference/result"

# while True:
#     message = {
#       "timestamp": str(datetime.datetime.now()),
#       "value":mlmodel.inference()
#     }
#     message_json = json.dumps(message).encode('utf-8')

#     request = PublishToTopicRequest()
#     request.topic = topic
#     publish_message = PublishMessage()
#     publish_message.json_message = JsonMessage()
#     publish_message.json_message.message = message
#     request.publish_message = publish_message
#     operation = ipc_client.new_publish_to_topic()
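
The commented-out draft above stops right after creating the publish operation; with the standard Greengrass IPC pattern, finishing each publish would look roughly like this (a sketch that assumes the ipc_client and TIMEOUT from the commented lines are re-enabled):

#     operation.activate(request)
#     future_response = operation.get_response()
#     future_response.result(TIMEOUT)  # block until the publish is accepted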
Example #10
def inference_manager(station, weight_pfm_dir, data_series_chara_path, data_series_chara_inf_path,
                      weight_paras_path, weight_evaluate_result_path, evaluate_period, eval_way,
                      top_k, algori, filter_level=0.999):
    print(station)
    if station not in os.listdir('save_2020'):
        print(get_time(), '%s has not been trained' % station)
        return

    station_path = 'save_2020/%s' % station
    weight_pfm_dir = station_path + '/' + weight_pfm_dir
    data_series_chara_path = station_path + '/' + data_series_chara_path
    data_series_chara_inf_path = station_path + '/' + data_series_chara_inf_path
    weight_paras_path = station_path + '/' + weight_paras_path
    weight_evaluate_result_path = station_path + '/' + weight_evaluate_result_path

    resid_mean_path = station_path + '/' + 'resid_mean'
    ######### evaluation and inference ###############
    data_series_chara = pl.load(open(data_series_chara_path, 'rb'))
    all_dates_ts = data_series_chara["日期"]
    inference_series_len = len(data_series_chara[series_column_name].iloc[0])

    # file paths used to evaluate the weight parameters w
    weight_paras = pl.load(open(weight_paras_path, 'rb'))
    evaluate_dates_ts = all_dates_ts[(all_dates_ts >= evaluate_period[0])
                                     & (all_dates_ts <= evaluate_period[-1])]
    evaluate_files_path = [
        os.path.join(weight_pfm_dir, "weight_pfm_%s" % d)
        for d in evaluate_dates_ts
        if "weight_pfm_%s" % d in os.listdir(weight_pfm_dir)
    ]
    if len(evaluate_files_path) == 0:
        return
    best_pfm_weight_paras, best_weight_paras_resid = evaluate_weight(
        weight_paras,
        evaluate_files_path,
        weight_evaluate_result_path,
        i=eval_way,
        share_best_weight=False)

    # run inference over the inference period (inference_period is defined outside this function)
    inference_dates = all_dates_ts[(all_dates_ts >= inference_period[0])
                                   & (all_dates_ts <= inference_period[-1])]

    data_series_chara = filter_and_fix(data_series_chara,
                                       end_date=inference_period[0],
                                       filter_group='星期',
                                       serier_col='分时序列',
                                       level=filter_level)
    resid_list = []
    for date in inference_dates:
        series = data_series_chara.loc[(data_series_chara['日期'] == date),
                                       '分时序列'].iloc[0]
        inference_series, likeness_topk_day_list = inference(
            data_series_chara,
            date,
            best_pfm_weight_paras,
            inference_series_len,
            share_best_weight=False,
            top_k=top_k,
            algori=algori)
        data_series_chara.loc[(data_series_chara['日期'] == date),
                              'inference'] = str(list(inference_series))
        data_series_chara.loc[(data_series_chara['日期'] == date),
                              'best_weight'] = str(best_pfm_weight_paras)
        data_series_chara.loc[(data_series_chara['日期'] == date),
                              'likeness_topk_day_list'] = str(
                                  likeness_topk_day_list)
        resid_list.append(np.abs(series - inference_series))
        pl.dump(data_series_chara, open(data_series_chara_inf_path, 'wb'))

    resid_mean = np.array(resid_list).mean(axis=0).astype(int)
    print(resid_mean)
    pl.dump([station, resid_mean], open(resid_mean_path, 'wb'))
    print(get_time(), "Complete one")
Example #11
def M_training():
    startstep = 0 if not is_finetune else int(FLAGS.finetune.split('-')[-1])

    CR = ct.Read_TFrecord(FLAGS, FLAGS.TFrecord_dir)  ##train data
    next_element = CR.Dataset_read()

    CR_Test = ct.Read_TFrecord(FLAGS, FLAGS.val_dir)  ## test data
    next_element_test = CR_Test.Dataset_read()

    ##test = next_element[0].shape
    global_step = tf.Variable(0, trainable=False)

    logits = inference(FLAGS, next_element[0])

    total_loss, cross_entropy_mean, accuracy = loss(logits,
                                                    next_element[1],
                                                    reuse=False)

    train_op = train(FLAGS, total_loss, global_step)
    ##############################################################################

    #######save
    saver = tf.train.Saver(tf.global_variables())

    summary_op = tf.summary.merge_all()

    with tf.Session() as sess:
        if is_finetune:
            saver.restore(sess, FLAGS.finetune)
        else:
            init = tf.global_variables_initializer()
            sess.run(init)

        for step in range(startstep, startstep + FLAGS.max_steps):

            start_time = time.time()
            _, loss_value = sess.run([train_op, total_loss])
            duration = time.time() - start_time

            # print(loss_value)

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            if step % 10 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)
                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                    'sec/batch)')
                print(format_str % (datetime.now(), step, loss_value,
                                    examples_per_sec, sec_per_batch))

            if step % 1000 == 0:
                print("validating...")
                total_val_loss = 0.0
                TEST_ITER = NUM_EXAMPLES_PER_EPOCH_FOR_TEST / FLAGS.batch_size

                # NOTE: building these ops inside the loop keeps growing the graph;
                # ideally create them once before the session and only run them here.
                logits_test = inference(FLAGS,
                                        next_element_test[0],
                                        Reuse=True)

                _val_total_loss, _val_cross_entropy_mean, _val_accuracy = loss(
                    logits_test, next_element_test[1], reuse=True)

                for test_step in range(int(TEST_ITER)):
                    # accumulate the evaluated loss value, not the tensor itself
                    total_val_loss += sess.run(_val_total_loss)
                print("val_loss:", total_val_loss / TEST_ITER)

            # save a checkpoint at the final step
            if step == (FLAGS.max_steps - 1):
                checkpoint_path = os.path.join(FLAGS.save_model, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
Example #12
channels = 2

# Network Parameters
lstm_size = num_hidden = 100  # hidden layer num of features
num_classes = 1  # length of the output
number_of_layers = 1  # start with a single layer

# tf Graph input
X = tf.placeholder("float",
                   [FLAGS.batch_size, timesteps, feature_size, channels])
Y_state = tf.placeholder("float", [FLAGS.batch_size, timesteps, num_classes])
Y_lonlat = tf.placeholder("float",
                          [FLAGS.batch_size, timesteps, num_classes, 2])
xx = tf.reshape(X, [FLAGS.batch_size * timesteps, h, w, 2])
x_em = inference(xx)

print(x_em)
logits, lonlat = RNN(x_em, weights, biases)

prediction_state = []
prediction_lonlat = []
for t in range(timesteps):
    prediction_state.append(
        tf.reshape(tf.nn.softmax(logits[t]),
                   [FLAGS.batch_size, 1, num_classes]))
    prediction_lonlat.append(
        tf.reshape(lonlat[t], [FLAGS.batch_size, 1, num_classes, 2]))
prediction_state = tf.concat(prediction_state, 1)
prediction_lonlat = tf.concat(prediction_lonlat, 1)
Example #13
def getorderlines():
    data = request.data
    param_str = data.decode()
    params = param_str.split('&')
    part_connector = PartRepository()
    package_connector = PackageRepository()
    vehicle_connector = VehicleRepository()
    real_params = []
    for param in params:
        tmp = param.split('=')
        real_params.append(tmp[1])
    print(real_params)
    note = real_params[0]
    chassis = real_params[1]
    note = note.replace('%20', ' ')
    answers = inference([note])
    # print(note)
    answers = rank(answers["answers"])
    # print(answers)
    results = []
    parts, dats, stds = CreateLineIndex()
    chassis_list = LoadChassisLineMatrix()
    for answer in answers:
        linetype = 0
        description = ""
        elastic_confidence = 0  # default so the confidence sum below always has a value
        if str(answer[0]) in parts:
            linetype = 1
            try:
                description = part_connector.GetPartDetail(str(answer[0]))[0]
                elastic_confidence = CalculateConfidenceMarginByChassis(
                    chassis_list, chassis, str(answer[0]), linetype)
            except Exception:
                pass
        elif str(answer[0]) in dats:
            linetype = 2
            try:
                description = package_connector.GetDATDetail(str(answer[0]))[0]
                elastic_confidence = CalculateConfidenceMarginByChassis(
                    chassis_list, chassis, str(answer[0]), linetype)
            except Exception:
                pass
        elif str(answer[0]) in stds:
            linetype = 3
            try:
                description = vehicle_connector.GetSTDDetail(str(answer[0]))[0]
                elastic_confidence = CalculateConfidenceMarginByChassis(
                    chassis_list, chassis, str(answer[0]), linetype)
            except Exception:
                pass
        elif str(answer[0]) == "Straight":
            linetype = 4
            description = 'Straight'
            elastic_confidence = CalculateConfidenceMarginByChassis(
                chassis_list, chassis, description, linetype)
        elif str(answer[0]) == "TextAmount":
            linetype = 7
            description = 'TextAmount'
            elastic_confidence = CalculateConfidenceMarginByChassis(
                chassis_list, chassis, description, linetype)
        confidence = answer[1] + elastic_confidence
        if (confidence > 100):
            confidence = 100
        results.append({
            'linetype': linetype,
            'id': answer[0],
            'description': description,
            'confidence': confidence
        })

    results = sorted(results,
                     key=lambda result: result['confidence'],
                     reverse=True)
    results = list(filter(lambda result: result['confidence'] > 0, results))
    print(results)
    # print(jsonify(results))
    return jsonify(results)
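
This handler reads the raw request body and returns JSON, so it is presumably registered as a POST route on a Flask app defined elsewhere; a minimal, hypothetical wiring sketch (the app object and URL are assumptions):

from flask import Flask

app = Flask(__name__)
# expose the handler above as a POST endpoint; the endpoint name defaults to the function name
app.add_url_rule('/getorderlines', view_func=getorderlines, methods=['POST'])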