Example #1
def main():
    print_header("START", CONFIG, level=0)
    data = load(CONFIG)

    print(data.head())
    data.to_csv("./outputs/data.csv", index=False)

    describe(data, CONFIG)
    test(data, CONFIG)
    forecast(data, CONFIG)
    predict(data, CONFIG)
    report(data, CONFIG)
    print_header("DONE", CONFIG, level=0)
Example #2
def run_predict(my_model, n_len, names, last_names, genders, dates):
    """Score every pair of records and log likely duplicates."""
    normalized_data = data_preparer.normalize_merge_data(
        names, last_names, n_len, genders, dates)

    for i_x, current in enumerate(normalized_data):
        # Pair the current record with every later record so each
        # combination is scored exactly once.
        prediction_data = [
            [current[0], current[1], current[2], current[3],
             d[0], d[1], d[2], d[3]]
            for d in normalized_data[i_x + 1:]
        ]

        indices = predict(my_model, prediction_data)
        dups = '\n'.join(
            f'{i[1]:.2} : {names[i_x]}-{last_names[i_x]}-{genders[i_x]}-{dates[i_x]}'
            f' : {names[i[0] + i_x + 1]}-{last_names[i[0] + i_x + 1]}'
            f'-{genders[i[0] + i_x + 1]}-{dates[i[0] + i_x + 1]}'
            for i in indices)
        if dups:
            print(dups)
            with open("logs/results.txt", "a") as f:
                f.write(dups + '\n')
        else:
            print(f'no duplicates were found for {names[i_x]}')
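The predict helper this example relies on is not shown; a minimal sketch, assuming a Keras-style binary duplicate classifier and a hypothetical score threshold, that returns (offset, score) pairs in the shape the loop above indexes:

import numpy as np

def predict(my_model, prediction_data, threshold=0.9):
    # Hypothetical wrapper: score each candidate pair and keep the
    # (offset, score) tuples the model flags as likely duplicates.
    if not prediction_data:
        return []
    scores = my_model.predict(np.asarray(prediction_data)).ravel()
    return [(i, float(s)) for i, s in enumerate(scores) if s >= threshold]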
Example #3
def calculate():
    location = int(request.form['location'])
    area = float(request.form['area'])
    year = int(request.form['year'])
    rooms = int(request.form['rooms'])
    level = int(request.form['level'])
    state = int(request.form['state'])
    city = int(request.form['city'])

    prediction = predict(location, area, year, rooms, level, state,
                         property_model)

    # Level 0 is the ground floor ("Parter" in Polish); other levels
    # render as "Poziom {level}" ("Level {level}").
    if level == 0:
        level = "Parter"
    else:
        level = f"Poziom {level}"

    cities = ['Kraków', 'Warszawa', 'Łódź']

    locations = [
        'Bieńczyce', 'Bieżanów - Prokocim', 'Bronowice', 'Dębniki',
        'Grzegórzki', 'Krowodrza', 'Łagiewniki - Borek Fałęcki',
        'Mistrzejowice', 'Nowa Huta', 'Podgórze', 'Podgórze Duchackie',
        'Prądnik Biały', 'Prądnik Czerwony', 'Śródmieście', 'Stare Miasto',
        'Swoszowice', 'Wzgórza Krzesławickie', 'Zwierzyniec'
    ]

    # Property condition: needs renovation / to be finished / move-in ready.
    states = ['Do remontu', 'Do wykończenia', 'Do zamieszkania']

    context = {
        'prediction': prediction,
        'location': locations[location],
        'area': area,
        'year': year,
        'rooms': rooms,
        'level': level,
        'state': states[state],
        'city': cities[city]
    }

    return render_template('calculate.html', title='Home', context=context)
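The predict call above is assumed to wrap a regression model; a minimal sketch, assuming a scikit-learn style estimator and the same positional arguments (note that city is collected but, as in the original, not passed to the model):

import numpy as np

def predict(location, area, year, rooms, level, state, model):
    # Hypothetical helper: assemble one feature row in the order the
    # model was trained on and return a rounded price estimate.
    features = np.array([[location, area, year, rooms, level, state]])
    return round(float(model.predict(features)[0]), 2)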
Example #4
def update_tickets(ticket: dict) -> str:
    """
    Updates the ticket that was passed as an argument.
    """
    fieldnames = di.import_config_list(
            "azure_classifier",
            "target_fieldnames",
            ",")

    message = ""
    # !! FOR TESTING
    import datetime
    message += f"{datetime.datetime.now()}\nTEST - Original Message:\n{ticket['description']}\n\n"
    # !! END OF TESTING
    prediction = predict(
            ticket[di.get_config("azure_classifier", "field_for_prediction")],
            fieldnames)
    for label in prediction:
        message += f"\n{label} => {prediction[label]}\n"
    body = TS.get_updated_ticket_payload(message)
    return TS.put_to_ticketing_system(f"tickets/{ticket['ticket_id']}", json.dumps(body))
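TS.put_to_ticketing_system is external to this snippet; a minimal sketch of what such a call might look like, assuming a plain REST endpoint (the base URL is a placeholder, not the real service):

import requests

BASE_URL = "https://ticketing.example.com/api/"  # hypothetical endpoint

def put_to_ticketing_system(path, payload):
    # Sketch only: a JSON PUT against the ticketing REST API.
    response = requests.put(BASE_URL + path,
                            data=payload,
                            headers={"Content-Type": "application/json"},
                            timeout=30)
    response.raise_for_status()
    return response.text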
Example #5
    def predict(self):
        return predict(self)
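This one-liner only makes sense inside a class body, where the name predict resolves to a module-level function rather than the method itself; a minimal self-contained sketch (class name and scoring logic are hypothetical):

def predict(obj):
    # Hypothetical module-level scorer the method delegates to.
    return obj.value * 2

class Model:
    def __init__(self, value):
        self.value = value

    def predict(self):
        # Inside the method, `predict` is looked up in the module scope,
        # not on the class, so this call does not recurse.
        return predict(self)

print(Model(21).predict())  # 42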
Example #6
def main(argv=None):
    image_set = FLAGS.image_set
    model_name = "%s_%s" % (image_set, FLAGS.model_name)
    data_dir = "data/tab_products/%s" % image_set
    model_dir = "models/tab_products/%s" % model_name
    log_dir = "log/tab_products/%s_%d" % (model_name, int(time.time()))
    batch_size = 100  # mini-batch size
    img_width = 48  # original image width
    img_height = 48  # original image height
    img_channel = 1  # original image channel
    category_dim = 213  # master category nums
    learn_rate = 1e-3
    num_epoch = 1000
    report_step = 50

    print("Boot with ... mode: %s, model_name: %s" % (FLAGS.mode, model_name))
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    # with tf.Session(conf.remote_host_uri()) as sess:
    with tf.Session() as sess:
        global_step = tf.Variable(0, name='global_step', trainable=False)
        dropout_ratio = tf.placeholder(tf.float32, name='dropout_ratio')
        images = tf.placeholder(
            tf.float32,
            shape=[None, img_height, img_width, img_channel],
            name='images')
        labels = tf.placeholder(tf.int64, shape=[None], name='labels')

        logits = model.small_model(images, img_width, img_height, img_channel,
                                   category_dim, dropout_ratio)
        train_opt = trainer.optimizer(logits, labels, learn_rate, global_step)
        accuracy = trainer.evaluater(logits, labels)

        # Create the saver only after the model variables exist, so that
        # checkpoints actually capture them.
        saver = tf.train.Saver(max_to_keep=10)

        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)

        training_accuracy_summary = tf.summary.scalar("training_accuracy",
                                                      accuracy)
        validation_accuracy_summary = tf.summary.scalar(
            "validation_accuracy", accuracy)

        # -------- train ------------------------------------------
        restore_or_init_model(model_dir, saver, sess)

        train, valid, test = reader.open_data(data_dir, batch_size)

        if FLAGS.mode == 'console':
            from IPython import embed
            embed()
            sys.exit()

        start_time = time.time()

        for epoch in range(num_epoch):
            for i in range(len(train)):
                step = tf.train.global_step(sess, global_step)

                train_data = reader.feed_dict(data_dir, train[i], 0.5, images,
                                              labels, dropout_ratio)
                sess.run(train_opt, feed_dict=train_data)

                main_summary = sess.run(summary_op, feed_dict=train_data)
                summary_writer.add_summary(main_summary, step)

                if step % report_step == 0:
                    train_data = reader.feed_dict(data_dir, train[i], 1.0,
                                                  images, labels,
                                                  dropout_ratio)
                    valid_data = reader.feed_dict(data_dir, valid, 1.0, images,
                                                  labels, dropout_ratio)

                    valid_acc_score, valid_acc_summary = sess.run(
                        [accuracy, validation_accuracy_summary],
                        feed_dict=valid_data)
                    train_acc_score, train_acc_summary = sess.run(
                        [accuracy, training_accuracy_summary],
                        feed_dict=train_data)
                    print(
                        "epoch %d, step %d, valid accuracy %g, train accuracy %g"
                        % (epoch, step, valid_acc_score, train_acc_score))

                    summary_writer.add_summary(valid_acc_summary, step)
                    summary_writer.add_summary(train_acc_summary, step)
                    summary_writer.flush()

                    checkpoint_path = os.path.join(model_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)

            predicter.predict(sess, logits, images, labels, data_dir, valid,
                              dropout_ratio)

        end_time = time.time()
        print("Total time is %s" % (end_time - start_time))
Example #7
def work(text):
    # Guard against missing input; score everything else.
    if text is None:
        return 0.0
    return predicter.predict(text)
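Any object exposing a text-scoring predict works as the predicter here; a minimal stand-in to exercise both branches (the scoring rule is a placeholder):

class predicter:  # hypothetical stand-in for the real module
    @staticmethod
    def predict(text):
        return len(text) / 100.0  # placeholder score

print(work(None))     # 0.0, the guarded path
print(work("hello"))  # 0.05, via predicter.predict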