Example No. 1
    def __init__(self):
        self.test = utils.fix_path(FLAGS.test_dir)
        self.train = utils.fix_path(FLAGS.train_dir)
        self.val = utils.fix_path(FLAGS.val_dir)
        self.shuffle_buffer = 1000
        self.to_yuv = FLAGS.to_yuv
        self.input_shape = tuple(FLAGS.train_crop)

        assert os.path.exists(self.train), \
            "Path train_dir: {} does not exist".format(self.train)

        assert os.path.exists(self.val), \
            "Path val_dir: {} does not exist".format(self.val)
Example No. 2
def parse_config(FILENAME):
    # SafeConfigParser is deprecated in Python 3; configparser.ConfigParser is the drop-in replacement
    scpr = SafeConfigParser()
    scpr.read(utils.fix_path(FILENAME))

    SMTP_SERVER = scpr.get("smtp", "server")
    SMTP_PORT = scpr.get("smtp", "port")
    USERNAME = scpr.get("account", "username")
    PASSWORD = scpr.get("account", "password")
    CSVPATH = utils.fix_path(scpr.get("file", "mails_data"))

    return {'user': USERNAME,
            'passwd': PASSWORD,
            'server': SMTP_SERVER,
            'port': SMTP_PORT,
            'csv_path': CSVPATH}
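A rough usage sketch for the dictionary returned above, assuming it is fed straight into smtplib (the "mail.ini" filename and the SMTP/TLS calls are illustrative assumptions, not part of the example):

import smtplib

cfg = parse_config("mail.ini")  # hypothetical config filename
with smtplib.SMTP(cfg["server"], int(cfg["port"])) as smtp:  # port comes back as a string
    smtp.starttls()
    smtp.login(cfg["user"], cfg["passwd"])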
Example No. 3
def train_model(request_dict: dict = None):
    """
    Train the model selected among the options specified in project_conf.json.
    :param request_dict: request posted via the API
    :return: mae, after saving the updated model
    """

    model = None
    data = None  # stays None in train mode so the model loads its own data
    if request_dict:
        data = pd.DataFrame(request_dict["bitcoin_last_minute"], index=[0])
    else:
        logging.info("Train mode.")

    model_name = conf_object.project_conf["model"]

    if model_name == 'rfregressor':
        from models.rfregressor import RFregressor
        model = RFregressor()

    if model_name == 'neuralnet':
        from models.neural_net import NeuralNet
        model = NeuralNet(data=data)

    if model_name == 'lstm':
        from models.lstm import LSTM
        model = LSTM(data=data)

    if model is None:
        raise ValueError("Unknown model '{}' in project_conf.json".format(model_name))

    mae = model.eval()

    # save model
    with open(os.path.join(fix_path(), 'models/model.pkl'), 'wb') as f:
        pickle.dump(model, f)

    return mae
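A minimal call sketch for train_model, assuming a POST body keyed by "bitcoin_last_minute" as the code above expects (the inner field names are hypothetical placeholders):

# hypothetical request body; only the outer "bitcoin_last_minute" key comes from the code above
request_dict = {"bitcoin_last_minute": {"open": 27000.0, "close": 27010.0, "volume": 12.5}}
mae = train_model(request_dict)
print("MAE:", mae)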
Example No. 4
def main():
    if fix_path(FLAGS.data_set_path, is_folder=False):
        data_set = get_data_set()

        data_set.save(FLAGS.data_set_path)
    else:
        logging.debug("didn't create dataset")
Example No. 5
def create_table(csvfile, db_uri):
    engine = create_engine(db_uri, echo=True)
    conn = engine.connect()
    trans = conn.begin()
    db.metadata.create_all(engine)

    with open(fix_path(base_path, csvfile), mode="r") as p:
        csv_data = csv.DictReader(p)
        if csvfile.endswith("yearlytempavg1800.csv"):
            try:
                for row in csv_data:
                    conn.execute(Temperature.__table__.insert(), row)
                trans.commit()
            except:
                # roll back the partial insert on any error and re-raise
                trans.rollback()
                raise
        else:
            try:
                for row in csv_data:
                    row["Date"] = datetime.strptime(row["Date"], "%Y-%m-%d")
                    conn.execute(Location.__table__.insert(), row)
                trans.commit()
            except Exception as e:
                print(e)
                trans.rollback()
                raise
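A hedged call sketch for create_table; the CSV name comes from the check in the code above, and the sqlite path mirrors the config examples further down this page (both are illustrative):

create_table("yearlytempavg1800.csv",
             "sqlite:///" + fix_path(base_path, "db/yearlyTempAvg1800.sqlite"))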
Example No. 6
class DevelopmentConfig(Config):
    DEBUG = 1
    FLASK_DEBUG = 1
    SQLALCHEMY_ECHO = 1
    TEMPLATES_AUTO_RELOAD = 1
    SQLALCHEMY_DATABASE_URI = os.getenv(
        "DEV_DATABASE_URI", "sqlite:///" +
        fix_path(basedir, "db/belly_button_biodiversity.sqlite"))
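Such a config class is usually handed to the Flask app at startup; a generic sketch, assuming an ordinary Flask application object (the creation step below is not part of the example):

from flask import Flask

app = Flask(__name__)
app.config.from_object(DevelopmentConfig)
# Setting DEV_DATABASE_URI in the environment overrides the sqlite fallback shown above.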
Example No. 7
def main():
    LOG_PATH = "birthreminder.log"
    logging.basicConfig(
        level=logging.DEBUG,
        filename=utils.fix_path(LOG_PATH),
        format="%(asctime)s %(name)s %(levelname)s %(message)s")
    today = datetime.today()
    stoday = ddmm_from(today)
    log.info("Checking %s ..." % stoday + "...")
    ctd, ctmr = check_birthday(today)
    log.info("Checked %s , %d today - %d tomorrow" % (stoday, ctd, ctmr))

    send_mail(["*****@*****.**"], "Checked", ddmm_from(datetime.now()))
Example No. 8
def main():
    LOG_PATH = "birthreminder.log"
    logging.basicConfig(
        level=logging.DEBUG,
        filename=utils.fix_path(LOG_PATH),
        format="%(asctime)s %(name)s %(levelname)s %(message)s"
    )
    today = datetime.today()
    stoday = ddmm_from(today)
    log.info("Checking %s ..." % stoday + "...")
    ctd, ctmr = check_birthday(today)
    log.info("Checked %s , %d today - %d tomorrow" % (stoday, ctd, ctmr))

    send_mail(["*****@*****.**"],
              "Checked", ddmm_from(datetime.now()))
Example No. 9
def predict_api():

    try:
        model = pd.read_pickle(os.path.join(fix_path(), "models/model.pkl"))
        logging.info("RFregressor version: ",
                     pkg_resources.get_distribution("scikit-learn"))

        # observation = observation.encode()  # this code is for scenario where data is encoded as str in POST
        # observation = pickle.loads(base64.b64decode(observation))
        # request = open('request.json', 'rb')  # todo - comment out if not testing locally
        observation = request.json

        observation = process_request(observation=observation)
        pred = model.get_prediction(observation)

        return jsonify({"bitcoin prediction": str(pred)})

    except Exception as ex:
        logging.error("No model was found, so run /train: %s", ex)
Example No. 10
from tkinter import *
import time
import gaugelib
import utils

# region static for recurrent function
g_value = 0
x = 0
# z = 0
store_data = []

# endregion

# region Excel path fixing
file_path = utils.fix_path()

# endregion


def read_serial_and_save():
    global x
    g_value = utils.read_arduino_serial()
    global store_data
    store_data += [g_value]
    p1.set_value(int(g_value))
    x += 1
    if x > 1000:
        x = 0
    win.after(50, read_serial_and_save)
    # utils.save_to_excel(store_data, str(file_path)) # for automatic save
Example No. 11
class DevelopmentConfig(Config):
    DEBUG = 1
    FLASK_DEBUG = 1
    SQLALCHEMY_ECHO = 1
    TEMPLATES_AUTO_RELOAD = 1
    SQLALCHEMY_DATABASE_URI = os.getenv("DEV_DATABASE_URI", "sqlite:///" + fix_path(basedir, "db/final-dev.sqlite"))
Example No. 12
class ProductionConfig(Config):
    LOG_TO_STDOUT = 1
    SQLALCHEMY_DATABASE_URI = os.getenv("DATABASE_URI", "sqlite:///" + fix_path(basedir, "db/yearlyTempAvg1800.sqlite"))
    SQLALCHEMY_BINDS = {
        "finalDB": "sqlite:///" + fix_path(basedir, "db/final.sqlite"),
        }
Example No. 13
class ProductionConfig(Config):
    LOG_TO_STDOUT = 1
    SQLALCHEMY_DATABASE_URI = os.getenv(
        "DATABASE_URI", "sqlite:///" +
        fix_path(basedir, "db/belly_button_biodiversity.sqlite"))
Example No. 14
def main(argv):

    tf.random.set_seed(FLAGS.seed)

    if FLAGS.tbdir is not None:
        summary_writers = utils.create_summary_writers(
            utils.fix_path(FLAGS.tbdir))

    # prepare dataset
    dataset = datasets.get_dataset()()
    input_shape = dataset.get_input_shape()

    # Create Nets and Optimizers
    encoder_decoder = nets.encoder_decoder(
        input_shape=input_shape,
        msg_length=FLAGS.msg_length,
        noise_layers=FLAGS.noise_layers,
        n_convbnrelu_encoder=FLAGS.n_convbnrelu_encoder,
        n_convbnrelu_decoder=FLAGS.n_convbnrelu_decoder)

    discriminator = nets.discriminator(
        input_shape=input_shape, n_convbnrelu=FLAGS.n_convbnrelu_discriminator)

    optimizer_encoder_decoder = tf.keras.optimizers.Adam(1e-3)
    optimizer_discriminator = tf.keras.optimizers.Adam(1e-3)

    # global step / epoch variables
    step = tf.Variable(0, dtype=tf.int64)
    epoch = tf.Variable(0, dtype=tf.int64)

    # prepare checkpointer
    ckpt = tf.train.Checkpoint(
        step=step,
        epoch=epoch,
        optimizer_encoder_decoder=optimizer_encoder_decoder,
        optimizer_discriminator=optimizer_discriminator,
        encoder_decoder=encoder_decoder,
        discriminator=discriminator)

    ckpt_manager = tf.train.CheckpointManager(ckpt,
                                              utils.fix_path(FLAGS.ckptdir),
                                              max_to_keep=FLAGS.keep_ckpts)

    if ckpt_manager.latest_checkpoint is not None and FLAGS.load_from_ckpt:
        ckpt.restore(ckpt_manager.latest_checkpoint)
        logging.info("Loading model from checkpoint: {}".format(
            ckpt_manager.latest_checkpoint))

    # Metrics Tracker
    metrics_train = metrics.MetricsTracker()
    metrics_val = metrics.MetricsTracker()

    while epoch < FLAGS.epochs:

        dataset_train = dataset.create_train_dataset()

        for epoch_step, cover_images in enumerate(dataset_train):

            messages = tf.random.uniform([FLAGS.batch_size, FLAGS.msg_length],
                                         minval=0,
                                         maxval=2,
                                         dtype=tf.int32)
            messages = tf.cast(messages, dtype=tf.float32)

            time_start = time.time()
            outputs = steps.train(
                cover_images=cover_images,
                messages=messages,
                encoder_decoder=encoder_decoder,
                discriminator=discriminator,
                training=True,
                optimizer_encoder_decoder=optimizer_encoder_decoder,
                optimizer_discriminator=optimizer_discriminator)

            ms_per_step = (time.time() - time_start) * 1000.0
            ms_per_sample = ms_per_step / FLAGS.batch_size

            # Write step summaries
            is_summary_step = (step.numpy() % FLAGS.summary_freq) == 0
            if is_summary_step:

                step_losses = losses.step_loss(
                    cover_images,
                    messages,
                    encoder_decoder_output=outputs['encoder_decoder'],
                    discriminator_on_cover=outputs['discriminator_on_cover'],
                    discriminator_on_encoded=outputs[
                        'discriminator_on_encoded'])

                metrics_train.update(
                    step_losses,
                    messages,
                    encoder_decoder_output=outputs['encoder_decoder'],
                    discriminator_on_cover=outputs['discriminator_on_cover'],
                    discriminator_on_encoded=outputs[
                        'discriminator_on_encoded'])

                metrics_train_results = metrics_train.results()
                metrics_train.reset()

                with summary_writers['train'].as_default():
                    for _name, _value in metrics_train_results.items():
                        tf.summary.scalar(_name, _value, step=step)

                    tf.summary.scalar('ms_per_step', ms_per_step, step=step)

                    tf.summary.scalar('ms_per_sample',
                                      ms_per_sample,
                                      step=step)

            step.assign_add(1)

        ckpt_save_path = ckpt_manager.save()
        logging.info("Saved model after epoch {} to {}".format(
            epoch.numpy(), ckpt_save_path))

        # Training Loss
        logging.info("Epoch {} Stats".format(epoch.numpy()))
        logging.info("Training Stats ===========================")
        for _name, _value in metrics_train_results.items():
            logging.info("{}: {:.4f}".format(_name, _value))

        # Evaluate
        dataset_val = dataset.create_val_dataset()

        for cover_images in dataset_val:

            messages = utils.create_messages(batch_size=cover_images.shape[0],
                                             msg_length=FLAGS.msg_length)

            # messages = tf.random.uniform(
            #     [FLAGS.batch_size, FLAGS.msg_length],
            #     minval=0, maxval=2, dtype=tf.int32)
            # messages = tf.cast(messages, dtype=tf.float32)

            outputs = steps.train(cover_images=cover_images,
                                  messages=messages,
                                  encoder_decoder=encoder_decoder,
                                  discriminator=discriminator,
                                  training=False)

            losses_val_step = losses.step_loss(
                cover_images,
                messages,
                encoder_decoder_output=outputs['encoder_decoder'],
                discriminator_on_cover=outputs['discriminator_on_cover'],
                discriminator_on_encoded=outputs['discriminator_on_encoded'])

            metrics_val.update(
                losses_val_step,
                messages,
                encoder_decoder_output=outputs['encoder_decoder'],
                discriminator_on_cover=outputs['discriminator_on_cover'],
                discriminator_on_encoded=outputs['discriminator_on_encoded'])

        metrics_val_results = metrics_val.results()
        metrics_val.reset()

        logging.info("Validation Stats ===========================")
        with summary_writers['val'].as_default():
            for _name, _value in metrics_val_results.items():
                tf.summary.scalar(_name, _value, step=step)
                logging.info("{}: {:.4f}".format(_name, _value))

        messages = utils.create_messages(batch_size=cover_images.shape[0],
                                         msg_length=FLAGS.msg_length)

    encoder_decoder_output = encoder_decoder(
        inputs={
            'cover_image': cover_images,
            'message': messages
        },
        training=False)

        # write example images to Summaries
        with summary_writers['val'].as_default():

            transform_fn = None

            if FLAGS.to_yuv:
                transform_fn = tf.image.yuv_to_rgb

            utils.summary_images(
                cover=cover_images,
                encoded=encoder_decoder_output['encoded_image'],
                transmitted_encoded=encoder_decoder_output[
                    'transmitted_encoded_image'],
                transmitted_cover=encoder_decoder_output[
                    'transmitted_cover_image'],
                step=step,
                transform_fn=transform_fn)

        epoch.assign_add(1)
Example No. 15
def fix_paths():
    fix_path(FLAGS.checkpoint_path)
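All of the examples above rely on a fix_path helper whose implementation is not shown on this page. Purely as an assumption about its intent, a minimal sketch might resolve the given parts against the project directory and make sure the folder exists:

import os

def fix_path(*parts, is_folder=True):
    # Hypothetical sketch only; the real utils.fix_path may behave differently.
    base = os.path.dirname(os.path.abspath(__file__))
    path = os.path.join(base, *parts)  # absolute parts (e.g. basedir) override base
    target_dir = path if is_folder else os.path.dirname(path)
    if target_dir:
        os.makedirs(target_dir, exist_ok=True)
    return path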