def __init__(self, epochs=None, batch_size=None, timestep=None, save=True):
        self.save = save
        self.myModelHandler = ModelHandler()

        # Hyperparameters: User chosen parameters
        self._epochs = epochs
        self._batch_size = batch_size
        self._timestep = timestep

    def predict(self, stock):
        from model_handler import ModelHandler
        import matplotlib.dates as mdates
        import datetime as dt
        import numpy as np
        import matplotlib.pyplot as plt
        import pandas as pd

        myModelHandler = ModelHandler()
        regressor = myModelHandler.load_json_model(stock)

        # Importing the training set
        dataset = pd.read_csv(stock.csv_name)
        dates = dataset.iloc[len(dataset) - 31:len(dataset) - 1, 0].values
        dates = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in dates]

        # Feature Scaling
        from sklearn.preprocessing import MinMaxScaler
        sc = MinMaxScaler(feature_range=(0, 1))

        dataset_test = dataset[len(dataset) - 30:]
        real_stock_price = dataset_test.iloc[:, 1:2].values

        # Getting the predicted stock price
        dataset = dataset['Open']
        inputs = dataset[len(dataset) - len(dataset_test) - 60:].values
        inputs = inputs.reshape(-1, 1)
        inputs = sc.fit_transform(inputs)

        X_test = []
        for i in range(60, 90):
            X_test.append(inputs[i - 60:i, 0])

        X_test = np.array(X_test)
        X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))

        predicted_stock_price = regressor.predict(X_test)
        print(real_stock_price)

        predicted_stock_price = sc.inverse_transform(predicted_stock_price)
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
        plt.gca().xaxis.set_major_locator(mdates.DayLocator())
        plt.plot(dates,
                 real_stock_price,
                 color='red',
                 label=f'Actual {stock.ticker} Stock Price')
        plt.plot(dates,
                 predicted_stock_price,
                 color='blue',
                 label=f'Predicted {stock.ticker} Stock Price')
        plt.gcf().autofmt_xdate()
        plt.title(f'{stock.ticker} Stock Price Prediction')
        plt.xlabel('Time')
        plt.ylabel(f'{stock.ticker} Stock Price')
        plt.legend()
        plt.show()

    def forecast(self, stock):
        """Loads the trained model, forecasts the values then displays a graph of the values"""
        # Load the trained model
        model_handler = ModelHandler()
        model = model_handler.load_json_model(stock)

        # Importing the training set
        dataset = pd.read_csv(stock.csv_name)
        dates = dataset.iloc[len(dataset) - 31:len(dataset) - 1, 0].values
        dates = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in dates]

        # Create the test dataset
        dataset_test = dataset[len(dataset) - 30:]
        real_stock_price = dataset_test.iloc[:, 1:2].values
        dataset = dataset['Open']
        inputs = dataset[len(dataset) - len(dataset_test) - 60:].values
        inputs = inputs.reshape(-1, 1)

        # Feature Scaling
        sc = MinMaxScaler(feature_range=(0, 1))
        inputs = sc.fit_transform(inputs)

        x_test = []
        x_test.append(inputs[0:60, 0])
        predicted_values = []
        for i in range(1, 31):
            x_test_np = np.array(x_test)
            x_test_np = np.reshape(x_test_np,
                                   (x_test_np.shape[0], x_test_np.shape[1], 1))
            new_data = model.predict(x_test_np)
            predicted_values.append(new_data[0])
            x_test[0] = np.delete(x_test[0], 0)
            x_test[0] = np.concatenate([x_test[0], new_data[0]])

        predicted_values = sc.inverse_transform(predicted_values)
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
        plt.gca().xaxis.set_major_locator(mdates.DayLocator())
        plt.plot(dates,
                 real_stock_price,
                 color='red',
                 label=f'Actual {stock.ticker} Stock Price')
        plt.plot(dates,
                 predicted_values,
                 color='blue',
                 label=f'Predicted {stock.ticker} Stock Price')
        plt.gcf().autofmt_xdate()
        plt.title(f'{stock.ticker} Stock Price Prediction')
        plt.xlabel('Time')
        plt.ylabel(f'{stock.ticker} Stock Price')
        plt.legend()
        plt.show()
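The forecast loop above feeds each one-step prediction back into its 60-value input window. Below is a minimal, self-contained sketch of that rolling-window update, with a toy function standing in for model.predict (all names here are illustrative, not part of the snippet above):

import numpy as np

window = np.arange(60, dtype=float)  # stand-in for the last 60 scaled 'Open' values
predicted = []
for _ in range(30):                  # 30 one-step-ahead forecasts
    next_value = window.mean()       # toy stand-in for model.predict(...)
    predicted.append(next_value)
    # Drop the oldest value and append the newest prediction, as the loop above does
    window = np.concatenate([window[1:], [next_value]])

print(len(predicted))  # 30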
Example #4
def init_context(context):
    context.logger.info("Init context...  0%")

    model = ModelHandler()
    setattr(context.user_data, 'model', model)

    context.logger.info("Init context...100%")
Example #5
def init_context(context):
    context.logger.info("Init context...  0%")

    model = ModelHandler()
    context.user_data.model = model

    context.logger.info("Init context...100%")
Example #6
    def fileLoad(self):
        self.filename = filedialog.askopenfilename()

        if self.filename != '':
            self.lb.config(text="File selected: " + self.filename)
            self.modelHandler1 = ModelHandler(self.filename)
            self.modelDrawer()

        else:
            self.lb.config(text="No file is selected.")
Example #7
def test_handle(model_hdlr: ModelHandler,
                rqst: Request,
                context: Context,
                num_samples: int,
                quantiles: List[str]):
    model_hdlr.initialize(context)
    results_list = model_hdlr.handle([rqst], context)
    
    # Sanity-check each JSON line of each request's results.
    for results_list_request in results_list:
        for line in io.StringIO(results_list_request):
            d = json.loads(line)
            for quantile in quantiles:
                assert quantile in d["quantiles"]
                assert len(d["quantiles"][quantile]) == model_hdlr.mx_model.prediction_length
            assert ("mean" in d) and (len(d["mean"]) == model_hdlr.mx_model.prediction_length)

    # Print results; need to run pytest -v -rA --tb=short ...
    print(results_list)
    return results_list
Example #8
def init_context(context):
    context.logger.info("Init context...  0%")

    # Read labels
    functionconfig = yaml.safe_load(open("/opt/nuclio/function.yaml"))
    labels_spec = functionconfig['metadata']['annotations']['spec']
    labels = {item['id']: item['name'] for item in json.loads(labels_spec)}

    # Read the DL model
    model = ModelHandler(labels)
    setattr(context.user_data, 'model', model)

    context.logger.info("Init context...100%")
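The comprehension above assumes the `spec` annotation holds a JSON array of objects with `id` and `name` keys. A self-contained illustration with made-up labels:

import json

labels_spec = '[{"id": 0, "name": "person"}, {"id": 1, "name": "car"}]'  # illustrative
labels = {item['id']: item['name'] for item in json.loads(labels_spec)}
print(labels)  # {0: 'person', 1: 'car'}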
Example #9
class Input(object):
    def __init__(self, model_file, syllable_file, syl2chinese):
        self.modelhandler = ModelHandler(model_file)
        self.syllablehandler = SyllableHandler(syllable_file)
        self.lexihandler = LexiHandler(syl2chinese)
        self.Init()

    def Init(self):
        self.syllablehandler.Init()
        self.lexihandler.Init()
        self.modelhandler.Init()

    def SpellSplit(self, pinyin):
        return self.syllablehandler.SpellSplit(pinyin)

    def SpellToChi(self, spells):
        """
        改进字典树 拼音转汉字
        """
        results = list()
        ch = self.lexihandler.GetChineseFromSpells(spells)
        if ch:
            results.append(ch)
        else:
            # Not found as a full phrase; fall back to per-syllable lookup
            for i in spells:
                res = self.lexihandler.GetChineseFromSpells([i,])
                if res:
                    results.append(res)
        return results


    def GetChineseFromPinyin(self, pinyin):
        spelllist = self.SpellSplit(pinyin)
        return self.SpellToChi(spelllist)

    def GetProba(self, f, s):
        return self.modelhandler.GetChineseProb(f, s)
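SpellToChi tries a whole-phrase lookup first and only falls back to per-syllable lookups when that fails. A self-contained sketch of the same fallback logic, with a toy dictionary standing in for LexiHandler:

# Toy lexicon standing in for LexiHandler.GetChineseFromSpells
lexicon = {('ni', 'hao'): '你好', ('ni',): '你', ('hao',): '好'}

def spell_to_chi(spells):
    results = []
    ch = lexicon.get(tuple(spells))  # whole-phrase lookup
    if ch:
        results.append(ch)
    else:
        for syl in spells:           # per-syllable fallback
            res = lexicon.get((syl,))
            if res:
                results.append(res)
    return results

print(spell_to_chi(['ni', 'hao']))  # ['你好']
print(spell_to_chi(['hao', 'ni']))  # ['好', '你'] via the fallback path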
Example #10
def init_context(context):
    context.logger.info("Init context...  0%")

    # Read labels
    with open("/opt/nuclio/function.yaml", 'rb') as function_file:
        functionconfig = yaml.safe_load(function_file)
    labels_spec = functionconfig['metadata']['annotations']['spec']
    labels = {item['id']: item['name'] for item in json.loads(labels_spec)}

    # Read the DL model
    model = ModelHandler(labels)
    context.user_data.model = model

    context.logger.info("Init context...100%")
Example #11
    def handle(self, **kwargs):
        cqfs = kwargs['cqfs']

        handler_2 = ModelHandler(successor=None)
        handler_1 = ValidateVectorHandler(successor=handler_2)

        try:
            weight_path = cqfs.dlg.ignicao_line_entrada_formula.text()
            if not os.path.isfile(weight_path):
                raise RuntimeError('Weight file is mandatory.')

            with open(weight_path, 'r') as stream:
                weight = yaml.safe_load(stream)  # safe_load avoids constructing arbitrary objects

                if weight is None or len(weight) == 0:
                    raise RuntimeError('Fail to load weight file.')

                kwargs['weight'] = weight
                handler_1.handle(**kwargs)
        except Exception as e:
            error_msg = u'{} => {}'.format(unicode(e),
                                           unicode(traceback.format_exc()))
            self.logger.fatal(error_msg)
            cqfs.show_error(error_msg)
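The two handlers above form a chain of responsibility: ValidateVectorHandler runs first and hands the enriched kwargs to its successor, the ModelHandler. A minimal sketch of that successor pattern (the handler classes here are illustrative stand-ins, not the project's real ones):

class Handler(object):
    def __init__(self, successor=None):
        self.successor = successor

    def handle(self, **kwargs):
        self.process(**kwargs)
        if self.successor is not None:
            self.successor.handle(**kwargs)  # pass the request down the chain

class ValidateStep(Handler):
    def process(self, **kwargs):
        print('validating {}'.format(sorted(kwargs)))

class ModelStep(Handler):
    def process(self, **kwargs):
        print('running model')

handler_2 = ModelStep(successor=None)
handler_1 = ValidateStep(successor=handler_2)
handler_1.handle(weight={'a': 1.0})  # validating ['weight'] / running model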
Example #12
def compute_bert_embedding():
    args = coqa_parser()
    handler = ModelHandler(args)
Example #13
def main(args):
    print_config(args)
    set_random_seed(args['random_seed'])
    model = ModelHandler(args)
    model.train()
    model.test()
Example #14
    def __init__(self, model_file, syllable_file, syl2chinese):
        self.modelhandler = ModelHandler(model_file)
        self.syllablehandler = SyllableHandler(syllable_file)
        self.lexihandler = LexiHandler(syl2chinese)
        self.Init()
Example #15
def train(mode, base, set_epochs=30):

    print("===== mode: {} | base: {} =====".format(mode, base))

    cwd = os.getcwd()
    print("current working dir: ", cwd)

    cnn_dir = os.path.dirname(cwd)

    if mode == 'integrated':
        data_dir = os.path.join(cnn_dir, "dogs_vs_cats_integrated")
    elif mode == 'native':
        data_dir = os.path.join(cnn_dir, "dogs_vs_cats_smaller")
    else:
        raise ValueError("mode must be 'integrated' or 'native'")
    train_dir = os.path.join(data_dir, "train")  # global
    validation_dir = os.path.join(data_dir, "validation")  # global
    print("train data is in ... ", train_dir)
    print("validation data is in ...", validation_dir)

    # make log dir -----
    log_dir = os.path.join(cwd, 'comp_log')
    os.makedirs(log_dir, exist_ok=True)
    child_log_dir = os.path.join(log_dir, "{}_{}".format(mode, base))
    os.makedirs(child_log_dir, exist_ok=True)

    dh = DaHandler()
    train_generator = dh.dataGeneratorFromDir(target_dir=train_dir)
    validation_generator = dh.dataGeneratorFromDir(target_dir=validation_dir)

    data_checker, label_checker = next(train_generator)

    print("data_checker shape : ", data_checker.shape)
    print("label_checker shape : ", label_checker.shape)

    INPUT_SIZE = data_checker.shape[1]
    print("INPUT_SIZE: ", INPUT_SIZE)

    CHANNEL = data_checker.shape[3]
    print("set channel : ", CHANNEL)

    batch_size = data_checker.shape[0]
    print("batch_size : ", batch_size)

    mh = ModelHandler(INPUT_SIZE, CHANNEL)

    if base == 'mymodel':
        model = mh.buildMyModel()
    elif base == 'mnv1':
        model = mh.buildTlearnModel(base='mnv1')
    else:
        raise ValueError("base must be 'mymodel' or 'mnv1'")

    model.summary()

    steps_per_epoch = train_generator.n // batch_size
    validation_steps = validation_generator.n // batch_size
    print(steps_per_epoch, " [steps / epoch]")
    print(validation_steps, " (validation steps)")

    if mode == 'native':
        set_epochs *= 2

    history = model.fit_generator(train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=set_epochs,
                                  validation_data=validation_generator,
                                  validation_steps=validation_steps,
                                  verbose=1)

    # save model & weights
    model_file = os.path.join(child_log_dir,
                              '{}_{}_model.h5'.format(mode, base))
    model.save(model_file)

    # save history
    history_file = os.path.join(child_log_dir,
                                '{}_{}_history.pkl'.format(mode, base))
    with open(history_file, 'wb') as p:
        pickle.dump(history.history, p)

    print("\nexport logs in ", child_log_dir)

    # Return values -----
    acc_list = history.history['accuracy']
    last_acc = acc_list[-1]
    print("\nlast accuracy: ", last_acc)
    val_acc_list = history.history['val_accuracy']
    last_val_acc = val_acc_list[-1]
    print("last validation accuracy: ", last_val_acc)

    return last_acc, last_val_acc
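The steps_per_epoch values above come from plain floor division of generator sample counts by the batch size, which silently drops the last partial batch; for instance, with hypothetical counts:

n_train, batch_size = 2000, 32                 # illustrative counts, not from the snippet
steps_per_epoch = n_train // batch_size
print(steps_per_epoch)                         # 62 full batches per epoch
print(n_train - steps_per_epoch * batch_size)  # 16 samples left over each epoch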
Example #16
import os
import sys

sys.path.append(os.pardir)

# -----
from da_handler import DaHandler
dah = DaHandler()

aug_list = dah.imgaug_mode_list
aug_list.pop(0)

# -----
from data_handler import DataHandler
dth = DataHandler()

# -----
from model_handler import ModelHandler
mh = ModelHandler(224, 3)  # input_size=224, ch=3 (shared across all runs)
# -----
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

from keras.callbacks import EarlyStopping


# -----
def data_create():

    print("Do while below -----\n", aug_list)

    for aug in aug_list:
Example #17
parser.add_argument('--batch-size', type=int, default=2)
parser.add_argument('--shuffle', type=str2bool, default=True)
parser.add_argument('--max_epochs', type=int, default=20)
parser.add_argument('--lr', type=float, default=2e-4)
parser.add_argument('--grad_clip', type=float, default=1.0)
parser.add_argument('--verbose',
                    type=int,
                    default=200,
                    help="print after verbose epochs")
parser.add_argument('--gradient_accumulation_steps',
                    type=int,
                    default=2,
                    help="Number of update steps to accumulate before performing a backward/update pass.")
parser.add_argument("--adam_epsilon",
                    default=1e-8,
                    type=float,
                    help="Epsilon for Adam optimizer.")

args = vars(parser.parse_args())

if args['model_name'] == 'SpanBERT':
    download_model()
    args['model_path'] = 'tmp_'

# TODO: cuda check

handler = ModelHandler(args)
handler.train()
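With the defaults shown above, gradient accumulation doubles the effective batch size; the arithmetic is simply:

batch_size = 2               # --batch-size default above
accumulation_steps = 2       # --gradient_accumulation_steps default above
effective_batch = batch_size * accumulation_steps
print(effective_batch)       # 4 examples contribute to each optimizer update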
Example #18
def test_preprocess(model_hdlr: ModelHandler,
                    rqst: Request):
    results = model_hdlr.preprocess([rqst])
    assert len(results) == 1
Example #19
def test_initialize(model_hdlr: ModelHandler,
                    context: Context):
    model_hdlr.initialize(context)
    predictor = model_hdlr.mx_model
    assert isinstance(predictor, Predictor)
Example #20
def test_load_model(model_hdlr: ModelHandler):
    predictor = model_hdlr.load_model("../models/DeepAREstimator")
    assert isinstance(predictor, Predictor)
Example #21
@pytest.fixture  # consumed by the test_* functions above through their model_hdlr argument
def model_hdlr():
    model_hdlr = ModelHandler()
    return model_hdlr

class LSTMModel:
    def __init__(self, epochs=None, batch_size=None, timestep=None, save=True):
        self.save = save
        self.myModelHandler = ModelHandler()

        # Hyperparameters: User chosen parameters
        self._epochs = epochs
        self._batch_size = batch_size
        self._timestep = timestep

    # Properties
    @property
    def epochs(self):
        return self._epochs

    @epochs.setter
    def epochs(self, x):
        self._epochs = x

    @property
    def batch_size(self):
        return self._batch_size

    @batch_size.setter
    def batch_size(self, x):
        self._batch_size = x

    @property
    def timestep(self):
        return self._timestep

    @timestep.setter
    def timestep(self, x):
        self._timestep = x

    def create_network(self, x_train, y_train, stock):
        """Creates the network, adds layers then trains the network."""
        model = Sequential()
        model.add(
            cLSTM(units=50,
                  return_sequences=True,
                  input_shape=(x_train.shape[1], 1)))
        model.add(Dropout(0.2))
        model.add(cLSTM(units=50, return_sequences=True))
        model.add(Dropout(0.2))
        model.add(cLSTM(units=50, return_sequences=True))
        model.add(Dropout(0.2))
        model.add(cLSTM(units=50))
        model.add(Dropout(0.2))
        model.add(Dense(units=1))
        model.compile(optimizer='adam', loss='mean_squared_error')
        model.fit(x_train,
                  y_train,
                  epochs=self.epochs,
                  batch_size=self.batch_size)  # Trains the model
        self.myModelHandler.save_model_as_json(
            model, stock)  # Saves the model as .json and weights as .h5

    def prepare_dataset(self, stock):
        """Downloads the stock data via yfinance then creates the training datasets."""
        dataset = pd.DataFrame(yf.download(stock))
        # Training dataset is the whole dataset minus the 30-day test dataset
        dataset_train = dataset[0:len(dataset) - 30]
        # Use the open values only, selected by name (positional column 1 would be 'High' in yfinance data)
        training_set = dataset_train[['Open']].values

        # Feature Scaling
        sc = MinMaxScaler(feature_range=(0, 1))  # Scale all values into the range (0, 1)
        training_set_scaled = sc.fit_transform(training_set)

        # Create an input data structure with a timestep
        x_train = []
        y_train = []
        for i in range(self.timestep, len(dataset) - 30):
            x_train.append(training_set_scaled[i - self.timestep:i, 0])
            y_train.append(training_set_scaled[i, 0])
        x_train, y_train = np.array(x_train), np.array(y_train)

        # Reshaping
        x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))

        return x_train, y_train

    def testprepare_dataset(self, stock):
        """Downloads the stock data via yfinance, differences it, then creates the training datasets."""
        dataset = pd.DataFrame(yf.download(stock))
        dataset = dataset.diff()  # yfinance data is already indexed by Date
        dataset = dataset.drop(dataset.index[0])  # Drop the NaN row produced by diff()
        # Training dataset is the whole dataset minus the 30-day test dataset
        dataset_train = dataset[0:len(dataset) - 30]
        # Use the open values only, selected by name (positional column 1 would be 'High' in yfinance data)
        training_set = dataset_train[['Open']].values

        # Feature Scaling
        sc = MinMaxScaler(feature_range=(0, 1))  # Scale all values into the range (0, 1)
        training_set_scaled = sc.fit_transform(training_set)

        # Create an input data structure with a timestep
        x_train = []
        y_train = []
        for i in range(self.timestep, len(dataset) - 30):
            x_train.append(training_set_scaled[i - self.timestep:i, 0])
            y_train.append(training_set_scaled[i, 0])
        x_train, y_train = np.array(x_train), np.array(y_train)

        # Reshaping
        x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))

        return x_train, y_train

    def forecast(self, stock):
        """Loads the trained model, forecasts the values then displays a graph of the values"""
        # Load the trained model
        model_handler = ModelHandler()
        model = model_handler.load_json_model(stock)

        # Importing the training set
        dataset = pd.read_csv(stock.csv_name)
        dates = dataset.iloc[len(dataset) - 31:len(dataset) - 1, 0].values
        dates = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in dates]

        # Create the test dataset
        dataset_test = dataset[len(dataset) - 30:]
        real_stock_price = dataset_test.iloc[:, 1:2].values
        dataset = dataset['Open']
        inputs = dataset[len(dataset) - len(dataset_test) - 60:].values
        inputs = inputs.reshape(-1, 1)

        # Feature Scaling
        sc = MinMaxScaler(feature_range=(0, 1))
        inputs = sc.fit_transform(inputs)  # Note: the scaler is fit on this window rather than reused from training

        x_test = []
        x_test.append(inputs[0:60, 0])
        predicted_values = []
        for i in range(1, 31):
            x_test_np = np.array(x_test)
            x_test_np = np.reshape(x_test_np,
                                   (x_test_np.shape[0], x_test_np.shape[1], 1))
            new_data = model.predict(x_test_np)
            predicted_values.append(new_data[0])
            x_test[0] = np.delete(x_test[0], 0)
            x_test[0] = np.concatenate([x_test[0], new_data[0]])

        predicted_values = sc.inverse_transform(predicted_values)
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
        plt.gca().xaxis.set_major_locator(mdates.DayLocator())
        plt.plot(dates,
                 real_stock_price,
                 color='red',
                 label=f'Actual {stock.ticker} Stock Price')
        plt.plot(dates,
                 predicted_values,
                 color='blue',
                 label=f'Predicted {stock.ticker} Stock Price')
        plt.gcf().autofmt_xdate()
        plt.title(f'{stock.ticker} Stock Price Prediction')
        plt.xlabel('Time')
        plt.ylabel(f'{stock.ticker} Stock Price')
        plt.legend()
        plt.show()

    def predict(self, stock):
        from model_handler import ModelHandler
        import matplotlib.dates as mdates
        import datetime as dt
        import numpy as np
        import matplotlib.pyplot as plt
        import pandas as pd

        myModelHandler = ModelHandler()
        regressor = myModelHandler.load_json_model(stock)

        # Importing the training set
        dataset = pd.read_csv(stock.csv_name)
        dates = dataset.iloc[len(dataset) - 31:len(dataset) - 1, 0].values
        dates = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in dates]

        # Feature Scaling
        from sklearn.preprocessing import MinMaxScaler
        sc = MinMaxScaler(feature_range=(0, 1))

        dataset_test = dataset[len(dataset) - 30:]
        real_stock_price = dataset_test.iloc[:, 1:2].values

        # Getting the predicted stock price
        dataset = dataset['Open']
        inputs = dataset[len(dataset) - len(dataset_test) - 60:].values
        inputs = inputs.reshape(-1, 1)
        inputs = sc.fit_transform(inputs)  # Note: the scaler is fit on the test window here, not reused from training

        X_test = []
        for i in range(60, 90):
            X_test.append(inputs[i - 60:i, 0])

        X_test = np.array(X_test)
        X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))

        predicted_stock_price = regressor.predict(X_test)
        print(real_stock_price)

        predicted_stock_price = sc.inverse_transform(predicted_stock_price)
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
        plt.gca().xaxis.set_major_locator(mdates.DayLocator())
        plt.plot(dates,
                 real_stock_price,
                 color='red',
                 label=f'Actual {stock.ticker} Stock Price')
        plt.plot(dates,
                 predicted_stock_price,
                 color='blue',
                 label=f'Predicted {stock.ticker} Stock Price')
        plt.gcf().autofmt_xdate()
        plt.title(f'{stock.ticker} Stock Price Prediction')
        plt.xlabel('Time')
        plt.ylabel(f'{stock.ticker} Stock Price')
        plt.legend()
        plt.show()
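A hedged end-to-end usage sketch for the LSTMModel class above. The hyperparameters are illustrative, and SimpleNamespace stands in for whatever stock object the methods above expect (something exposing ticker and csv_name):

from types import SimpleNamespace

stock = SimpleNamespace(ticker='AAPL', csv_name='AAPL.csv')  # illustrative stand-in

model = LSTMModel(epochs=100, batch_size=32, timestep=60)    # illustrative hyperparameters
x_train, y_train = model.prepare_dataset(stock.ticker)       # downloads data via yfinance
model.create_network(x_train, y_train, stock)                # trains and saves .json + .h5
model.forecast(stock)                                        # 30-day rolling forecast plot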