Example #1
File: main.py Project: z80020100/cvat
def init_context(context):
    context.logger.info("Init context...  0%")

    model = ModelHandler()
    setattr(context.user_data, 'model', model)

    context.logger.info("Init context...100%")
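The model stored on context.user_data in init_context is later read back in the function's handler entry point. A minimal sketch of a matching nuclio handler, assuming the request carries a base64-encoded image and that ModelHandler exposes an infer method (both assumptions, not shown in this example):

import base64
import io
import json

from PIL import Image

def handler(context, event):
    # Retrieve the ModelHandler stored by init_context
    model = context.user_data.model

    # Decode the image from the request body (field name is an assumption)
    data = event.body
    image = Image.open(io.BytesIO(base64.b64decode(data["image"])))

    # Run inference and return the results as JSON (infer() is an assumption)
    results = model.infer(image)
    return context.Response(body=json.dumps(results),
                            headers={},
                            content_type='application/json',
                            status_code=200)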
Example #2
File: main.py Project: quuhua911/cvat
def init_context(context):
    context.logger.info("Init context...  0%")

    model = ModelHandler()
    context.user_data.model = model

    context.logger.info("Init context...100%")
Example #3
    def __init__(self, epochs=None, batch_size=None, timestep=None, save=True):
        self.save = save
        self.myModelHandler = ModelHandler()

        # Hyperparameters: User chosen parameters
        self._epochs = epochs
        self._batch_size = batch_size
        self._timestep = timestep
Example #4
    def predict(self, stock):
        from model_handler import ModelHandler
        import matplotlib.dates as mdates
        import datetime as dt
        import numpy as np
        import matplotlib.pyplot as plt
        import pandas as pd

        myModelHandler = ModelHandler()
        regressor = myModelHandler.load_json_model(stock)

        # Importing the training set
        dataset = pd.read_csv(stock.csv_name)
        dates = dataset.iloc[len(dataset) - 31:len(dataset) - 1, 0].values
        dates = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in dates]

        # Feature Scaling
        from sklearn.preprocessing import MinMaxScaler
        sc = MinMaxScaler(feature_range=(0, 1))

        dataset_test = dataset[len(dataset) - 30:]
        real_stock_price = dataset_test.iloc[:, 1:2].values

        # Getting the predicted stock price
        dataset = dataset['Open']
        inputs = dataset[len(dataset) - len(dataset_test) - 60:].values
        inputs = inputs.reshape(-1, 1)
        inputs = sc.fit_transform(inputs)

        X_test = []
        for i in range(60, 90):
            X_test.append(inputs[i - 60:i, 0])

        X_test = np.array(X_test)
        X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))

        predicted_stock_price = regressor.predict(X_test)
        print(real_stock_price)

        predicted_stock_price = sc.inverse_transform(predicted_stock_price)
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
        plt.gca().xaxis.set_major_locator(mdates.DayLocator())
        plt.plot(dates,
                 real_stock_price,
                 color='red',
                 label=f'Actual {stock.ticker} Stock Price')
        plt.plot(dates,
                 predicted_stock_price,
                 color='blue',
                 label=f'Predicted {stock.ticker} Stock Price')
        plt.gcf().autofmt_xdate()
        plt.title(f'{stock.ticker} Stock Price Prediction')
        plt.xlabel('Time')
        plt.ylabel(f'{stock.ticker} Stock Price')
        plt.legend()
        plt.show()
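The loop above builds 30 overlapping 60-step windows and reshapes them to the (samples, timesteps, features) layout that Keras recurrent layers expect. A self-contained sketch of the same windowing on stand-in data:

import numpy as np

# Stand-in for the scaled input column: 90 values in [0, 1)
inputs = np.random.rand(90, 1)

# 30 overlapping windows of 60 timesteps each
X = np.array([inputs[i - 60:i, 0] for i in range(60, 90)])

# Keras recurrent layers expect (samples, timesteps, features)
X = np.reshape(X, (X.shape[0], X.shape[1], 1))
print(X.shape)  # (30, 60, 1)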
Example #5
    def fileLoad(self):
        self.filename = filedialog.askopenfilename()

        if self.filename != '':
            self.lb.config(text="File selected: " + self.filename)
            self.modelHandler1 = ModelHandler(self.filename)
            self.modelDrawer()

        else:
            self.lb.config(text="No file is selected.")
Example #6
def init_context(context):
    context.logger.info("Init context...  0%")

    # Read labels
    functionconfig = yaml.safe_load(open("/opt/nuclio/function.yaml"))
    labels_spec = functionconfig['metadata']['annotations']['spec']
    labels = {item['id']: item['name'] for item in json.loads(labels_spec)}

    # Read the DL model
    model = ModelHandler(labels)
    setattr(context.user_data, 'model', model)

    context.logger.info("Init context...100%")
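The spec annotation read here is a JSON list of label objects, which the dict comprehension turns into an id-to-name map. A self-contained illustration with made-up label names:

import json

# Illustrative value of functionconfig['metadata']['annotations']['spec']
labels_spec = '[{"id": 1, "name": "person"}, {"id": 2, "name": "car"}]'

labels = {item['id']: item['name'] for item in json.loads(labels_spec)}
print(labels)  # {1: 'person', 2: 'car'}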
Example #7
    def forecast(self, stock):
        """Loads the trained model, forecasts the values then displays a graph of the values"""
        # Load the trained model
        model_handler = ModelHandler()
        model = model_handler.load_json_model(stock)

        # Importing the training set
        dataset = pd.read_csv(stock.csv_name)
        dates = dataset.iloc[len(dataset) - 31:len(dataset) - 1, 0].values
        dates = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in dates]

        # Create the test dataset
        dataset_test = dataset[len(dataset) - 30:]
        real_stock_price = dataset_test.iloc[:, 1:2].values
        dataset = dataset['Open']
        inputs = dataset[len(dataset) - len(dataset_test) - 60:].values
        inputs = inputs.reshape(-1, 1)

        # Feature Scaling
        sc = MinMaxScaler(feature_range=(0, 1))
        inputs = sc.fit_transform(inputs)

        x_test = []
        x_test.append(inputs[0:60, 0])
        predicted_values = []
        for i in range(1, 31):
            x_test_np = np.array(x_test)
            x_test_np = np.reshape(x_test_np,
                                   (x_test_np.shape[0], x_test_np.shape[1], 1))
            new_data = model.predict(x_test_np)
            predicted_values.append(new_data[0])
            x_test[0] = np.delete(x_test[0], 0)
            x_test[0] = np.concatenate([x_test[0], new_data[0]])

        predicted_values = sc.inverse_transform(predicted_values)
        plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
        plt.gca().xaxis.set_major_locator(mdates.DayLocator())
        plt.plot(dates,
                 real_stock_price,
                 color='red',
                 label=f'Actual {stock.ticker} Stock Price')
        plt.plot(dates,
                 predicted_values,
                 color='blue',
                 label=f'Predicted {stock.ticker} Stock Price')
        plt.gcf().autofmt_xdate()
        plt.title(f'{stock.ticker} Stock Price Prediction')
        plt.xlabel('Time')
        plt.ylabel(f'{stock.ticker} Stock Price')
        plt.legend()
        plt.show()
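Unlike predict in Example #4, which scores 30 pre-built windows in one call, this loop forecasts recursively: each one-step prediction is appended to the window and the oldest value is dropped, so later predictions depend on earlier ones. A compact sketch of that feedback loop with a stand-in one_step function (an assumption in place of model.predict):

import numpy as np

def one_step(window):
    # Stand-in for model.predict on a single window; returns shape (1,)
    return np.array([window.mean()])

window = np.random.rand(60)  # the last 60 scaled observations
predictions = []
for _ in range(30):
    new_value = one_step(window)
    predictions.append(new_value[0])
    # Slide the window: drop the oldest value, append the new prediction
    window = np.concatenate([window[1:], new_value])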
Example #8
File: main.py Project: quuhua911/cvat
def init_context(context):
    context.logger.info("Init context...  0%")

    # Read labels
    with open("/opt/nuclio/function.yaml", 'rb') as function_file:
        functionconfig = yaml.safe_load(function_file)
    labels_spec = functionconfig['metadata']['annotations']['spec']
    labels = {item['id']: item['name'] for item in json.loads(labels_spec)}

    # Read the DL model
    model = ModelHandler(labels)
    context.user_data.model = model

    context.logger.info("Init context...100%")
Example #9
    def handle(self, **kwargs):
        cqfs = kwargs['cqfs']

        handler_2 = ModelHandler(successor=None)
        handler_1 = ValidateVectorHandler(successor=handler_2)

        try:
            weight_path = cqfs.dlg.ignicao_line_entrada_formula.text()
            if not os.path.isfile(weight_path):
                raise RuntimeError('Weight file is mandatory.')

            with open(weight_path, 'r') as stream:
                weight = yaml.load(stream)

                if weight is None or len(weight) == 0:
                    raise RuntimeError('Fail to load weight file.')

                kwargs['weight'] = weight
                handler_1.handle(**kwargs)
        except Exception as e:
            error_msg = u'{} => {}'.format(unicode(e),
                                           unicode(traceback.format_exc()))
            self.logger.fatal(error_msg)
            cqfs.show_error(error_msg)
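ValidateVectorHandler and ModelHandler are linked through the successor argument, a chain-of-responsibility arrangement in which each handler does its part of the work and then delegates to the next. A minimal sketch of the pattern (the class and method names below are illustrative, not taken from the project):

class Handler:
    def __init__(self, successor=None):
        self.successor = successor

    def handle(self, **kwargs):
        self.process(**kwargs)
        # Delegate the (possibly enriched) kwargs to the next handler
        if self.successor is not None:
            self.successor.handle(**kwargs)

    def process(self, **kwargs):
        raise NotImplementedError

class ValidateHandler(Handler):
    def process(self, **kwargs):
        if 'weight' not in kwargs:
            raise RuntimeError('Weight file is mandatory.')

class ModelRunner(Handler):
    def process(self, **kwargs):
        print('running model with weight:', kwargs['weight'])

chain = ValidateHandler(successor=ModelRunner())
chain.handle(weight={'a': 1.0})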
Example #10
def main(args):
    print_config(args)
    set_random_seed(args['random_seed'])
    model = ModelHandler(args)
    model.train()
    model.test()
Example #11
parser.add_argument('--batch-size', type=int, default=2)
parser.add_argument('--shuffle', type=str2bool, default=True)
parser.add_argument('--max_epochs', type=int, default=20)
parser.add_argument('--lr', type=float, default=2e-4)
parser.add_argument('--grad_clip', type=float, default=1.0)
parser.add_argument('--verbose',
                    type=int,
                    default=200,
                    help="print after verbose epochs")
parser.add_argument('--gradient_accumulation_steps',
                    type=int,
                    default=2,
                    help="Number of update steps to accumulate "
                         "before performing a backward/update pass.")
parser.add_argument("--adam_epsilon",
                    default=1e-8,
                    type=float,
                    help="Epsilon for Adam optimizer.")

args = vars(parser.parse_args())

if args['model_name'] == 'SpanBERT':
    download_model()
    args['model_path'] = 'tmp_'

# TODO: cuda check

handler = ModelHandler(args)
handler.train()
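str2bool is used as an argparse type converter but is not defined in this excerpt. A common implementation, given here as an assumption since the source does not show it:

import argparse

def str2bool(v):
    # Accept typical truthy/falsy spellings on the command line
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')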
Example #12
def model_hdlr():
    model_hdlr = ModelHandler()
    return model_hdlr
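This reads as a pytest fixture whose @pytest.fixture decorator was likely lost in extraction. A hedged usage sketch, assuming ModelHandler is importable from model_handler:

import pytest
from model_handler import ModelHandler  # assumed import path

@pytest.fixture
def model_hdlr():
    return ModelHandler()

def test_model_handler_exists(model_hdlr):
    # pytest injects the fixture's return value by parameter name
    assert model_hdlr is not None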
Example #13
sys.path.append(os.pardir)

# -----
from da_handler import DaHandler
dah = DaHandler()

aug_list = dah.imgaug_mode_list
aug_list.pop(0)

# -----
from data_handler import DataHandler
dth = DataHandler()

# -----
from model_handler import ModelHandler
mh = ModelHandler(224, 3)  # input_size=224, ch=3 (shared across models)
# -----
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

from keras.callbacks import EarlyStopping


# -----
def data_create():

    print("Do while below -----\n", aug_list)

    for aug in aug_list:
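The ConfigProto/Session calls in this example are TensorFlow 1.x API. Under TensorFlow 2.x, the equivalent per-GPU memory-growth setting would be:

import tensorflow as tf

# TF2 equivalent of gpu_options.allow_growth = True
for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)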
Example #14
    def __init__(self, model_file, syllable_file, syl2chinese):
        self.modelhandler = ModelHandler(model_file)
        self.syllablehandler = SyllableHandler(syllable_file)
        self.lexihandler = LexiHandler(syl2chinese)
        self.Init()
Example #15
File: compare.py Project: sudachi0114/cnn
def train(mode, base, set_epochs=30):

    print("===== mode: {} | base: {} =====".format(mode, base))

    cwd = os.getcwd()
    print("current working dir: ", cwd)

    cnn_dir = os.path.dirname(cwd)

    if mode == 'integrated':
        data_dir = os.path.join(cnn_dir, "dogs_vs_cats_integrated")
    elif mode == 'native':
        data_dir = os.path.join(cnn_dir, "dogs_vs_cats_smaller")
    train_dir = os.path.join(data_dir, "train")  # global
    validation_dir = os.path.join(data_dir, "validation")  # global
    print("train data is in ... ", train_dir)
    print("validation data is in ...", validation_dir)

    # make log dir -----
    log_dir = os.path.join(cwd, 'comp_log')
    os.makedirs(log_dir, exist_ok=True)
    child_log_dir = os.path.join(log_dir, "{}_{}".format(mode, base))
    os.makedirs(child_log_dir, exist_ok=True)

    dh = DaHandler()
    train_generator = dh.dataGeneratorFromDir(target_dir=train_dir)
    validation_generator = dh.dataGeneratorFromDir(target_dir=validation_dir)

    data_checker, label_checker = next(train_generator)

    print("data_checker shape : ", data_checker.shape)
    print("label_checker shape : ", label_checker.shape)

    INPUT_SIZE = data_checker.shape[1]
    print("INPUT_SIZE: ", INPUT_SIZE)

    CHANNEL = data_checker.shape[3]
    print("set channel : ", CHANNEL)

    batch_size = data_checker.shape[0]
    print("batch_size : ", batch_size)

    mh = ModelHandler(INPUT_SIZE, CHANNEL)

    if base == 'mymodel':
        model = mh.buildMyModel()
    elif base == 'mnv1':
        model = mh.buildTlearnModel(base='mnv1')

    model.summary()

    steps_per_epoch = train_generator.n // batch_size
    validation_steps = validation_generator.n // batch_size
    print(steps_per_epoch, " [steps / epoch]")
    print(validation_steps, " (validation steps)")

    if mode == 'native':
        set_epochs *= 2

    history = model.fit_generator(train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=set_epochs,
                                  validation_data=validation_generator,
                                  validation_steps=validation_steps,
                                  verbose=1)

    # save model & weights
    model_file = os.path.join(child_log_dir,
                              '{}_{}_model.h5'.format(mode, base))
    model.save(model_file)

    # save history
    history_file = os.path.join(child_log_dir,
                                '{}_{}_history.pkl'.format(mode, base))
    with open(history_file, 'wb') as p:
        pickle.dump(history.history, p)

    print("\nexport logs in ", child_log_dir)

    # return values -----
    acc_list = history.history['accuracy']
    last_acc = acc_list[len(acc_list) - 1]
    print("\nlast accuracy: ", last_acc)
    val_acc_list = history.history['val_accuracy']
    last_val_acc = val_acc_list[len(val_acc_list) - 1]
    print("last validation accuracy: ", last_val_acc)

    return last_acc, last_val_acc
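fit_generator, used above, has since been deprecated; in TensorFlow 2.x Keras, model.fit accepts generators directly with the same arguments. A hedged modern equivalent of the call above:

# Modern Keras: fit() handles generators; fit_generator() is deprecated
history = model.fit(train_generator,
                    steps_per_epoch=steps_per_epoch,
                    epochs=set_epochs,
                    validation_data=validation_generator,
                    validation_steps=validation_steps,
                    verbose=1)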
Example #16
def compute_bert_embedding():
    args = coqa_parser()
    handler = ModelHandler(args)