def modify_story(story_id):
    if request.method == 'GET':
        data = load_data()
        if data.get(story_id) is None:
            return redirect(url_for('add_story'))
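        # map each status to a flag marking whether it is the story's current status, so the form can pre-select it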
        status = dict(
            zip(STATUSES, [x == data[story_id]['status'] for x in STATUSES]))
        return render_template('form.html',
                               edit=True,
                               story=data[story_id],
                               status=status)
    elif request.method == 'POST':
        data = load_data()
        # update entry
        data[story_id] = {
            'id': story_id,
            'title': request.form.get('title'),
            'description': request.form.get('description'),
            'accept': request.form.get('accept'),
            'business_value': int(request.form.get('business_value')),
            'estimation': float(request.form.get('estimation')),
            'status': request.form.get('status')
        }
        save_data(data)
        return redirect(url_for('list_stories'))
def add_story():
    if request.method == 'GET':
        base_story = {
            'id': None,
            'title': '',
            'description': '',
            'accept': '',
            'business_value': 1000,
            'estimation': 2.5,
            'status': 'Planning'
        }
        base_status = dict(
            zip(STATUSES, [x == base_story['status'] for x in STATUSES]))
        # return empty form
        return render_template('form.html',
                               edit=False,
                               story=base_story,
                               status=base_status)
    elif request.method == 'POST':
        data = load_data()
        newuid = generate_uniqe_id(data)
        # add new entry
        data[newuid] = {
            'id': newuid,
            'title': request.form.get('title'),
            'description': request.form.get('description'),
            'accept': request.form.get('accept'),
            'business_value': int(request.form.get('business_value')),
            'estimation': float(request.form.get('estimation')),
            'status': request.form.get('status')
        }
        save_data(data)
        # redirect to list page
        return redirect(url_for('list_stories'))
Example 3
    def get_data_to_prediction(self, code, num_step):
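        # gather KOSPI index, individual stock and bond price series and merge them into one feature frame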
        dict_data_kospi = self.creon_7400_코스피()
        df_kospi = pd.DataFrame(dict_data_kospi)
        df_kospi = df_kospi.rename(columns={'close': 'close_kospi'})

        dict_data_stock = self.creon_7400_주식차트조회(code, 'D', 1)
        df_stock = pd.DataFrame(dict_data_stock)

        dict_data_bond = self.creon_7818_국채()
        df_bond = pd.DataFrame(dict_data_bond)
        df_bond = df_bond.rename(columns={'close': 'close_bond'})

        dict_data_7059 = self.creon_7059_주식지수(code)

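        # left-join the bond, KOSPI and stock frames on their shared 'date' column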
        data = pd.merge(df_bond, df_kospi, "left", "date")
        data = pd.merge(data, df_stock, "left", "date")

        data['per'] = dict_data_7059['per']
        data['pbr'] = dict_data_7059['pbr']
        data['roe'] = dict_data_7059['roe']

        data = data.sort_values(by='date', ascending=True).reset_index()

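        # rolling moving averages over several windows, plus each close's relative gap from its average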
        windows = [5, 20, 60, 120]
        for window in windows:
            kospi_ma = 'market_kospi_ma{}'.format(window)
            data[kospi_ma] = data['close_kospi'].rolling(window).mean()
            data[kospi_ma + '_ratio'] = (data['close_kospi'] - data[kospi_ma]) / data[kospi_ma]

            bond_ma = 'bond_k3y_ma{}'.format(window)
            data[bond_ma] = data['close_bond'].rolling(window).mean()
            data[bond_ma + '_ratio'] = (data['close_bond'] - data[bond_ma]) / data[bond_ma]

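        # keep only the columns used as prediction inputs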
        data = data[['date', 'open', 'high', 'low', 'close', 'volume', 'per', 'pbr', 'roe', 'market_kospi_ma5_ratio',
                     'market_kospi_ma20_ratio', 'market_kospi_ma60_ratio', 'market_kospi_ma120_ratio',
                     'bond_k3y_ma5_ratio', 'bond_k3y_ma20_ratio', 'bond_k3y_ma60_ratio', 'bond_k3y_ma120_ratio']]

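        # limit the date range to the most recent num_step rows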
        date_start = str(data['date'].iloc[-num_step])
        date_end = str(data['date'].iloc[-1])

        data = data_manager.load_data(data, date_start, date_end, 'v2', False)[1]
        data = data.values.tolist()

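        # append two neutral placeholder values to every row (assumed stand-ins for the agent's portfolio-state features)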
        return [d + [0.5, 0.5] for d in data]
Example 4
def main(args):
    """
        Description: Create, train and test the conv net
    """

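    # argv layout (assumed from usage below): args[1] is the data path; args[2]-args[6] configure training and the network sizes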
    # Get the data
    train_data, train_labels, test_data, test_labels = load_data(args[1])

    print(np.shape(train_data))

    # Build our ConvNet
    conv_net = ConvolutionalNet(int(args[5]), int(args[6]),
                                np.shape(train_data)[1],
                                np.shape(test_data[0])[1])

    # Train the ConvNet
    conv_net.train(train_data, train_labels, int(args[2]), int(args[3]),
                   float(args[4]))

    # Test the ConvNet
    conv_net.test(test_data, test_labels)
Example 5
import numpy as np
import keras
from keras.models import Sequential
from keras.optimizers import SGD
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Dropout, Flatten, Dense, Activation
from keras.layers.normalization import BatchNormalization
from data_manager import RESIZE_DIMENSION, int2label, label2int, load_data
NUM_CLASS = len(int2label)
INPUT_DIM = RESIZE_DIMENSION * RESIZE_DIMENSION * 3

# load and one-hot encode the train and test splits
x_train, y_train = load_data("data/train", keep_original=True)
y_train = keras.utils.to_categorical(y_train, num_classes=NUM_CLASS)
x_test, y_test = load_data("data/test", keep_original=True)
y_test = keras.utils.to_categorical(y_test, num_classes=NUM_CLASS)

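# small CNN: batch-normalize the input, then stack Conv2D/MaxPooling blocks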
model = Sequential()
model.add(
    BatchNormalization(input_shape=(RESIZE_DIMENSION, RESIZE_DIMENSION, 3)))
model.add(
    Conv2D(filters=16,
           kernel_size=3,
           kernel_initializer='he_normal',
           activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(BatchNormalization())

model.add(
    Conv2D(filters=32,
           kernel_size=3,
           kernel_initializer='he_normal',
Example 6
                img3d.shape,
                pixel_size_mm=PIXEL_SIZE_MM_SETUP,
                masks=cylindric_masks,
                radius_coef=radius_coefs[polimer_type])

            dm.save_plot(fig, SAVE_IMG_DESKTOP_SETUP_FOLDER,
                         f'hist {sample_id} {sample_name}')


if __name__ == '__main__':

    polimer_type = ["PDL-05", "PDLG-5002"][0]
    radius_coefs = {"PDL-05": 0.9, "PDLG-5002": 0.95}

    paths = file_paths.get_benchtop_setup_paths(polimer_type)
    df = dm.load_data("setup_culindric_porosities.csv")
    #df =  pd.DataFrame(columns = ['polimer_type', 'sample_number', 'date', 'mean', 'std'])

    for sample_id, sample_name in enumerate(paths):
        print(
            f"============== {sample_id} sample: {sample_name} ==============")
        print(sample_name.split())

        img3d = ~get_bin_img(sample_name)
        print('tot: ', np.sum(img3d) / img3d.size)

        fig, ax = plt.subplots()
        ax.imshow(img3d[0], cmap="gray")
        dm.save_plot(fig, "previews", f'{sample_id} bin ' + sample_name)
def del_story(story_id):
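    # delete the story if it exists, then persist the change and return to the list view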
    data = load_data()
    if data.get(story_id) is not None:
        del data[story_id]
    save_data(data)
    return redirect(url_for('list_stories'))
Example 8
def executor(args):
    # Configure the Keras backend
    if args.backend == 'tensorflow':
        os.environ['KERAS_BACKEND'] = 'tensorflow'
    elif args.backend == 'plaidml':
        os.environ['KERAS_BACKEND'] = 'plaidml.keras.backend'

    # Set up the output path
    output_path = os.path.join(
        settings.BASE_DIR, 'output/{}_{}_{}'.format(args.output_name,
                                                    args.rl_method, args.net))
    if not os.path.isdir(output_path):
        os.makedirs(output_path)

    # Record the run parameters
    with open(os.path.join(output_path, 'params.json'), 'w') as f:
        f.write(json.dumps(vars(args)))

    # Configure logging
    file_handler = logging.FileHandler(filename=os.path.join(
        output_path, "{}.log".format(args.output_name)),
                                       encoding='utf-8')
    stream_handler = logging.StreamHandler(sys.stdout)
    file_handler.setLevel(logging.DEBUG)
    stream_handler.setLevel(logging.INFO)
    logging.basicConfig(format="%(message)s",
                        handlers=[file_handler, stream_handler],
                        level=logging.DEBUG)

    # Logging and the Keras backend must be configured first; only then should the RLTrader modules be imported
    from agent import Agent
    from learners import DQNLearner

    # Prepare the model paths
    value_network_path = ''
    policy_network_path = ''
    if args.value_network_name is not None:
        value_network_path = os.path.join(
            settings.BASE_DIR, 'models/{}.h5'.format(args.value_network_name))
    else:
        value_network_path = os.path.join(
            output_path, '{}_{}_value_{}.h5'.format(args.rl_method, args.net,
                                                    args.output_name))
    if args.policy_network_name is not None:
        policy_network_path = os.path.join(
            settings.BASE_DIR, 'models/{}.h5'.format(args.policy_network_name))
    else:
        policy_network_path = os.path.join(
            output_path, '{}_{}_policy_{}.h5'.format(args.rl_method, args.net,
                                                     args.output_name))

    common_params = {}
    list_stock_code = []
    list_chart_data = []
    list_training_data = []
    list_min_trading_unit = []
    list_max_trading_unit = []

    stock_code = args.stock_code

    get_data.get_data(stock_code, args.start_date, args.end_date, ver=args.ver)

    # Prepare the chart data and training data
    chart_data, training_data = data_manager.load_data(os.path.join(
        settings.BASE_DIR, 'data/{}/{}.csv'.format(args.ver, stock_code)),
                                                       args.start_date,
                                                       args.end_date,
                                                       ver=args.ver)

    # Set the minimum/maximum trading units
    min_trading_unit = max(int(10000 / chart_data.iloc[-1]['close']), 1)
    max_trading_unit = max(int(100000 / chart_data.iloc[-1]['close']), 1)

    # Set the common parameters
    common_params = {
        'rl_method': args.rl_method,
        'delayed_reward_threshold': args.delayed_reward_threshold,
        'net': args.net,
        'num_steps': args.num_steps,
        'lr': args.lr,
        'output_path': output_path,
        'reuse_models': args.reuse_models
    }

    # Start reinforcement learning
    learner = None

    common_params.update({
        'stock_code': stock_code,
        'chart_data': chart_data,
        'training_data': training_data,
        'min_trading_unit': min_trading_unit,
        'max_trading_unit': max_trading_unit
    })

    learner = DQNLearner(
        **{
            **common_params, 'value_network_path': value_network_path
        })

    if learner is not None:
        pvs = learner.run(balance=args.balance,
                          num_epoches=args.num_epoches,
                          discount_factor=args.discount_factor,
                          start_epsilon=args.start_epsilon,
                          learning=args.learning)
        learner.save_models()

    return chart_data, pvs
Example 9
        policy_network_path = os.path.join(
            output_path, '{}_{}_policy_{}.h5'.format(args.rl_method, args.net,
                                                     args.output_name))

    common_params = {}
    list_stock_code = []
    list_chart_data = []
    list_training_data = []
    list_min_trading_unit = []
    list_max_trading_unit = []

    for stock_code in args.stock_code:
        # Prepare the chart data and training data
        chart_data, training_data = data_manager.load_data(os.path.join(
            settings.BASE_DIR, 'data/{}/{}.csv'.format(args.ver, stock_code)),
                                                           args.start_date,
                                                           args.end_date,
                                                           ver=args.ver)

        # Set the minimum/maximum trading units
        min_trading_unit = max(int(100000 / chart_data.iloc[-1]['close']), 1)
        max_trading_unit = max(int(1000000 / chart_data.iloc[-1]['close']), 1)

        # Set the common parameters
        common_params = {
            'rl_method': args.rl_method,
            'delayed_reward_threshold': args.delayed_reward_threshold,
            'net': args.net,
            'num_steps': args.num_steps,
            'lr': args.lr,
            'output_path': output_path,
Example 10
    d = {
        "costs": costs,
        "Y_prediction_test": Y_prediction_test,
        "Y_prediction_train": Y_prediction_train,
        "w": w,
        "b": b,
        "learning_rate": learning_rate,
        "num_iterations": num_iterations
    }

    return d


if __name__ == "__main__":
    # load train data
    X_train, train_ids = load_data(sys.argv[1])

    # load test data
    X_test, test_ids = load_data(sys.argv[2])

    # preprocess the label data
    Y_train = ids2label(train_ids)
    Y_test = ids2label(test_ids)
    d = model(X_train,
              Y_train,
              X_test,
              Y_test,
              num_iterations=2000,
              learning_rate=0.005,
              print_cost=True)
def get_data(username):
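    # return the stored data for this user as a JSON response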
    return jsonify(data_manager.load_data(username))
Example 12
from data_manager import load_data, preprocess, get_train_data
from environment import Environment

ticker = 'MSFT'
start_date = '2010-01-01'

df = load_data(ticker, start_date)
df['Date'] = df['Date'].astype('str')
# print(df.head())
predf = preprocess(df)
# print(predf.head())

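# sanity-check the trading environment on the preprocessed frame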
print(Environment(predf).observe())
print(Environment(predf).get_price())
Example 13
import data_manager as dm
import neuron_analysis as ana
import pupil_data as pup
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

#%%
data = dm.load_data()
spikes, pupil, contrast = dm.filter_data(data,
                                         session=21,
                                         areas=['all_vis'],
                                         time_bins=(50, 150))
results = ana.get_contrast_modulation(spikes, contrast)

neuron_id = 8

sns.barplot(data=results, x='contrast', y='fr')  # plot firing rate by contrast (assumes results is a frame with 'contrast' and 'fr' columns)

dm.survey_contrast_modulation(data, thresh_p=0.05, thresh_var=0.05)

#%% pupil size

# Look at a few random pupil traces
spikes, pupil, contrast = dm.filter_data(data,
                                         session=21,
                                         areas=['all_vis'],
                                         time_bins=(25, 45))
n_rows = 4
n_cols = 6
Example 14
    def train(self, model):

        #training parameters
        batch_size = 128
        maxepoches = self.maxepochs
        #         learning_rate = 0.1
        #         lr_decay = 1e-6
        #         lr_drop = 20

        # The data, shuffled and split between train and test sets:
        if self.data_name == "custom":
            assert self.train_now
            _, (x_test, y_test) = data_manager.load_data("mnist")
            (x_train, y_train) = self.in_memory_train_data
            if (len(np.shape(x_train)) == 3):
                x_train = tf.expand_dims(x_train, 3)
            if (len(y_train[0]) != self.num_classes):
                y_train = tensorflow.keras.utils.to_categorical(
                    y_train, self.num_classes)
        else:
            (x_train,
             y_train), (x_test,
                        y_test) = data_manager.load_data(self.data_name)

        x_train, x_test = self.normalize(x_train, x_test)

        #         def lr_scheduler(epoch):
        #             return learning_rate * (0.5 ** (epoch // lr_drop))
        #         reduce_lr = tensorflow.keras.callbacks.LearningRateScheduler(lr_scheduler)

        #         #data augmentation
        #         datagen = ImageDataGenerator(
        #             featurewise_center=False,  # set input mean to 0 over the dataset
        #             samplewise_center=False,  # set each sample mean to 0
        #             featurewise_std_normalization=False,  # divide inputs by std of the dataset
        #             samplewise_std_normalization=False,  # divide each input by its std
        #             zca_whitening=False,  # apply ZCA whitening
        #             rotation_range=15,  # randomly rotate images in the range (degrees, 0 to 180)
        #             width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        #             height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        #             horizontal_flip=True,  # randomly flip images
        #             vertical_flip=False)  # randomly flip images
        #         # (std, mean, and principal components if ZCA whitening is applied).
        #         datagen.fit(x_train)

        #optimization details
        #         sgd = optimizers.SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True)

        adam = optimizers.Adam(learning_rate=0.001,
                               beta_1=0.9,
                               beta_2=0.999,
                               amsgrad=False)

        model.compile(loss='categorical_crossentropy',
                      optimizer=adam,
                      metrics=['acc'])

        # early stopping
        estop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                 min_delta=0,
                                                 patience=0,
                                                 verbose=0,
                                                 mode='auto')

        # training (the learning-rate schedule above is commented out; only early stopping is used)

        historytemp = model.fit(
            x_train,
            y_train,
            shuffle=self.shuffle,
            batch_size=batch_size,
            #                                 steps_per_epoch = np.shape(x_train)[0] // batch_size,
            epochs=maxepoches,
            validation_data=(x_test, y_test),
            callbacks=[estop],
            verbose=1)
        weights_path = "weights/mnistcnn_" + self.data_name + ".h5"
        model.save_weights(weights_path)
        return model
Example 15
log = setup_logging("../output")

# load the configuration
config = load_config(pathConfig)
log.info('configuration has been loaded')
nb_clusters = config['nb_clusters']
random_state = config['random_state']
pathData = config['pathData']
features = config['features']

# load the data
data = load_data(pathData, features)
log.info('data has been loaded')

# compute k-means clustering
results, model = clustering_KMeans(data, nb_clusters, random_state)
log.info('the model has been trained')

# add the cluster number as a new column to the data frame
features.append("num_cluster")
clustered_data = pd.DataFrame(results, columns=features)

# save data
save_data(pathData, clustered_data)
log.info('results saved')

Example 16
            else:
                value_network_path = os.path.join(
                    output_path,
                    '{}_{}_value_{}.h5'.format(args.rl_method, args.net,
                                               output_name))
                policy_network_path = os.path.join(
                    output_path,
                    '{}_{}_policy_{}.h5'.format(args.rl_method, args.net,
                                                output_name))
        #################################################### Added 2021-05-14: implemented per stock code
        # Prepare the chart data and training data
        #chart_data, training_data = data_manager.load_data(os.path.join(settings.BASE_DIR,'data/{}/{}_data.txt'.format(args.ver, stock_code)), start_time, end_time, ver=args.ver)
        chart_data, training_data = data_manager.load_data(
            os.path.join(
                settings.BASE_DIR,
                'files/OSSP_KOSPI/{}_day_data.txt'.format(stock_code)),
            ver=args.ver,
            start_time=start_time,
            end_time=end_time)
        # Set the minimum/maximum trading units
        min_trading_unit = 1  #max(int((args.balance)/100 / chart_data.iloc[-1]['close']), 1)
        max_trading_unit = max(
            int(args.balance / chart_data.iloc[-1]['close']), 1)

        # Set the common parameters
        common_params = {
            'rl_method': args.rl_method,
            'delayed_reward_threshold': args.delayed_reward_threshold,
            'net': args.net,
            'num_steps': args.num_steps,
            'lr': args.lr,
Example 17
        env = Dummy()

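        # graph inputs: two batches of embedded sequences plus one scalar label per pair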
        env.x0 = tf.placeholder(tf.float32,
                                (batch_size, maxlen0, embedding_dim),
                                name='x0')
        env.x1 = tf.placeholder(tf.float32,
                                (batch_size, maxlen1, embedding_dim),
                                name='x1')
        env.y = tf.placeholder(tf.float32, (batch_size, 1), name='y')
        build_model(env)
        sess = tf.InteractiveSession()
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        print('Load data start...')
        X_train_qu, X_train_col, y_train, X_test_qu, X_test_col, y_test, X_dev_qu, X_dev_col, y_dev = load_data(
            datapath='data/geo')
        print('Load data done...')

        if args.mode == 'train':
            train(sess,
                  env,
                  X_train_qu,
                  X_train_col,
                  y_train,
                  X_dev_qu,
                  X_dev_col,
                  y_dev,
                  epochs=train_epochs,
                  load=False,
                  shuffle=True,
                  batch_size=batch_size,
    def train(self, model):

        #training parameters
        batch_size = 128
        maxepoches = self.maxepochs
        learning_rate = 0.1
        lr_decay = 1e-6
        lr_drop = 20

        # The data, shuffled and split between train and test sets:
        (x_train, y_train), (x_test,
                             y_test) = data_manager.load_data(self.data_name)
        x_train, x_test = self.normalize(x_train, x_test)

        def lr_scheduler(epoch):
            return learning_rate * (0.5**(epoch // lr_drop))

        reduce_lr = tensorflow.keras.callbacks.LearningRateScheduler(
            lr_scheduler)

        #         #data augmentation
        #         datagen = ImageDataGenerator(
        #             featurewise_center=False,  # set input mean to 0 over the dataset
        #             samplewise_center=False,  # set each sample mean to 0
        #             featurewise_std_normalization=False,  # divide inputs by std of the dataset
        #             samplewise_std_normalization=False,  # divide each input by its std
        #             zca_whitening=False,  # apply ZCA whitening
        #             rotation_range=15,  # randomly rotate images in the range (degrees, 0 to 180)
        #             width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        #             height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        #             horizontal_flip=True,  # randomly flip images
        #             vertical_flip=False)  # randomly flip images
        #         # (std, mean, and principal components if ZCA whitening is applied).
        #         datagen.fit(x_train)

        #optimization details
        sgd = optimizers.SGD(lr=learning_rate,
                             decay=lr_decay,
                             momentum=0.9,
                             nesterov=True)
        model.compile(loss='categorical_crossentropy',
                      optimizer=sgd,
                      metrics=['acc'])

        # early stopping
        estop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                 min_delta=0,
                                                 patience=0,
                                                 verbose=0,
                                                 mode='auto')

        # training with a learning-rate drop every lr_drop epochs and early stopping

        historytemp = model.fit(
            x_train,
            y_train,
            batch_size=batch_size,
            #                                 steps_per_epoch = np.shape(x_train)[0] // batch_size,
            epochs=maxepoches,
            validation_data=(x_test, y_test),
            callbacks=[reduce_lr, estop],
            verbose=1)
        model.save_weights('weights/mnistcnn_test.h5')
        return model
def list_stories():
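    # render all stories sorted alphabetically by title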
    data = sorted(load_data().values(), key=lambda x: x['title'])
    return render_template('list.html', stories=data)
Example 20
            '8200445_PHAAET_rec290422019_PRincon_S1',
            '8200473_PHAAET_rec24052019_PRincon_S2',
            '8200487_PHAAET_rec04052019_PRincon_S1',
            '8200718_PHAAET_rec08032019_PRincon',
            '8201653_PHAAET_I.Cima_rec21012021_ninho 39_36_S1',
            '8201667_PHAAET_I.Cima_rec21012021_ninho 68_21_S1',
            '8201720_PHAAET_rec31122020_ICima_ninho 71_21_S1',
            '8201959_PHAAET_rec29122020_ICima_ninho 31_36_S1']

### Detect events for the given datasets
for filename in filenames:
    
    path = ""
    
    # Load data and filter acceleration signals with a butterworth filter
    initial_data = data_manager.load_data(filename, path)
    current_data = copy.deepcopy(initial_data)
    current_data.filter_accelerations(4, 0.4)
    all_data.append(current_data)
    print("Data loaded: "+filename)
    
    '''
    ###############################
    ### Plot raw vs filtered signal
    fig, ax = plt.subplots(3,2,figsize = (8,6))
    fig.suptitle("Raw vs filtered acceleration signals")
    ax[0,0].plot(initial_data.ax[10000:10200], 'b-')
    ax[0,0].set_ylim([-3, 3])
    ax[0,1].plot(current_data.ax[10000:10200], 'b-')
    ax[0,1].set_ylim([-3, 3])
    ax[0,0].set_ylabel("ax")
Example 21
    else:
        policy_network_path = os.path.join(
            output_path, '{}_{}_policy_{}.h5'.format(args.rl_method, args.net,
                                                     args.output_name))

    common_params = {}
    list_stock_code = []
    list_chart_data = []
    list_training_data = []
    list_min_trading_unit = []
    list_max_trading_unit = []

    for stock_code in args.stock_code:
        # Read the data
        DBdata = data_manager.read_DBdata(today)
        chart_data, training_data = data_manager.load_data(DBdata)

        # Set the minimum/maximum trading units
        min_trading_unit = max(int(100000 / chart_data.iloc[-1]['close']), 1)
        max_trading_unit = max(int(1000000 / chart_data.iloc[-1]['close']), 1)

        # Set the common parameters
        common_params = {
            'rl_method': args.rl_method,
            'delayed_reward_threshold': args.delayed_reward_threshold,
            'net': args.net,
            'num_steps': args.num_steps,
            'lr': args.lr,
            'output_path': output_path,
            'reuse_models': args.reuse_models
        }