Example #1
    abs_loss = gluon.loss.L1Loss()  # used below; defined here as in the other examples on this page
    L2_loss = gluon.loss.L2Loss()
    huber_loss = gluon.loss.HuberLoss()
    initializer = mx.initializer.MSRAPrelu()
    optimizer = 'adam'
    optimizer_params = {'learning_rate': args.learning_rate}

    trainer_params_list = {
        'batch_size': args.batch_size,
        'epoch_num': args.epochs,
        'loss_func': abs_loss,
        'initializer': initializer,
        'optimizer': optimizer,
        'optimizer_params': optimizer_params,
        'patience': args.patience
    }

    train_mark = 'testing'

    nn_trainer(train_mark,
               model1,
               test_sub_valid_X,
               test_future_test_X,
               test_sub_valid_Y,
               sub_train_nd,
               sub_valid_X,
               future_test_X,
               sub_valid_Y,
               trainer_params_list=trainer_params_list,
               ctx=ctx,
               model_name=args.model_name)
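This snippet comes from inside a larger training script: args, ctx, model1, and the various *_X/*_Y arrays are defined elsewhere. A minimal sketch of the command-line and device setup such a script needs, assuming the argument names used above (the defaults are illustrative, not taken from the original):

import argparse
import mxnet as mx

parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', type=float, default=0.001)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--epochs', type=int, default=500)
parser.add_argument('--patience', type=int, default=10)
parser.add_argument('--model_name', type=str, default='tcn')
args = parser.parse_args()

# run on the GPU when one is available, otherwise fall back to the CPU
try:
    mx.nd.zeros((1,), ctx=mx.gpu())
    ctx = mx.gpu()
except mx.MXNetError:
    ctx = mx.cpu()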
Example #2
batch_size = 128
n_epochs = 500
"""
The model training
"""
### The model parameters
abs_loss = gluon.loss.L1Loss()
L2_loss = gluon.loss.L2Loss()
huber_loss = gluon.loss.HuberLoss()
initializer = mx.initializer.MSRAPrelu()
optimizer = 'adam'
optimizer_params = {'learning_rate': 0.01}

trainer_params_list = {
    'batch_size': batch_size,
    'epoch_num': n_epochs,
    'loss_func': huber_loss,
    'initializer': initializer,
    'optimizer': optimizer,
    'optimizer_params': optimizer_params
}
train_mark = 'testing'
nn_trainer(train_mark,
           model1,
           sub_train_nd,
           conv_valid_X,
           sub_valid_X,
           sub_valid_Y,
           trainer_params_list=trainer_params_list,
           ctx=ctx)
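nn_trainer itself is not shown on this page. As orientation, here is a minimal sketch of the standard Gluon training loop that a wrapper like it presumably implements, driven by the same trainer_params_list fields; simple_trainer is a hypothetical stand-in, not the actual nn_trainer:

from mxnet import autograd, gluon

def simple_trainer(net, train_dataset, params, ctx):
    # hypothetical stand-in for nn_trainer, wired from the trainer_params_list fields
    net.initialize(params['initializer'], ctx=ctx)
    trainer = gluon.Trainer(net.collect_params(),
                            params['optimizer'], params['optimizer_params'])
    loader = gluon.data.DataLoader(train_dataset,
                                   batch_size=params['batch_size'], shuffle=True)
    for epoch in range(params['epoch_num']):
        cum_loss = 0.0
        for X, future_X, y in loader:  # the ArrayDataset holds three arrays per sample
            with autograd.record():
                # assumes the model consumes both inputs; the real TCN signature may differ
                loss = params['loss_func'](net(X, future_X), y)
            loss.backward()
            trainer.step(X.shape[0])
            cum_loss += loss.mean().asscalar()
        print('epoch %d, mean batch loss %.4f' % (epoch, cum_loss / len(loader)))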
Example #3
with open('feature_prepare.pkl', 'rb') as f:
    [trainX_dt, trainX2_dt, trainY_dt, trainY2_dt,
     testX_dt, testX2_dt, testY_dt, testY2_dt] = pickle.load(f)

sub_train_X, sub_train_Y = nd.array(trainX_dt, ctx=ctx), nd.array(trainY_dt, ctx=ctx)
sub_valid_X, sub_valid_Y = nd.array(testX_dt, ctx=ctx), nd.array(testY_dt, ctx=ctx)
future_train_X, future_test_X = nd.array(trainY2_dt, ctx=ctx), nd.array(testY2_dt, ctx=ctx)

sub_train_nd = gluon.data.ArrayDataset(sub_train_X, future_train_X, sub_train_Y)

model1 = TCN()
# choose parameters
batch_size = 512
n_epochs = 500
"""
The model training
"""
### The model parameters
abs_loss = gluon.loss.L1Loss()
L2_loss = gluon.loss.L2Loss()
huber_loss = gluon.loss.HuberLoss()
initializer = mx.initializer.MSRAPrelu()
optimizer = 'adam'
optimizer_params = {'learning_rate': 0.5}

trainer_params_list = {'batch_size': batch_size, 'epoch_num': n_epochs,
                       'loss_func': abs_loss, 'initializer': initializer,
                       'optimizer': optimizer, 'optimizer_params': optimizer_params}
train_mark = 'testing'
nn_trainer(train_mark, model1, sub_train_nd, sub_valid_X, future_test_X,
           sub_valid_Y, trainer_params_list=trainer_params_list, ctx=ctx)
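The ArrayDataset built above yields one (X, future_X, y) triple per index, which is what batching inside the trainer relies on. A quick sanity check on the assembled data:

# each item of the ArrayDataset is an (X, future_X, y) triple
X0, fX0, y0 = sub_train_nd[0]
print(X0.shape, fX0.shape, y0.shape)
print(len(sub_train_nd), 'training samples')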

Example #4
def optimize(optimizer_params):
    """
    The model optimization
    """
    # Keep track of evals
    global ITERATION, out_file, epochs
    ITERATION += 1
    ### The model parameters
    abs_loss = gluon.loss.L1Loss()
    L2_loss = gluon.loss.L2Loss()
    huber_loss = gluon.loss.HuberLoss()
    initializer = mx.initializer.MSRAPrelu()
    optimizer = 'adam'
    # the sampled hyperparameter values may arrive as floats, so cast the integer ones
    optimizer_params['units'] = int(optimizer_params['units'])
    optimizer_params['batch_size'] = int(optimizer_params['batch_size'])
    print("Iteration %d: Creating model" % ITERATION)

    model1 = TCN(input_dimention=args.dim,
                 output_ax=args.horizon,
                 units=optimizer_params['units'],
                 dropout=optimizer_params['dropout'])

    train_mark = 'optimizing'

    #epochs = int(batch/batches[0])
    trainer_params_list = {
        'batch_size': optimizer_params['batch_size'],
        'epoch_num': epochs,
        'loss_func': huber_loss,
        'initializer': initializer,
        'optimizer': optimizer,
        'optimizer_params': {
            'learning_rate': optimizer_params['learning_rate']
        },
        'patience': args.patience,
        'iteration': ITERATION,
        'units': optimizer_params['units'],
        'dropout': optimizer_params['dropout'],
        'lr': optimizer_params['learning_rate']
    }

    print("Training model ... ")
    st_time = datetime.now()

    valid_loss, valid_ND, valid_NRMSE, rho10, rho50, rho90 = nn_trainer(
        train_mark,
        model1,
        test_sub_valid_X,
        test_future_test_X,
        test_sub_valid_Y,
        sub_train_nd,
        sub_valid_X,
        future_test_X,
        sub_valid_Y,
        trainer_params_list=trainer_params_list,
        ctx=ctx,
        model_name=args.model_name)

    eval_time = str(datetime.now() - st_time)
    print("Iteration %d: Getting results ... " % ITERATION)

    return {
        'loss': valid_ND,
        'ND': valid_ND,
        'NRMSE': valid_NRMSE,
        'val_loss': valid_loss,
        'params': optimizer_params,
        'rho_metric': {
            'rho10': rho10,
            'rho50': rho50,
            'rho90': rho90
        },
        'iteration': ITERATION,
        'eval_time': eval_time,
        'status': STATUS_OK
    }
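The dictionary returned here, with a 'loss' key and 'status': STATUS_OK, is the contract hyperopt expects from an objective function. A sketch of how such an objective is typically driven with hyperopt; the search-space bounds below are illustrative, not taken from the original script:

import numpy as np
from hyperopt import fmin, tpe, hp, Trials

ITERATION = 0   # incremented inside the objective
epochs = 100    # the objective reads this global

# quniform/loguniform return floats, which is why the objective casts units
# and batch_size to int before use
space = {
    'units': hp.quniform('units', 32, 256, 32),
    'batch_size': hp.quniform('batch_size', 64, 512, 64),
    'dropout': hp.uniform('dropout', 0.0, 0.5),
    'learning_rate': hp.loguniform('learning_rate', np.log(1e-4), np.log(1e-1)),
}

trials = Trials()
best = fmin(fn=optimize,      # the objective defined above
            space=space,
            algo=tpe.suggest,
            max_evals=50,
            trials=trials)
print('best hyperparameters:', best)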